[VOL-4292] OpenOLT Adapter changes for gRPC migration

Change-Id: I5af2125f2c2f53ffc78c474a94314bba408f8bae
diff --git a/vendor/github.com/DataDog/zstd/.travis.yml b/vendor/github.com/DataDog/zstd/.travis.yml
deleted file mode 100644
index 629470c..0000000
--- a/vendor/github.com/DataDog/zstd/.travis.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-dist: xenial
-language: go
-
-go:
-  - 1.10.x
-  - 1.11.x
-  - 1.12.x
-
-os:
-  - linux
-  - osx
-
-matrix:
-  include:
-    name: "Go 1.11.x CentOS 32bits"
-    language: go
-    go: 1.11.x
-    os: linux
-    services:
-      - docker
-    script:
-      # Please update Go version in travis_test_32 as needed
-      - "docker run -i -v \"${PWD}:/zstd\" toopher/centos-i386:centos6 /bin/bash -c \"linux32 --32bit i386 /zstd/travis_test_32.sh\""
-
-install:
-  - "wget https://github.com/DataDog/zstd/files/2246767/mr.zip"
-  - "unzip mr.zip"
-script:
-  - "go build"
-  - "PAYLOAD=`pwd`/mr go test -v"
-  - "PAYLOAD=`pwd`/mr go test -bench ."
diff --git a/vendor/github.com/DataDog/zstd/LICENSE b/vendor/github.com/DataDog/zstd/LICENSE
deleted file mode 100644
index 345c1eb..0000000
--- a/vendor/github.com/DataDog/zstd/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Simplified BSD License
-
-Copyright (c) 2016, Datadog <info@datadoghq.com>
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-    * Redistributions of source code must retain the above copyright notice,
-      this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright notice,
-      this list of conditions and the following disclaimer in the documentation
-      and/or other materials provided with the distribution.
-    * Neither the name of the copyright holder nor the names of its contributors
-      may be used to endorse or promote products derived from this software
-      without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/DataDog/zstd/README.md b/vendor/github.com/DataDog/zstd/README.md
deleted file mode 100644
index b32c3e7..0000000
--- a/vendor/github.com/DataDog/zstd/README.md
+++ /dev/null
@@ -1,120 +0,0 @@
-# Zstd Go Wrapper
-
-[C Zstd Homepage](https://github.com/Cyan4973/zstd)
-
-The current headers and C files are from *v1.4.1* (Commit
-[52181f8](https://github.com/facebook/zstd/releases/tag/v1.4.1)).
-
-## Usage
-
-There are two main APIs:
-
-* simple Compress/Decompress
-* streaming API (io.Reader/io.Writer)
-
-The compress/decompress APIs mirror those of lz4, while the streaming API was
-designed to be a drop-in replacement for zlib.
-
-### Simple `Compress/Decompress`
-
-
-```go
-// Compress compresses the byte array given in src and writes it to dst.
-// If you already have a buffer allocated, you can pass it to prevent allocation
-// If not, you can pass nil as dst.
-// If the buffer is too small, it will be reallocated, resized, and returned by the function.
-// If dst is nil, this will allocate the worst case size (CompressBound(src))
-Compress(dst, src []byte) ([]byte, error)
-```
-
-```go
-// CompressLevel is the same as Compress but you can pass another compression level
-CompressLevel(dst, src []byte, level int) ([]byte, error)
-```
-
-```go
-// Decompress will decompress your payload into dst.
-// If you already have a buffer allocated, you can pass it to prevent allocation
-// If not, you can pass nil as dst (allocates a 4*src size as default).
-// If the buffer is too small, it will retry 3 times by doubling the dst size
-// After max retries, it will switch to the slower stream API to be sure to be able
-// to decompress. Currently switches if compression ratio > 4*2**3=32.
-Decompress(dst, src []byte) ([]byte, error)
-```
-
-### Stream API
-
-```go
-// NewWriter creates a new object that can optionally be initialized with
-// a precomputed dictionary. If dict is nil, compress without a dictionary.
-// The dictionary array should not be changed during the use of this object.
-// You MUST CALL Close() to write the last bytes of a zstd stream and free C objects.
-NewWriter(w io.Writer) *Writer
-NewWriterLevel(w io.Writer, level int) *Writer
-NewWriterLevelDict(w io.Writer, level int, dict []byte) *Writer
-
-// Write compresses the input data and writes it to the underlying writer
-(w *Writer) Write(p []byte) (int, error)
-
-// Close flushes the buffer and frees C zstd objects
-(w *Writer) Close() error
-```
-
-```go
-// NewReader returns a new io.ReadCloser that will decompress data from the
-// underlying reader.  If a dictionary is provided to NewReaderDict, it must
-// not be modified until Close is called.  It is the caller's responsibility
-// to call Close, which frees up C objects.
-NewReader(r io.Reader) io.ReadCloser
-NewReaderDict(r io.Reader, dict []byte) io.ReadCloser
-```
-
-### Benchmarks (benchmarked with v0.5.0)
-
-The author of Zstd also wrote lz4. Zstd is intended to occupy a speed/ratio
-level similar to what zlib currently provides. In our tests, it can always
-be made better than zlib by choosing an appropriate level while still
-keeping compression and decompression times faster than zlib.
-
-You can run the benchmarks against your own payloads with the Go benchmark tool.
-Just export your payload filepath as the `PAYLOAD` environment variable and run the benchmarks:
-
-```
-go test -bench .
-```
-
-Compression of a 7MB PDF with zstd (this wrapper) vs [czlib](https://github.com/DataDog/czlib):
-```
-BenchmarkCompression               5     221056624 ns/op      67.34 MB/s
-BenchmarkDecompression           100      18370416 ns/op     810.32 MB/s
-
-BenchmarkFzlibCompress             2     610156603 ns/op      24.40 MB/s
-BenchmarkFzlibDecompress          20      81195246 ns/op     183.33 MB/s
-```
-
-Ratio is also better by a margin of ~20%.
-Compression speed is always better than zlib on all the payloads we tested;
-however, [czlib](https://github.com/DataDog/czlib) has optimisations that make it
-faster at decompressing small payloads:
-
-```
-Testing with size: 11... czlib: 8.97 MB/s, zstd: 3.26 MB/s
-Testing with size: 27... czlib: 23.3 MB/s, zstd: 8.22 MB/s
-Testing with size: 62... czlib: 31.6 MB/s, zstd: 19.49 MB/s
-Testing with size: 141... czlib: 74.54 MB/s, zstd: 42.55 MB/s
-Testing with size: 323... czlib: 155.14 MB/s, zstd: 99.39 MB/s
-Testing with size: 739... czlib: 235.9 MB/s, zstd: 216.45 MB/s
-Testing with size: 1689... czlib: 116.45 MB/s, zstd: 345.64 MB/s
-Testing with size: 3858... czlib: 176.39 MB/s, zstd: 617.56 MB/s
-Testing with size: 8811... czlib: 254.11 MB/s, zstd: 824.34 MB/s
-Testing with size: 20121... czlib: 197.43 MB/s, zstd: 1339.11 MB/s
-Testing with size: 45951... czlib: 201.62 MB/s, zstd: 1951.57 MB/s
-```
-
-zstd starts to shine with payloads > 1KB.
-
-### Stability - Current state: STABLE
-
-The C library seems to be pretty stable and according to the author has been tested and fuzzed.
-
-For the Go wrapper, the tests cover most common cases, and we have successfully tested it on all staging and prod data.
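
For reference, a minimal usage sketch of the wrapper API documented in the README above (the one-shot Compress/Decompress calls and the streaming Writer/Reader). It assumes the package is imported by its upstream path github.com/DataDog/zstd and reduces error handling to panics for brevity.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/DataDog/zstd"
)

func main() {
	payload := []byte("hello, zstd")

	// One-shot API: passing nil lets Compress/Decompress allocate dst.
	compressed, err := zstd.Compress(nil, payload)
	if err != nil {
		panic(err)
	}
	decompressed, err := zstd.Decompress(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(payload, decompressed)) // true

	// Streaming API: Close must be called to flush the final zstd frame
	// and free the underlying C objects.
	var buf bytes.Buffer
	w := zstd.NewWriter(&buf)
	if _, err := w.Write(payload); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	r := zstd.NewReader(&buf)
	defer r.Close()
	out, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello, zstd
}
```
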
diff --git a/vendor/github.com/DataDog/zstd/ZSTD_LICENSE b/vendor/github.com/DataDog/zstd/ZSTD_LICENSE
deleted file mode 100644
index a793a80..0000000
--- a/vendor/github.com/DataDog/zstd/ZSTD_LICENSE
+++ /dev/null
@@ -1,30 +0,0 @@
-BSD License
-
-For Zstandard software
-
-Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
- * Neither the name Facebook nor the names of its contributors may be used to
-   endorse or promote products derived from this software without specific
-   prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/DataDog/zstd/bitstream.h b/vendor/github.com/DataDog/zstd/bitstream.h
deleted file mode 100644
index d955bd6..0000000
--- a/vendor/github.com/DataDog/zstd/bitstream.h
+++ /dev/null
@@ -1,455 +0,0 @@
-/* ******************************************************************
-   bitstream
-   Part of FSE library
-   Copyright (C) 2013-present, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-#ifndef BITSTREAM_H_MODULE
-#define BITSTREAM_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/*
-*  This API consists of small unitary functions, which must be inlined for best performance.
-*  Since link-time-optimization is not available for all compilers,
-*  these functions are defined into a .h to be included.
-*/
-
-/*-****************************************
-*  Dependencies
-******************************************/
-#include "mem.h"            /* unaligned access routines */
-#include "debug.h"          /* assert(), DEBUGLOG(), RAWLOG() */
-#include "error_private.h"  /* error codes and messages */
-
-
-/*=========================================
-*  Target specific
-=========================================*/
-#if defined(__BMI__) && defined(__GNUC__)
-#  include <immintrin.h>   /* support for bextr (experimental) */
-#endif
-
-#define STREAM_ACCUMULATOR_MIN_32  25
-#define STREAM_ACCUMULATOR_MIN_64  57
-#define STREAM_ACCUMULATOR_MIN    ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
-
-
-/*-******************************************
-*  bitStream encoding API (write forward)
-********************************************/
-/* bitStream can mix input from multiple sources.
- * A critical property of these streams is that they encode and decode in **reverse** direction.
- * So the first bit sequence you add will be the last to be read, like a LIFO stack.
- */
-typedef struct {
-    size_t bitContainer;
-    unsigned bitPos;
-    char*  startPtr;
-    char*  ptr;
-    char*  endPtr;
-} BIT_CStream_t;
-
-MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
-MEM_STATIC void   BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
-MEM_STATIC void   BIT_flushBits(BIT_CStream_t* bitC);
-MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
-
-/* Start with initCStream, providing the size of buffer to write into.
-*  bitStream will never write outside of this buffer.
-*  `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
-*
-*  bits are first added to a local register.
-*  Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
-*  Writing data into memory is an explicit operation, performed by the flushBits function.
-*  Hence keep track of how many bits are potentially stored in the local register to avoid register overflow.
-*  After a flushBits, a maximum of 7 bits might still be stored into local register.
-*
-*  Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
-*
-*  Last operation is to close the bitStream.
-*  The function returns the final size of CStream in bytes.
-*  If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
-*/
-
-
-/*-********************************************
-*  bitStream decoding API (read backward)
-**********************************************/
-typedef struct {
-    size_t   bitContainer;
-    unsigned bitsConsumed;
-    const char* ptr;
-    const char* start;
-    const char* limitPtr;
-} BIT_DStream_t;
-
-typedef enum { BIT_DStream_unfinished = 0,
-               BIT_DStream_endOfBuffer = 1,
-               BIT_DStream_completed = 2,
-               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
-               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
-
-MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
-MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
-MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
-MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
-
-
-/* Start by invoking BIT_initDStream().
-*  A chunk of the bitStream is then stored into a local register.
-*  Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
-*  You can then retrieve bitFields stored into the local register, **in reverse order**.
-*  Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
-*  A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
-*  Otherwise, it can be less than that, so proceed accordingly.
-*  Checking if DStream has reached its end can be performed with BIT_endOfDStream().
-*/
-
-
-/*-****************************************
-*  unsafe API
-******************************************/
-MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
-/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
-
-MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
-/* unsafe version; does not check buffer overflow */
-
-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
-/* faster, but works only if nbBits >= 1 */
-
-
-
-/*-**************************************************************
-*  Internal functions
-****************************************************************/
-MEM_STATIC unsigned BIT_highbit32 (U32 val)
-{
-    assert(val != 0);
-    {
-#   if defined(_MSC_VER)   /* Visual */
-        unsigned long r=0;
-        _BitScanReverse ( &r, val );
-        return (unsigned) r;
-#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
-        return 31 - __builtin_clz (val);
-#   else   /* Software version */
-        static const unsigned DeBruijnClz[32] = { 0,  9,  1, 10, 13, 21,  2, 29,
-                                                 11, 14, 16, 18, 22, 25,  3, 30,
-                                                  8, 12, 20, 28, 15, 17, 24,  7,
-                                                 19, 27, 23,  6, 26,  5,  4, 31 };
-        U32 v = val;
-        v |= v >> 1;
-        v |= v >> 2;
-        v |= v >> 4;
-        v |= v >> 8;
-        v |= v >> 16;
-        return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
-#   endif
-    }
-}
-
-/*=====    Local Constants   =====*/
-static const unsigned BIT_mask[] = {
-    0,          1,         3,         7,         0xF,       0x1F,
-    0x3F,       0x7F,      0xFF,      0x1FF,     0x3FF,     0x7FF,
-    0xFFF,      0x1FFF,    0x3FFF,    0x7FFF,    0xFFFF,    0x1FFFF,
-    0x3FFFF,    0x7FFFF,   0xFFFFF,   0x1FFFFF,  0x3FFFFF,  0x7FFFFF,
-    0xFFFFFF,   0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
-    0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
-#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
-
-/*-**************************************************************
-*  bitStream encoding
-****************************************************************/
-/*! BIT_initCStream() :
- *  `dstCapacity` must be > sizeof(size_t)
- *  @return : 0 if success,
- *            otherwise an error code (can be tested using ERR_isError()) */
-MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
-                                  void* startPtr, size_t dstCapacity)
-{
-    bitC->bitContainer = 0;
-    bitC->bitPos = 0;
-    bitC->startPtr = (char*)startPtr;
-    bitC->ptr = bitC->startPtr;
-    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
-    if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
-    return 0;
-}
-
-/*! BIT_addBits() :
- *  can add up to 31 bits into `bitC`.
- *  Note : does not check for register overflow ! */
-MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
-                            size_t value, unsigned nbBits)
-{
-    MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32);
-    assert(nbBits < BIT_MASK_SIZE);
-    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
-    bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
-    bitC->bitPos += nbBits;
-}
-
-/*! BIT_addBitsFast() :
- *  works only if `value` is _clean_,
- *  meaning all high bits above nbBits are 0 */
-MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
-                                size_t value, unsigned nbBits)
-{
-    assert((value>>nbBits) == 0);
-    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
-    bitC->bitContainer |= value << bitC->bitPos;
-    bitC->bitPos += nbBits;
-}
-
-/*! BIT_flushBitsFast() :
- *  assumption : bitContainer has not overflowed
- *  unsafe version; does not check buffer overflow */
-MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
-{
-    size_t const nbBytes = bitC->bitPos >> 3;
-    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
-    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
-    bitC->ptr += nbBytes;
-    assert(bitC->ptr <= bitC->endPtr);
-    bitC->bitPos &= 7;
-    bitC->bitContainer >>= nbBytes*8;
-}
-
-/*! BIT_flushBits() :
- *  assumption : bitContainer has not overflowed
- *  safe version; checks for buffer overflow and prevents it.
- *  note : does not signal buffer overflow.
- *  overflow will be revealed later on using BIT_closeCStream() */
-MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
-{
-    size_t const nbBytes = bitC->bitPos >> 3;
-    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
-    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
-    bitC->ptr += nbBytes;
-    if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
-    bitC->bitPos &= 7;
-    bitC->bitContainer >>= nbBytes*8;
-}
-
-/*! BIT_closeCStream() :
- *  @return : size of CStream, in bytes,
- *            or 0 if it could not fit into dstBuffer */
-MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
-{
-    BIT_addBitsFast(bitC, 1, 1);   /* endMark */
-    BIT_flushBits(bitC);
-    if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
-    return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
-}
-
-
-/*-********************************************************
-*  bitStream decoding
-**********************************************************/
-/*! BIT_initDStream() :
- *  Initialize a BIT_DStream_t.
- * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
- * `srcSize` must be the *exact* size of the bitStream, in bytes.
- * @return : size of stream (== srcSize), or an errorCode if a problem is detected
- */
-MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
-{
-    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
-
-    bitD->start = (const char*)srcBuffer;
-    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
-
-    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
-        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);
-        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
-          bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */
-          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
-    } else {
-        bitD->ptr   = bitD->start;
-        bitD->bitContainer = *(const BYTE*)(bitD->start);
-        switch(srcSize)
-        {
-        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
-                /* fall-through */
-
-        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
-                /* fall-through */
-
-        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
-                /* fall-through */
-
-        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
-                /* fall-through */
-
-        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
-                /* fall-through */
-
-        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8;
-                /* fall-through */
-
-        default: break;
-        }
-        {   BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
-            bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
-            if (lastByte == 0) return ERROR(corruption_detected);  /* endMark not present */
-        }
-        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
-    }
-
-    return srcSize;
-}
-
-MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
-{
-    return bitContainer >> start;
-}
-
-MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
-{
-    U32 const regMask = sizeof(bitContainer)*8 - 1;
-    /* if start > regMask, bitstream is corrupted, and result is undefined */
-    assert(nbBits < BIT_MASK_SIZE);
-    return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
-}
-
-MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
-{
-    assert(nbBits < BIT_MASK_SIZE);
-    return bitContainer & BIT_mask[nbBits];
-}
-
-/*! BIT_lookBits() :
- *  Provides next n bits from local register.
- *  local register is not modified.
- *  On 32-bits, maxNbBits==24.
- *  On 64-bits, maxNbBits==56.
- * @return : value extracted */
-MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
-{
-    /* arbitrate between double-shift and shift+mask */
-#if 1
-    /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8,
-     * bitstream is likely corrupted, and result is undefined */
-    return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
-#else
-    /* this code path is slower on my os-x laptop */
-    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
-    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
-#endif
-}
-
-/*! BIT_lookBitsFast() :
- *  unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
-{
-    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
-    assert(nbBits >= 1);
-    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
-}
-
-MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
-{
-    bitD->bitsConsumed += nbBits;
-}
-
-/*! BIT_readBits() :
- *  Read (consume) next n bits from local register and update.
- *  Pay attention to not read more than nbBits contained into local register.
- * @return : extracted value. */
-MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
-{
-    size_t const value = BIT_lookBits(bitD, nbBits);
-    BIT_skipBits(bitD, nbBits);
-    return value;
-}
-
-/*! BIT_readBitsFast() :
- *  unsafe version; works only if nbBits >= 1 */
-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
-{
-    size_t const value = BIT_lookBitsFast(bitD, nbBits);
-    assert(nbBits >= 1);
-    BIT_skipBits(bitD, nbBits);
-    return value;
-}
-
-/*! BIT_reloadDStream() :
- *  Refill `bitD` from buffer previously set in BIT_initDStream() .
- *  This function is safe, it guarantees it will not read beyond src buffer.
- * @return : status of `BIT_DStream_t` internal register.
- *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
-MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
-{
-    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */
-        return BIT_DStream_overflow;
-
-    if (bitD->ptr >= bitD->limitPtr) {
-        bitD->ptr -= bitD->bitsConsumed >> 3;
-        bitD->bitsConsumed &= 7;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);
-        return BIT_DStream_unfinished;
-    }
-    if (bitD->ptr == bitD->start) {
-        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
-        return BIT_DStream_completed;
-    }
-    /* start < ptr < limitPtr */
-    {   U32 nbBytes = bitD->bitsConsumed >> 3;
-        BIT_DStream_status result = BIT_DStream_unfinished;
-        if (bitD->ptr - nbBytes < bitD->start) {
-            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
-            result = BIT_DStream_endOfBuffer;
-        }
-        bitD->ptr -= nbBytes;
-        bitD->bitsConsumed -= nbBytes*8;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
-        return result;
-    }
-}
-
-/*! BIT_endOfDStream() :
- * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
- */
-MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
-{
-    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
-}
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* BITSTREAM_H_MODULE */
diff --git a/vendor/github.com/DataDog/zstd/compiler.h b/vendor/github.com/DataDog/zstd/compiler.h
deleted file mode 100644
index 87bf51a..0000000
--- a/vendor/github.com/DataDog/zstd/compiler.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_COMPILER_H
-#define ZSTD_COMPILER_H
-
-/*-*******************************************************
-*  Compiler specifics
-*********************************************************/
-/* force inlining */
-
-#if !defined(ZSTD_NO_INLINE)
-#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#  define INLINE_KEYWORD inline
-#else
-#  define INLINE_KEYWORD
-#endif
-
-#if defined(__GNUC__)
-#  define FORCE_INLINE_ATTR __attribute__((always_inline))
-#elif defined(_MSC_VER)
-#  define FORCE_INLINE_ATTR __forceinline
-#else
-#  define FORCE_INLINE_ATTR
-#endif
-
-#else
-
-#define INLINE_KEYWORD
-#define FORCE_INLINE_ATTR
-
-#endif
-
-/**
- * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
- * parameters. They must be inlined for the compiler to eliminate the constant
- * branches.
- */
-#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
-/**
- * HINT_INLINE is used to help the compiler generate better code. It is *not*
- * used for "templates", so it can be tweaked based on the compilers
- * performance.
- *
- * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
- * always_inline attribute.
- *
- * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline
- * attribute.
- */
-#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
-#  define HINT_INLINE static INLINE_KEYWORD
-#else
-#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
-#endif
-
-/* force no inlining */
-#ifdef _MSC_VER
-#  define FORCE_NOINLINE static __declspec(noinline)
-#else
-#  ifdef __GNUC__
-#    define FORCE_NOINLINE static __attribute__((__noinline__))
-#  else
-#    define FORCE_NOINLINE static
-#  endif
-#endif
-
-/* target attribute */
-#ifndef __has_attribute
-  #define __has_attribute(x) 0  /* Compatibility with non-clang compilers. */
-#endif
-#if defined(__GNUC__)
-#  define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
-#else
-#  define TARGET_ATTRIBUTE(target)
-#endif
-
-/* Enable runtime BMI2 dispatch based on the CPU.
- * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
- */
-#ifndef DYNAMIC_BMI2
-  #if ((defined(__clang__) && __has_attribute(__target__)) \
-      || (defined(__GNUC__) \
-          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
-      && (defined(__x86_64__) || defined(_M_X86)) \
-      && !defined(__BMI2__)
-  #  define DYNAMIC_BMI2 1
-  #else
-  #  define DYNAMIC_BMI2 0
-  #endif
-#endif
-
-/* prefetch
- * can be disabled, by declaring NO_PREFETCH build macro */
-#if defined(NO_PREFETCH)
-#  define PREFETCH_L1(ptr)  (void)(ptr)  /* disabled */
-#  define PREFETCH_L2(ptr)  (void)(ptr)  /* disabled */
-#else
-#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))  /* _mm_prefetch() is not defined outside of x86/x64 */
-#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
-#    define PREFETCH_L1(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
-#    define PREFETCH_L2(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T1)
-#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
-#    define PREFETCH_L1(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
-#    define PREFETCH_L2(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
-#  else
-#    define PREFETCH_L1(ptr) (void)(ptr)  /* disabled */
-#    define PREFETCH_L2(ptr) (void)(ptr)  /* disabled */
-#  endif
-#endif  /* NO_PREFETCH */
-
-#define CACHELINE_SIZE 64
-
-#define PREFETCH_AREA(p, s)  {            \
-    const char* const _ptr = (const char*)(p);  \
-    size_t const _size = (size_t)(s);     \
-    size_t _pos;                          \
-    for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) {  \
-        PREFETCH_L2(_ptr + _pos);         \
-    }                                     \
-}
-
-/* vectorization */
-#if !defined(__clang__) && defined(__GNUC__)
-#  define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
-#else
-#  define DONT_VECTORIZE
-#endif
-
-/* disable warnings */
-#ifdef _MSC_VER    /* Visual Studio */
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
-#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
-#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
-#endif
-
-#endif /* ZSTD_COMPILER_H */
diff --git a/vendor/github.com/DataDog/zstd/cover.c b/vendor/github.com/DataDog/zstd/cover.c
deleted file mode 100644
index 6219967..0000000
--- a/vendor/github.com/DataDog/zstd/cover.c
+++ /dev/null
@@ -1,1237 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-/* *****************************************************************************
- * Constructs a dictionary using a heuristic based on the following paper:
- *
- * Liao, Petri, Moffat, Wirth
- * Effective Construction of Relative Lempel-Ziv Dictionaries
- * Published in WWW 2016.
- *
- * Adapted from code originally written by @ot (Giuseppe Ottaviano).
- ******************************************************************************/
-
-/*-*************************************
-*  Dependencies
-***************************************/
-#include <stdio.h>  /* fprintf */
-#include <stdlib.h> /* malloc, free, qsort */
-#include <string.h> /* memset */
-#include <time.h>   /* clock */
-
-#include "mem.h" /* read */
-#include "pool.h"
-#include "threading.h"
-#include "cover.h"
-#include "zstd_internal.h" /* includes zstd.h */
-#ifndef ZDICT_STATIC_LINKING_ONLY
-#define ZDICT_STATIC_LINKING_ONLY
-#endif
-#include "zdict.h"
-
-/*-*************************************
-*  Constants
-***************************************/
-#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
-#define DEFAULT_SPLITPOINT 1.0
-
-/*-*************************************
-*  Console display
-***************************************/
-static int g_displayLevel = 2;
-#define DISPLAY(...)                                                           \
-  {                                                                            \
-    fprintf(stderr, __VA_ARGS__);                                              \
-    fflush(stderr);                                                            \
-  }
-#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \
-  if (displayLevel >= l) {                                                     \
-    DISPLAY(__VA_ARGS__);                                                      \
-  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
-#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
-
-#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \
-  if (displayLevel >= l) {                                                     \
-    if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) {             \
-      g_time = clock();                                                        \
-      DISPLAY(__VA_ARGS__);                                                    \
-    }                                                                          \
-  }
-#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
-static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
-static clock_t g_time = 0;
-
-/*-*************************************
-* Hash table
-***************************************
-* A small specialized hash map for storing activeDmers.
-* The map does not resize, so if it becomes full it will loop forever.
-* Thus, the map must be large enough to store every value.
-* The map implements linear probing and keeps its load less than 0.5.
-*/
-
-#define MAP_EMPTY_VALUE ((U32)-1)
-typedef struct COVER_map_pair_t_s {
-  U32 key;
-  U32 value;
-} COVER_map_pair_t;
-
-typedef struct COVER_map_s {
-  COVER_map_pair_t *data;
-  U32 sizeLog;
-  U32 size;
-  U32 sizeMask;
-} COVER_map_t;
-
-/**
- * Clear the map.
- */
-static void COVER_map_clear(COVER_map_t *map) {
-  memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
-}
-
-/**
- * Initializes a map of the given size.
- * Returns 1 on success and 0 on failure.
- * The map must be destroyed with COVER_map_destroy().
- * The map is only guaranteed to be large enough to hold size elements.
- */
-static int COVER_map_init(COVER_map_t *map, U32 size) {
-  map->sizeLog = ZSTD_highbit32(size) + 2;
-  map->size = (U32)1 << map->sizeLog;
-  map->sizeMask = map->size - 1;
-  map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
-  if (!map->data) {
-    map->sizeLog = 0;
-    map->size = 0;
-    return 0;
-  }
-  COVER_map_clear(map);
-  return 1;
-}
-
-/**
- * Internal hash function
- */
-static const U32 prime4bytes = 2654435761U;
-static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
-  return (key * prime4bytes) >> (32 - map->sizeLog);
-}
-
-/**
- * Helper function that returns the index that a key should be placed into.
- */
-static U32 COVER_map_index(COVER_map_t *map, U32 key) {
-  const U32 hash = COVER_map_hash(map, key);
-  U32 i;
-  for (i = hash;; i = (i + 1) & map->sizeMask) {
-    COVER_map_pair_t *pos = &map->data[i];
-    if (pos->value == MAP_EMPTY_VALUE) {
-      return i;
-    }
-    if (pos->key == key) {
-      return i;
-    }
-  }
-}
-
-/**
- * Returns the pointer to the value for key.
- * If key is not in the map, it is inserted and the value is set to 0.
- * The map must not be full.
- */
-static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
-  COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
-  if (pos->value == MAP_EMPTY_VALUE) {
-    pos->key = key;
-    pos->value = 0;
-  }
-  return &pos->value;
-}
-
-/**
- * Deletes key from the map if present.
- */
-static void COVER_map_remove(COVER_map_t *map, U32 key) {
-  U32 i = COVER_map_index(map, key);
-  COVER_map_pair_t *del = &map->data[i];
-  U32 shift = 1;
-  if (del->value == MAP_EMPTY_VALUE) {
-    return;
-  }
-  for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
-    COVER_map_pair_t *const pos = &map->data[i];
-    /* If the position is empty we are done */
-    if (pos->value == MAP_EMPTY_VALUE) {
-      del->value = MAP_EMPTY_VALUE;
-      return;
-    }
-    /* If pos can be moved to del do so */
-    if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
-      del->key = pos->key;
-      del->value = pos->value;
-      del = pos;
-      shift = 1;
-    } else {
-      ++shift;
-    }
-  }
-}
-
-/**
- * Destroys a map that is inited with COVER_map_init().
- */
-static void COVER_map_destroy(COVER_map_t *map) {
-  if (map->data) {
-    free(map->data);
-  }
-  map->data = NULL;
-  map->size = 0;
-}
-
-/*-*************************************
-* Context
-***************************************/
-
-typedef struct {
-  const BYTE *samples;
-  size_t *offsets;
-  const size_t *samplesSizes;
-  size_t nbSamples;
-  size_t nbTrainSamples;
-  size_t nbTestSamples;
-  U32 *suffix;
-  size_t suffixSize;
-  U32 *freqs;
-  U32 *dmerAt;
-  unsigned d;
-} COVER_ctx_t;
-
-/* We need a global context for qsort... */
-static COVER_ctx_t *g_ctx = NULL;
-
-/*-*************************************
-*  Helper functions
-***************************************/
-
-/**
- * Returns the sum of the sample sizes.
- */
-size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
-  size_t sum = 0;
-  unsigned i;
-  for (i = 0; i < nbSamples; ++i) {
-    sum += samplesSizes[i];
-  }
-  return sum;
-}
-
-/**
- * Returns -1 if the dmer at lp is less than the dmer at rp.
- * Return 0 if the dmers at lp and rp are equal.
- * Returns 1 if the dmer at lp is greater than the dmer at rp.
- */
-static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
-  U32 const lhs = *(U32 const *)lp;
-  U32 const rhs = *(U32 const *)rp;
-  return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
-}
-/**
- * Faster version for d <= 8.
- */
-static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
-  U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);
-  U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;
-  U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;
-  if (lhs < rhs) {
-    return -1;
-  }
-  return (lhs > rhs);
-}
-
-/**
- * Same as COVER_cmp() except ties are broken by pointer value
- * NOTE: g_ctx must be set to call this function.  A global is required because
- * qsort doesn't take an opaque pointer.
- */
-static int COVER_strict_cmp(const void *lp, const void *rp) {
-  int result = COVER_cmp(g_ctx, lp, rp);
-  if (result == 0) {
-    result = lp < rp ? -1 : 1;
-  }
-  return result;
-}
-/**
- * Faster version for d <= 8.
- */
-static int COVER_strict_cmp8(const void *lp, const void *rp) {
-  int result = COVER_cmp8(g_ctx, lp, rp);
-  if (result == 0) {
-    result = lp < rp ? -1 : 1;
-  }
-  return result;
-}
-
-/**
- * Returns the first pointer in [first, last) whose element does not compare
- * less than value.  If no such element exists it returns last.
- */
-static const size_t *COVER_lower_bound(const size_t *first, const size_t *last,
-                                       size_t value) {
-  size_t count = last - first;
-  while (count != 0) {
-    size_t step = count / 2;
-    const size_t *ptr = first;
-    ptr += step;
-    if (*ptr < value) {
-      first = ++ptr;
-      count -= step + 1;
-    } else {
-      count = step;
-    }
-  }
-  return first;
-}
-
-/**
- * Generic groupBy function.
- * Groups an array sorted by cmp into groups with equivalent values.
- * Calls grp for each group.
- */
-static void
-COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
-              int (*cmp)(COVER_ctx_t *, const void *, const void *),
-              void (*grp)(COVER_ctx_t *, const void *, const void *)) {
-  const BYTE *ptr = (const BYTE *)data;
-  size_t num = 0;
-  while (num < count) {
-    const BYTE *grpEnd = ptr + size;
-    ++num;
-    while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
-      grpEnd += size;
-      ++num;
-    }
-    grp(ctx, ptr, grpEnd);
-    ptr = grpEnd;
-  }
-}
-
-/*-*************************************
-*  Cover functions
-***************************************/
-
-/**
- * Called on each group of positions with the same dmer.
- * Counts the frequency of each dmer and saves it in the suffix array.
- * Fills `ctx->dmerAt`.
- */
-static void COVER_group(COVER_ctx_t *ctx, const void *group,
-                        const void *groupEnd) {
-  /* The group consists of all the positions with the same first d bytes. */
-  const U32 *grpPtr = (const U32 *)group;
-  const U32 *grpEnd = (const U32 *)groupEnd;
-  /* The dmerId is how we will reference this dmer.
-   * This allows us to map the whole dmer space to a much smaller space, the
-   * size of the suffix array.
-   */
-  const U32 dmerId = (U32)(grpPtr - ctx->suffix);
-  /* Count the number of samples this dmer shows up in */
-  U32 freq = 0;
-  /* Details */
-  const size_t *curOffsetPtr = ctx->offsets;
-  const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
-  /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
-   * different sample than the last.
-   */
-  size_t curSampleEnd = ctx->offsets[0];
-  for (; grpPtr != grpEnd; ++grpPtr) {
-    /* Save the dmerId for this position so we can get back to it. */
-    ctx->dmerAt[*grpPtr] = dmerId;
-    /* Dictionaries only help for the first reference to the dmer.
-     * After that zstd can reference the match from the previous reference.
-     * So only count each dmer once for each sample it is in.
-     */
-    if (*grpPtr < curSampleEnd) {
-      continue;
-    }
-    freq += 1;
-    /* Binary search to find the end of the sample *grpPtr is in.
-     * In the common case that grpPtr + 1 == grpEnd we can skip the binary
-     * search because the loop is over.
-     */
-    if (grpPtr + 1 != grpEnd) {
-      const size_t *sampleEndPtr =
-          COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
-      curSampleEnd = *sampleEndPtr;
-      curOffsetPtr = sampleEndPtr + 1;
-    }
-  }
-  /* At this point we are never going to look at this segment of the suffix
-   * array again.  We take advantage of this fact to save memory.
-   * We store the frequency of the dmer in the first position of the group,
-   * which is dmerId.
-   */
-  ctx->suffix[dmerId] = freq;
-}
-
-
-/**
- * Selects the best segment in an epoch.
- * Segments are scored according to the function:
- *
- * Let F(d) be the frequency of dmer d.
- * Let S_i be the dmer at position i of segment S which has length k.
- *
- *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
- *
- * Once the dmer d is in the dictionary we set F(d) = 0.
- */
-static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
-                                           COVER_map_t *activeDmers, U32 begin,
-                                           U32 end,
-                                           ZDICT_cover_params_t parameters) {
-  /* Constants */
-  const U32 k = parameters.k;
-  const U32 d = parameters.d;
-  const U32 dmersInK = k - d + 1;
-  /* Try each segment (activeSegment) and save the best (bestSegment) */
-  COVER_segment_t bestSegment = {0, 0, 0};
-  COVER_segment_t activeSegment;
-  /* Reset the activeDmers in the segment */
-  COVER_map_clear(activeDmers);
-  /* The activeSegment starts at the beginning of the epoch. */
-  activeSegment.begin = begin;
-  activeSegment.end = begin;
-  activeSegment.score = 0;
-  /* Slide the activeSegment through the whole epoch.
-   * Save the best segment in bestSegment.
-   */
-  while (activeSegment.end < end) {
-    /* The dmerId for the dmer at the next position */
-    U32 newDmer = ctx->dmerAt[activeSegment.end];
-    /* The entry in activeDmers for this dmerId */
-    U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
-    /* If the dmer isn't already present in the segment add its score. */
-    if (*newDmerOcc == 0) {
-      /* The paper suggest using the L-0.5 norm, but experiments show that it
-       * doesn't help.
-       */
-      activeSegment.score += freqs[newDmer];
-    }
-    /* Add the dmer to the segment */
-    activeSegment.end += 1;
-    *newDmerOcc += 1;
-
-    /* If the window is now too large, drop the first position */
-    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
-      U32 delDmer = ctx->dmerAt[activeSegment.begin];
-      U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
-      activeSegment.begin += 1;
-      *delDmerOcc -= 1;
-      /* If this is the last occurrence of the dmer, subtract its score */
-      if (*delDmerOcc == 0) {
-        COVER_map_remove(activeDmers, delDmer);
-        activeSegment.score -= freqs[delDmer];
-      }
-    }
-
-    /* If this segment is the best so far save it */
-    if (activeSegment.score > bestSegment.score) {
-      bestSegment = activeSegment;
-    }
-  }
-  {
-    /* Trim off the zero frequency head and tail from the segment. */
-    U32 newBegin = bestSegment.end;
-    U32 newEnd = bestSegment.begin;
-    U32 pos;
-    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
-      U32 freq = freqs[ctx->dmerAt[pos]];
-      if (freq != 0) {
-        newBegin = MIN(newBegin, pos);
-        newEnd = pos + 1;
-      }
-    }
-    bestSegment.begin = newBegin;
-    bestSegment.end = newEnd;
-  }
-  {
-    /* Zero out the frequency of each dmer covered by the chosen segment. */
-    U32 pos;
-    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
-      freqs[ctx->dmerAt[pos]] = 0;
-    }
-  }
-  return bestSegment;
-}
-
-/**
- * Check the validity of the parameters.
- * Returns non-zero if the parameters are valid and 0 otherwise.
- */
-static int COVER_checkParameters(ZDICT_cover_params_t parameters,
-                                 size_t maxDictSize) {
-  /* k and d are required parameters */
-  if (parameters.d == 0 || parameters.k == 0) {
-    return 0;
-  }
-  /* k <= maxDictSize */
-  if (parameters.k > maxDictSize) {
-    return 0;
-  }
-  /* d <= k */
-  if (parameters.d > parameters.k) {
-    return 0;
-  }
-  /* 0 < splitPoint <= 1 */
-  if (parameters.splitPoint <= 0 || parameters.splitPoint > 1){
-    return 0;
-  }
-  return 1;
-}
-
-/**
- * Clean up a context initialized with `COVER_ctx_init()`.
- */
-static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
-  if (!ctx) {
-    return;
-  }
-  if (ctx->suffix) {
-    free(ctx->suffix);
-    ctx->suffix = NULL;
-  }
-  if (ctx->freqs) {
-    free(ctx->freqs);
-    ctx->freqs = NULL;
-  }
-  if (ctx->dmerAt) {
-    free(ctx->dmerAt);
-    ctx->dmerAt = NULL;
-  }
-  if (ctx->offsets) {
-    free(ctx->offsets);
-    ctx->offsets = NULL;
-  }
-}
-
-/**
- * Prepare a context for dictionary building.
- * The context is only dependent on the parameter `d` and can be used multiple
- * times.
- * Returns 0 on success or error code on error.
- * The context must be destroyed with `COVER_ctx_destroy()`.
- */
-static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
-                          const size_t *samplesSizes, unsigned nbSamples,
-                          unsigned d, double splitPoint) {
-  const BYTE *const samples = (const BYTE *)samplesBuffer;
-  const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
-  /* Split samples into testing and training sets */
-  const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
-  const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
-  const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
-  const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
-  /* Checks */
-  if (totalSamplesSize < MAX(d, sizeof(U64)) ||
-      totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
-    DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
-                 (unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
-    return ERROR(srcSize_wrong);
-  }
-  /* Check if there are at least 5 training samples */
-  if (nbTrainSamples < 5) {
-    DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples);
-    return ERROR(srcSize_wrong);
-  }
-  /* Check if there's testing sample */
-  if (nbTestSamples < 1) {
-    DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples);
-    return ERROR(srcSize_wrong);
-  }
-  /* Zero the context */
-  memset(ctx, 0, sizeof(*ctx));
-  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
-               (unsigned)trainingSamplesSize);
-  DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
-               (unsigned)testSamplesSize);
-  ctx->samples = samples;
-  ctx->samplesSizes = samplesSizes;
-  ctx->nbSamples = nbSamples;
-  ctx->nbTrainSamples = nbTrainSamples;
-  ctx->nbTestSamples = nbTestSamples;
-  /* Partial suffix array */
-  ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
-  ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
-  /* Maps index to the dmerID */
-  ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
-  /* The offsets of each file */
-  ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
-  if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
-    DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
-    COVER_ctx_destroy(ctx);
-    return ERROR(memory_allocation);
-  }
-  ctx->freqs = NULL;
-  ctx->d = d;
-
-  /* Fill offsets from the samplesSizes */
-  {
-    U32 i;
-    ctx->offsets[0] = 0;
-    for (i = 1; i <= nbSamples; ++i) {
-      ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
-    }
-  }
-  DISPLAYLEVEL(2, "Constructing partial suffix array\n");
-  {
-    /* suffix is a partial suffix array.
-     * It only sorts suffixes by their first parameters.d bytes.
-     * The sort is stable, so each dmer group is sorted by position in input.
-     */
-    U32 i;
-    for (i = 0; i < ctx->suffixSize; ++i) {
-      ctx->suffix[i] = i;
-    }
-    /* qsort doesn't take an opaque pointer, so pass as a global.
-     * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is.
-     */
-    g_ctx = ctx;
-#if defined(__OpenBSD__)
-    mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32),
-          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
-#else
-    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
-          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
-#endif
-  }
-  DISPLAYLEVEL(2, "Computing frequencies\n");
-  /* For each dmer group (group of positions with the same first d bytes):
-   * 1. For each position we set dmerAt[position] = dmerID.  The dmerID is
-   *    (groupBeginPtr - suffix).  This allows us to go from position to
-   *    dmerID so we can look up values in freq.
-   * 2. We calculate how many samples the dmer occurs in and save it in
-   *    freqs[dmerId].
-   */
-  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,
-                (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
-  ctx->freqs = ctx->suffix;
-  ctx->suffix = NULL;
-  return 0;
-}
-
-void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel)
-{
-  const double ratio = (double)nbDmers / maxDictSize;
-  if (ratio >= 10) {
-      return;
-  }
-  LOCALDISPLAYLEVEL(displayLevel, 1,
-                    "WARNING: The maximum dictionary size %u is too large "
-                    "compared to the source size %u! "
-                    "size(source)/size(dictionary) = %f, but it should be >= "
-                    "10! This may lead to a subpar dictionary! We recommend "
-                    "training on sources at least 10x, and up to 100x the "
-                    "size of the dictionary!\n", (U32)maxDictSize,
-                    (U32)nbDmers, ratio);
-}
-
-COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize,
-                                       U32 nbDmers, U32 k, U32 passes)
-{
-  const U32 minEpochSize = k * 10;
-  COVER_epoch_info_t epochs;
-  epochs.num = MAX(1, maxDictSize / k / passes);
-  epochs.size = nbDmers / epochs.num;
-  if (epochs.size >= minEpochSize) {
-      assert(epochs.size * epochs.num <= nbDmers);
-      return epochs;
-  }
-  epochs.size = MIN(minEpochSize, nbDmers);
-  epochs.num = nbDmers / epochs.size;
-  assert(epochs.size * epochs.num <= nbDmers);
-  return epochs;
-}
-
-/**
- * Given the prepared context build the dictionary.
- */
-static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
-                                    COVER_map_t *activeDmers, void *dictBuffer,
-                                    size_t dictBufferCapacity,
-                                    ZDICT_cover_params_t parameters) {
-  BYTE *const dict = (BYTE *)dictBuffer;
-  size_t tail = dictBufferCapacity;
-  /* Divide the data into epochs. We will select one segment from each epoch. */
-  const COVER_epoch_info_t epochs = COVER_computeEpochs(
-      (U32)dictBufferCapacity, (U32)ctx->suffixSize, parameters.k, 4);
-  const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3));
-  size_t zeroScoreRun = 0;
-  size_t epoch;
-  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
-                (U32)epochs.num, (U32)epochs.size);
-  /* Loop through the epochs until there are no more segments or the dictionary
-   * is full.
-   */
-  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
-    const U32 epochBegin = (U32)(epoch * epochs.size);
-    const U32 epochEnd = epochBegin + epochs.size;
-    size_t segmentSize;
-    /* Select a segment */
-    COVER_segment_t segment = COVER_selectSegment(
-        ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
-    /* If the segment covers no dmers, then we are out of content.
-     * There may be new content in other epochs, so continue for some time.
-     */
-    if (segment.score == 0) {
-      if (++zeroScoreRun >= maxZeroScoreRun) {
-          break;
-      }
-      continue;
-    }
-    zeroScoreRun = 0;
-    /* Trim the segment if necessary and if it is too small then we are done */
-    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
-    if (segmentSize < parameters.d) {
-      break;
-    }
-    /* We fill the dictionary from the back to allow the best segments to be
-     * referenced with the smallest offsets.
-     */
-    tail -= segmentSize;
-    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
-    DISPLAYUPDATE(
-        2, "\r%u%%       ",
-        (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
-  }
-  DISPLAYLEVEL(2, "\r%79s\r", "");
-  return tail;
-}
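The control flow above (round-robin over epochs, filling the buffer from the back) can be shown with a toy, self-contained loop. The COVER segment scoring is deliberately replaced by a fixed-size copy, so this only mirrors the loop structure of COVER_buildDictionary(), not its selection logic:

```c
/* Toy illustration of the fill-from-the-back epoch loop in COVER_buildDictionary().
 * "Segment selection" is faked as "take the first segLen bytes of the epoch". */
#include <stdio.h>
#include <string.h>

int main(void) {
    const char samples[] = "abcdefghijklmnopqrstuvwxyz";   /* pretend corpus */
    char dict[16];
    size_t tail = sizeof(dict);          /* the dictionary is filled from the back */
    const size_t numEpochs = 4;
    const size_t epochSize = (sizeof(samples) - 1) / numEpochs;
    const size_t segLen = 5;
    size_t epoch = 0;

    while (tail > 0) {
        const size_t begin = epoch * epochSize;            /* start of this epoch */
        const size_t copyLen = segLen < tail ? segLen : tail;
        tail -= copyLen;
        memcpy(dict + tail, samples + begin, copyLen);     /* earlier (better) picks sit nearer the end */
        epoch = (epoch + 1) % numEpochs;                   /* round-robin over epochs */
    }
    printf("dict = %.*s\n", (int)sizeof(dict), dict);
    return 0;
}
```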
-
-ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
-    void *dictBuffer, size_t dictBufferCapacity,
-    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
-    ZDICT_cover_params_t parameters)
-{
-  BYTE* const dict = (BYTE*)dictBuffer;
-  COVER_ctx_t ctx;
-  COVER_map_t activeDmers;
-  parameters.splitPoint = 1.0;
-  /* Initialize global data */
-  g_displayLevel = parameters.zParams.notificationLevel;
-  /* Checks */
-  if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
-    DISPLAYLEVEL(1, "Cover parameters incorrect\n");
-    return ERROR(parameter_outOfBound);
-  }
-  if (nbSamples == 0) {
-    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
-    return ERROR(srcSize_wrong);
-  }
-  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
-    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
-                 ZDICT_DICTSIZE_MIN);
-    return ERROR(dstSize_tooSmall);
-  }
-  /* Initialize context and activeDmers */
-  {
-    size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
-                      parameters.d, parameters.splitPoint);
-    if (ZSTD_isError(initVal)) {
-      return initVal;
-    }
-  }
-  COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel);
-  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
-    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
-    COVER_ctx_destroy(&ctx);
-    return ERROR(memory_allocation);
-  }
-
-  DISPLAYLEVEL(2, "Building dictionary\n");
-  {
-    const size_t tail =
-        COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
-                              dictBufferCapacity, parameters);
-    const size_t dictionarySize = ZDICT_finalizeDictionary(
-        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
-        samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
-    if (!ZSTD_isError(dictionarySize)) {
-      DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
-                   (unsigned)dictionarySize);
-    }
-    COVER_ctx_destroy(&ctx);
-    COVER_map_destroy(&activeDmers);
-    return dictionarySize;
-  }
-}
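For reference, a hedged usage sketch of this entry point. ZDICT_trainFromBuffer_cover() is part of zdict's static-linking-only API; the three tiny samples below are illustrative only, and a corpus this small will normally just return an error, so treat it purely as a calling-convention example:

```c
/* Usage sketch for ZDICT_trainFromBuffer_cover() (static-linking-only zdict API).
 * Link against libzstd; the sample data below is illustrative only. */
#define ZDICT_STATIC_LINKING_ONLY
#include <stdio.h>
#include <string.h>
#include "zdict.h"

int main(void) {
    const char samples[] = "hello world\nhello zstd\nhello dictionary\n";
    const size_t sampleSizes[3] = { 12, 11, 17 };   /* three newline-terminated samples */
    char dict[1024];
    ZDICT_cover_params_t params;
    size_t dictSize;

    memset(&params, 0, sizeof(params));
    params.k = 200;   /* segment size */
    params.d = 8;     /* dmer size */

    dictSize = ZDICT_trainFromBuffer_cover(dict, sizeof(dict),
                                           samples, sampleSizes, 3, params);
    if (ZDICT_isError(dictSize)) {
        fprintf(stderr, "training failed: %s\n", ZDICT_getErrorName(dictSize));
        return 1;
    }
    printf("trained a %zu-byte dictionary\n", dictSize);
    return 0;
}
```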
-
-
-
-size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
-                                    const size_t *samplesSizes, const BYTE *samples,
-                                    size_t *offsets,
-                                    size_t nbTrainSamples, size_t nbSamples,
-                                    BYTE *const dict, size_t dictBufferCapacity) {
-  size_t totalCompressedSize = ERROR(GENERIC);
-  /* Pointers */
-  ZSTD_CCtx *cctx;
-  ZSTD_CDict *cdict;
-  void *dst;
-  /* Local variables */
-  size_t dstCapacity;
-  size_t i;
-  /* Allocate dst with enough space to compress the maximum sized sample */
-  {
-    size_t maxSampleSize = 0;
-    i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
-    for (; i < nbSamples; ++i) {
-      maxSampleSize = MAX(samplesSizes[i], maxSampleSize);
-    }
-    dstCapacity = ZSTD_compressBound(maxSampleSize);
-    dst = malloc(dstCapacity);
-  }
-  /* Create the cctx and cdict */
-  cctx = ZSTD_createCCtx();
-  cdict = ZSTD_createCDict(dict, dictBufferCapacity,
-                           parameters.zParams.compressionLevel);
-  if (!dst || !cctx || !cdict) {
-    goto _compressCleanup;
-  }
-  /* Compress each sample and sum their sizes (or error) */
-  totalCompressedSize = dictBufferCapacity;
-  i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
-  for (; i < nbSamples; ++i) {
-    const size_t size = ZSTD_compress_usingCDict(
-        cctx, dst, dstCapacity, samples + offsets[i],
-        samplesSizes[i], cdict);
-    if (ZSTD_isError(size)) {
-      totalCompressedSize = size;
-      goto _compressCleanup;
-    }
-    totalCompressedSize += size;
-  }
-_compressCleanup:
-  ZSTD_freeCCtx(cctx);
-  ZSTD_freeCDict(cdict);
-  if (dst) {
-    free(dst);
-  }
-  return totalCompressedSize;
-}
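The check above boils down to "compress every sample with a CDict built from the candidate dictionary and sum the sizes". A trimmed standalone version using only the public zstd API; the dictionary and sample contents are placeholders:

```c
/* Sum of compressed sample sizes using a candidate dictionary,
 * mirroring COVER_checkTotalCompressedSize() with the public zstd API. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zstd.h>

/* Returns the summed compressed size, or (size_t)-1 on failure. */
static size_t total_compressed_size(const void *dict, size_t dictSize,
                                    const char *const *samples,
                                    const size_t *sampleSizes, size_t nbSamples,
                                    int level) {
    ZSTD_CCtx *cctx = ZSTD_createCCtx();
    ZSTD_CDict *cdict = ZSTD_createCDict(dict, dictSize, level);
    size_t total = 0, maxSample = 0, i;
    void *dst = NULL;

    for (i = 0; i < nbSamples; ++i)
        if (sampleSizes[i] > maxSample) maxSample = sampleSizes[i];
    dst = malloc(ZSTD_compressBound(maxSample));    /* big enough for any sample */

    if (!cctx || !cdict || !dst) { total = (size_t)-1; goto cleanup; }

    for (i = 0; i < nbSamples; ++i) {
        const size_t csize = ZSTD_compress_usingCDict(
            cctx, dst, ZSTD_compressBound(maxSample),
            samples[i], sampleSizes[i], cdict);
        if (ZSTD_isError(csize)) { total = (size_t)-1; goto cleanup; }
        total += csize;
    }
cleanup:
    free(dst);
    ZSTD_freeCDict(cdict);
    ZSTD_freeCCtx(cctx);
    return total;
}

int main(void) {
    const char *samples[2] = { "some sample data", "some more sample data" };
    const size_t sizes[2] = { 16, 21 };
    const char dict[] = "some sample";              /* placeholder dictionary content */
    size_t total = total_compressed_size(dict, sizeof(dict) - 1, samples, sizes, 2, 3);
    if (total == (size_t)-1) { fprintf(stderr, "compression failed\n"); return 1; }
    printf("total compressed size = %zu bytes\n", total);
    return 0;
}
```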
-
-
-/**
- * Initialize the `COVER_best_t`.
- */
-void COVER_best_init(COVER_best_t *best) {
-  if (best==NULL) return; /* compatible with init on NULL */
-  (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
-  (void)ZSTD_pthread_cond_init(&best->cond, NULL);
-  best->liveJobs = 0;
-  best->dict = NULL;
-  best->dictSize = 0;
-  best->compressedSize = (size_t)-1;
-  memset(&best->parameters, 0, sizeof(best->parameters));
-}
-
-/**
- * Wait until liveJobs == 0.
- */
-void COVER_best_wait(COVER_best_t *best) {
-  if (!best) {
-    return;
-  }
-  ZSTD_pthread_mutex_lock(&best->mutex);
-  while (best->liveJobs != 0) {
-    ZSTD_pthread_cond_wait(&best->cond, &best->mutex);
-  }
-  ZSTD_pthread_mutex_unlock(&best->mutex);
-}
-
-/**
- * Call COVER_best_wait() and then destroy the COVER_best_t.
- */
-void COVER_best_destroy(COVER_best_t *best) {
-  if (!best) {
-    return;
-  }
-  COVER_best_wait(best);
-  if (best->dict) {
-    free(best->dict);
-  }
-  ZSTD_pthread_mutex_destroy(&best->mutex);
-  ZSTD_pthread_cond_destroy(&best->cond);
-}
-
-/**
- * Called when a thread is about to be launched.
- * Increments liveJobs.
- */
-void COVER_best_start(COVER_best_t *best) {
-  if (!best) {
-    return;
-  }
-  ZSTD_pthread_mutex_lock(&best->mutex);
-  ++best->liveJobs;
-  ZSTD_pthread_mutex_unlock(&best->mutex);
-}
-
-/**
- * Called when a thread finishes executing, whether on success or error.
- * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
- * If this dictionary is the best so far save it and its parameters.
- */
-void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,
-                              COVER_dictSelection_t selection) {
-  void* dict = selection.dictContent;
-  size_t compressedSize = selection.totalCompressedSize;
-  size_t dictSize = selection.dictSize;
-  if (!best) {
-    return;
-  }
-  {
-    size_t liveJobs;
-    ZSTD_pthread_mutex_lock(&best->mutex);
-    --best->liveJobs;
-    liveJobs = best->liveJobs;
-    /* If the new dictionary is better */
-    if (compressedSize < best->compressedSize) {
-      /* Allocate space if necessary */
-      if (!best->dict || best->dictSize < dictSize) {
-        if (best->dict) {
-          free(best->dict);
-        }
-        best->dict = malloc(dictSize);
-        if (!best->dict) {
-          best->compressedSize = ERROR(GENERIC);
-          best->dictSize = 0;
-          ZSTD_pthread_cond_signal(&best->cond);
-          ZSTD_pthread_mutex_unlock(&best->mutex);
-          return;
-        }
-      }
-      /* Save the dictionary, parameters, and size */
-      if (!dict) {
-        return;
-      }
-      memcpy(best->dict, dict, dictSize);
-      best->dictSize = dictSize;
-      best->parameters = parameters;
-      best->compressedSize = compressedSize;
-    }
-    if (liveJobs == 0) {
-      ZSTD_pthread_cond_broadcast(&best->cond);
-    }
-    ZSTD_pthread_mutex_unlock(&best->mutex);
-  }
-}
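The liveJobs counter above is a plain "wait until every outstanding worker has finished" pattern. A small analogue with standard pthreads rather than the ZSTD_pthread wrappers, purely for illustration:

```c
/* Minimal analogue of the COVER_best_t liveJobs pattern with plain pthreads.
 * Build with: cc demo.c -lpthread */
#include <pthread.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    size_t liveJobs;
} best_t;

static best_t best = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };

static void best_start(void) {                 /* like COVER_best_start(): about to launch a job */
    pthread_mutex_lock(&best.mutex);
    ++best.liveJobs;
    pthread_mutex_unlock(&best.mutex);
}

static void best_finish(void) {                /* like COVER_best_finish(): job done, wake waiters at 0 */
    pthread_mutex_lock(&best.mutex);
    if (--best.liveJobs == 0) pthread_cond_broadcast(&best.cond);
    pthread_mutex_unlock(&best.mutex);
}

static void best_wait(void) {                  /* like COVER_best_wait(): block until all jobs finished */
    pthread_mutex_lock(&best.mutex);
    while (best.liveJobs != 0) pthread_cond_wait(&best.cond, &best.mutex);
    pthread_mutex_unlock(&best.mutex);
}

static void *worker(void *arg) {
    (void)arg;
    /* ... try one parameter set here ... */
    best_finish();
    return NULL;
}

int main(void) {
    pthread_t threads[4];
    int i;
    for (i = 0; i < 4; ++i) { best_start(); pthread_create(&threads[i], NULL, worker, NULL); }
    best_wait();
    for (i = 0; i < 4; ++i) pthread_join(threads[i], NULL);
    printf("all jobs finished\n");
    return 0;
}
```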
-
-COVER_dictSelection_t COVER_dictSelectionError(size_t error) {
-    COVER_dictSelection_t selection = { NULL, 0, error };
-    return selection;
-}
-
-unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) {
-  return (ZSTD_isError(selection.totalCompressedSize) || !selection.dictContent);
-}
-
-void COVER_dictSelectionFree(COVER_dictSelection_t selection){
-  free(selection.dictContent);
-}
-
-COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent,
-        size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,
-        size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize) {
-
-  size_t largestDict = 0;
-  size_t largestCompressed = 0;
-  BYTE* customDictContentEnd = customDictContent + dictContentSize;
-
-  BYTE * largestDictbuffer = (BYTE *)malloc(dictContentSize);
-  BYTE * candidateDictBuffer = (BYTE *)malloc(dictContentSize);
-  double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00;
-
-  if (!largestDictbuffer || !candidateDictBuffer) {
-    free(largestDictbuffer);
-    free(candidateDictBuffer);
-    return COVER_dictSelectionError(dictContentSize);
-  }
-
-  /* Initial dictionary size and compressed size */
-  memcpy(largestDictbuffer, customDictContent, dictContentSize);
-  dictContentSize = ZDICT_finalizeDictionary(
-    largestDictbuffer, dictContentSize, customDictContent, dictContentSize,
-    samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);
-
-  if (ZDICT_isError(dictContentSize)) {
-    free(largestDictbuffer);
-    free(candidateDictBuffer);
-    return COVER_dictSelectionError(dictContentSize);
-  }
-
-  totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
-                                                       samplesBuffer, offsets,
-                                                       nbCheckSamples, nbSamples,
-                                                       largestDictbuffer, dictContentSize);
-
-  if (ZSTD_isError(totalCompressedSize)) {
-    free(largestDictbuffer);
-    free(candidateDictBuffer);
-    return COVER_dictSelectionError(totalCompressedSize);
-  }
-
-  if (params.shrinkDict == 0) {
-    COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize };
-    free(candidateDictBuffer);
-    return selection;
-  }
-
-  largestDict = dictContentSize;
-  largestCompressed = totalCompressedSize;
-  dictContentSize = ZDICT_DICTSIZE_MIN;
-
-  /* Largest dict is initially at least ZDICT_DICTSIZE_MIN */
-  while (dictContentSize < largestDict) {
-    memcpy(candidateDictBuffer, largestDictbuffer, largestDict);
-    dictContentSize = ZDICT_finalizeDictionary(
-      candidateDictBuffer, dictContentSize, customDictContentEnd - dictContentSize, dictContentSize,
-      samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);
-
-    if (ZDICT_isError(dictContentSize)) {
-      free(largestDictbuffer);
-      free(candidateDictBuffer);
-      return COVER_dictSelectionError(dictContentSize);
-
-    }
-
-    totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
-                                                         samplesBuffer, offsets,
-                                                         nbCheckSamples, nbSamples,
-                                                         candidateDictBuffer, dictContentSize);
-
-    if (ZSTD_isError(totalCompressedSize)) {
-      free(largestDictbuffer);
-      free(candidateDictBuffer);
-      return COVER_dictSelectionError(totalCompressedSize);
-    }
-
-    if (totalCompressedSize <= largestCompressed * regressionTolerance) {
-      COVER_dictSelection_t selection = { candidateDictBuffer, dictContentSize, totalCompressedSize };
-      free(largestDictbuffer);
-      return selection;
-    }
-    dictContentSize *= 2;
-  }
-  dictContentSize = largestDict;
-  totalCompressedSize = largestCompressed;
-  {
-    COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize };
-    free(candidateDictBuffer);
-    return selection;
-  }
-}
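The shrink-dict path is a doubling search: starting from the minimum dictionary size, keep doubling the candidate until its total compressed size falls within shrinkDictMaxRegression percent of the result achieved by the largest dictionary. A toy version of just that loop, with the compression check replaced by a made-up cost function:

```c
/* Toy doubling search mirroring the shrink-dict loop in COVER_selectDict().
 * cost() stands in for "total compressed size with a dictionary of this size". */
#include <stdio.h>

static double cost(size_t dictSize) {
    /* made-up: bigger dictionaries compress better, with diminishing returns */
    return 100000.0 / (1.0 + dictSize / 256.0) + 50000.0;
}

int main(void) {
    const size_t largestDict = 16384;
    const unsigned maxRegressionPct = 10;                    /* like params.shrinkDictMaxRegression */
    const double tolerance = maxRegressionPct / 100.0 + 1.0;
    const double largestCost = cost(largestDict);
    size_t candidate = 256;                                  /* ZDICT_DICTSIZE_MIN in the real code */

    while (candidate < largestDict) {
        if (cost(candidate) <= largestCost * tolerance) {    /* close enough to the best result */
            printf("selected %zu-byte dictionary\n", candidate);
            return 0;
        }
        candidate *= 2;                                      /* otherwise try a dictionary twice as big */
    }
    printf("kept the largest dictionary (%zu bytes)\n", largestDict);
    return 0;
}
```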
-
-/**
- * Parameters for COVER_tryParameters().
- */
-typedef struct COVER_tryParameters_data_s {
-  const COVER_ctx_t *ctx;
-  COVER_best_t *best;
-  size_t dictBufferCapacity;
-  ZDICT_cover_params_t parameters;
-} COVER_tryParameters_data_t;
-
-/**
- * Tries a set of parameters and updates the COVER_best_t with the results.
- * This function is thread safe if zstd is compiled with multithreaded support.
- * It takes its parameters as an *OWNING* opaque pointer to support threading.
- */
-static void COVER_tryParameters(void *opaque) {
-  /* Save parameters as local variables */
-  COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque;
-  const COVER_ctx_t *const ctx = data->ctx;
-  const ZDICT_cover_params_t parameters = data->parameters;
-  size_t dictBufferCapacity = data->dictBufferCapacity;
-  size_t totalCompressedSize = ERROR(GENERIC);
-  /* Allocate space for hash table, dict, and freqs */
-  COVER_map_t activeDmers;
-  BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
-  COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
-  U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
-  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
-    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
-    goto _cleanup;
-  }
-  if (!dict || !freqs) {
-    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
-    goto _cleanup;
-  }
-  /* Copy the frequencies because we need to modify them */
-  memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
-  /* Build the dictionary */
-  {
-    const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
-                                              dictBufferCapacity, parameters);
-    selection = COVER_selectDict(dict + tail, dictBufferCapacity - tail,
-        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,
-        totalCompressedSize);
-
-    if (COVER_dictSelectionIsError(selection)) {
-      DISPLAYLEVEL(1, "Failed to select dictionary\n");
-      goto _cleanup;
-    }
-  }
-_cleanup:
-  free(dict);
-  COVER_best_finish(data->best, parameters, selection);
-  free(data);
-  COVER_map_destroy(&activeDmers);
-  COVER_dictSelectionFree(selection);
-  if (freqs) {
-    free(freqs);
-  }
-}
-
-ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
-    void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
-    const size_t *samplesSizes, unsigned nbSamples,
-    ZDICT_cover_params_t *parameters) {
-  /* constants */
-  const unsigned nbThreads = parameters->nbThreads;
-  const double splitPoint =
-      parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint;
-  const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
-  const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
-  const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
-  const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
-  const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
-  const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
-  const unsigned kIterations =
-      (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
-  const unsigned shrinkDict = 0;
-  /* Local variables */
-  const int displayLevel = parameters->zParams.notificationLevel;
-  unsigned iteration = 1;
-  unsigned d;
-  unsigned k;
-  COVER_best_t best;
-  POOL_ctx *pool = NULL;
-  int warned = 0;
-
-  /* Checks */
-  if (splitPoint <= 0 || splitPoint > 1) {
-    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
-    return ERROR(parameter_outOfBound);
-  }
-  if (kMinK < kMaxD || kMaxK < kMinK) {
-    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
-    return ERROR(parameter_outOfBound);
-  }
-  if (nbSamples == 0) {
-    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
-    return ERROR(srcSize_wrong);
-  }
-  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
-    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
-                 ZDICT_DICTSIZE_MIN);
-    return ERROR(dstSize_tooSmall);
-  }
-  if (nbThreads > 1) {
-    pool = POOL_create(nbThreads, 1);
-    if (!pool) {
-      return ERROR(memory_allocation);
-    }
-  }
-  /* Initialization */
-  COVER_best_init(&best);
-  /* Turn down global display level to clean up display at level 2 and below */
-  g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
-  /* Loop through d first because each new value needs a new context */
-  LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
-                    kIterations);
-  for (d = kMinD; d <= kMaxD; d += 2) {
-    /* Initialize the context for this value of d */
-    COVER_ctx_t ctx;
-    LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
-    {
-      const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint);
-      if (ZSTD_isError(initVal)) {
-        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
-        COVER_best_destroy(&best);
-        POOL_free(pool);
-        return initVal;
-      }
-    }
-    if (!warned) {
-      COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel);
-      warned = 1;
-    }
-    /* Loop through k reusing the same context */
-    for (k = kMinK; k <= kMaxK; k += kStepSize) {
-      /* Prepare the arguments */
-      COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
-          sizeof(COVER_tryParameters_data_t));
-      LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
-      if (!data) {
-        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
-        COVER_best_destroy(&best);
-        COVER_ctx_destroy(&ctx);
-        POOL_free(pool);
-        return ERROR(memory_allocation);
-      }
-      data->ctx = &ctx;
-      data->best = &best;
-      data->dictBufferCapacity = dictBufferCapacity;
-      data->parameters = *parameters;
-      data->parameters.k = k;
-      data->parameters.d = d;
-      data->parameters.splitPoint = splitPoint;
-      data->parameters.steps = kSteps;
-      data->parameters.shrinkDict = shrinkDict;
-      data->parameters.zParams.notificationLevel = g_displayLevel;
-      /* Check the parameters */
-      if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
-        DISPLAYLEVEL(1, "Cover parameters incorrect\n");
-        free(data);
-        continue;
-      }
-      /* Call the function and pass ownership of data to it */
-      COVER_best_start(&best);
-      if (pool) {
-        POOL_add(pool, &COVER_tryParameters, data);
-      } else {
-        COVER_tryParameters(data);
-      }
-      /* Print status */
-      LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%%       ",
-                         (unsigned)((iteration * 100) / kIterations));
-      ++iteration;
-    }
-    COVER_best_wait(&best);
-    COVER_ctx_destroy(&ctx);
-  }
-  LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
-  /* Fill the output buffer and parameters with output of the best parameters */
-  {
-    const size_t dictSize = best.dictSize;
-    if (ZSTD_isError(best.compressedSize)) {
-      const size_t compressedSize = best.compressedSize;
-      COVER_best_destroy(&best);
-      POOL_free(pool);
-      return compressedSize;
-    }
-    *parameters = best.parameters;
-    memcpy(dictBuffer, best.dict, dictSize);
-    COVER_best_destroy(&best);
-    POOL_free(pool);
-    return dictSize;
-  }
-}
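And a matching sketch for the parameter-searching entry point: leaving k and d at 0 asks the function to sweep the d/k grid described above and write the winning parameters back through the pointer. Again this is the static-linking-only API, and the sample data is illustrative and far too small to train anything useful:

```c
/* Usage sketch for ZDICT_optimizeTrainFromBuffer_cover(): k = d = 0 triggers the grid search. */
#define ZDICT_STATIC_LINKING_ONLY
#include <stdio.h>
#include <string.h>
#include "zdict.h"

int main(void) {
    const char samples[] = "hello world\nhello zstd\nhello dictionary\n";
    const size_t sampleSizes[3] = { 12, 11, 17 };
    char dict[1024];
    ZDICT_cover_params_t params;
    size_t dictSize;

    memset(&params, 0, sizeof(params));     /* k = d = steps = 0: use the search defaults */
    params.nbThreads = 2;                   /* parameter sets are tried in a thread pool */

    dictSize = ZDICT_optimizeTrainFromBuffer_cover(dict, sizeof(dict),
                                                   samples, sampleSizes, 3, &params);
    if (ZDICT_isError(dictSize)) {
        fprintf(stderr, "optimization failed: %s\n", ZDICT_getErrorName(dictSize));
        return 1;
    }
    printf("best k=%u d=%u, dictionary size=%zu\n", params.k, params.d, dictSize);
    return 0;
}
```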
diff --git a/vendor/github.com/DataDog/zstd/cover.h b/vendor/github.com/DataDog/zstd/cover.h
deleted file mode 100644
index d9e0636..0000000
--- a/vendor/github.com/DataDog/zstd/cover.h
+++ /dev/null
@@ -1,147 +0,0 @@
-#include <stdio.h>  /* fprintf */
-#include <stdlib.h> /* malloc, free, qsort */
-#include <string.h> /* memset */
-#include <time.h>   /* clock */
-#include "mem.h" /* read */
-#include "pool.h"
-#include "threading.h"
-#include "zstd_internal.h" /* includes zstd.h */
-#ifndef ZDICT_STATIC_LINKING_ONLY
-#define ZDICT_STATIC_LINKING_ONLY
-#endif
-#include "zdict.h"
-
-/**
- * COVER_best_t is used for two purposes:
- * 1. Synchronizing threads.
- * 2. Saving the best parameters and dictionary.
- *
- * All of the methods except COVER_best_init() are thread safe if zstd is
- * compiled with multithreaded support.
- */
-typedef struct COVER_best_s {
-  ZSTD_pthread_mutex_t mutex;
-  ZSTD_pthread_cond_t cond;
-  size_t liveJobs;
-  void *dict;
-  size_t dictSize;
-  ZDICT_cover_params_t parameters;
-  size_t compressedSize;
-} COVER_best_t;
-
-/**
- * A segment is a range in the source as well as the score of the segment.
- */
-typedef struct {
-  U32 begin;
-  U32 end;
-  U32 score;
-} COVER_segment_t;
-
-/**
- * Number of epochs and size of each epoch.
- */
-typedef struct {
-  U32 num;
-  U32 size;
-} COVER_epoch_info_t;
-
-/**
- * Struct used for the dictionary selection function.
- */
-typedef struct COVER_dictSelection {
-  BYTE* dictContent;
-  size_t dictSize;
-  size_t totalCompressedSize;
-} COVER_dictSelection_t;
-
-/**
- * Computes the number of epochs and the size of each epoch.
- * We will make sure that each epoch gets at least 10 * k bytes.
- *
- * The COVER algorithms divide the data up into epochs of equal size and
- * select one segment from each epoch.
- *
- * @param maxDictSize The maximum allowed dictionary size.
- * @param nbDmers     The number of dmers we are training on.
- * @param k           The parameter k (segment size).
- * @param passes      The target number of passes over the dmer corpus.
- *                    More passes means a better dictionary.
- */
-COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, U32 nbDmers,
-                                       U32 k, U32 passes);
-
-/**
- * Warns the user when their corpus is too small.
- */
-void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel);
-
-/**
- *  Checks total compressed size of a dictionary
- */
-size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
-                                      const size_t *samplesSizes, const BYTE *samples,
-                                      size_t *offsets,
-                                      size_t nbTrainSamples, size_t nbSamples,
-                                      BYTE *const dict, size_t dictBufferCapacity);
-
-/**
- * Returns the sum of the sample sizes.
- */
-size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples);
-
-/**
- * Initialize the `COVER_best_t`.
- */
-void COVER_best_init(COVER_best_t *best);
-
-/**
- * Wait until liveJobs == 0.
- */
-void COVER_best_wait(COVER_best_t *best);
-
-/**
- * Call COVER_best_wait() and then destroy the COVER_best_t.
- */
-void COVER_best_destroy(COVER_best_t *best);
-
-/**
- * Called when a thread is about to be launched.
- * Increments liveJobs.
- */
-void COVER_best_start(COVER_best_t *best);
-
-/**
- * Called when a thread finishes executing, whether on success or error.
- * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
- * If this dictionary is the best so far save it and its parameters.
- */
-void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,
-                       COVER_dictSelection_t selection);
-/**
- * Error function for COVER_selectDict function. Checks if the return
- * value is an error.
- */
-unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection);
-
- /**
-  * Error function for COVER_selectDict function. Returns a struct where
-  * return.totalCompressedSize is a ZSTD error.
-  */
-COVER_dictSelection_t COVER_dictSelectionError(size_t error);
-
-/**
- * Always call after selectDict is called to free up used memory from
- * newly created dictionary.
- */
-void COVER_dictSelectionFree(COVER_dictSelection_t selection);
-
-/**
- * Called to finalize the dictionary and select one based on whether or not
- * the shrink-dict flag was enabled. If enabled the dictionary used is the
- * smallest dictionary within a specified regression of the compressed size
- * from the largest dictionary.
- */
- COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent,
-                       size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,
-                       size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize);
diff --git a/vendor/github.com/DataDog/zstd/cpu.h b/vendor/github.com/DataDog/zstd/cpu.h
deleted file mode 100644
index 5f0923f..0000000
--- a/vendor/github.com/DataDog/zstd/cpu.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Copyright (c) 2018-present, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_COMMON_CPU_H
-#define ZSTD_COMMON_CPU_H
-
-/**
- * Implementation taken from folly/CpuId.h
- * https://github.com/facebook/folly/blob/master/folly/CpuId.h
- */
-
-#include <string.h>
-
-#include "mem.h"
-
-#ifdef _MSC_VER
-#include <intrin.h>
-#endif
-
-typedef struct {
-    U32 f1c;
-    U32 f1d;
-    U32 f7b;
-    U32 f7c;
-} ZSTD_cpuid_t;
-
-MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
-    U32 f1c = 0;
-    U32 f1d = 0;
-    U32 f7b = 0;
-    U32 f7c = 0;
-#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
-    int reg[4];
-    __cpuid((int*)reg, 0);
-    {
-        int const n = reg[0];
-        if (n >= 1) {
-            __cpuid((int*)reg, 1);
-            f1c = (U32)reg[2];
-            f1d = (U32)reg[3];
-        }
-        if (n >= 7) {
-            __cpuidex((int*)reg, 7, 0);
-            f7b = (U32)reg[1];
-            f7c = (U32)reg[2];
-        }
-    }
-#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
-    /* The following block is like the normal cpuid branch below, but gcc
-     * reserves ebx for use of its pic register so we must specially
-     * handle the save and restore to avoid clobbering the register
-     */
-    U32 n;
-    __asm__(
-        "pushl %%ebx\n\t"
-        "cpuid\n\t"
-        "popl %%ebx\n\t"
-        : "=a"(n)
-        : "a"(0)
-        : "ecx", "edx");
-    if (n >= 1) {
-      U32 f1a;
-      __asm__(
-          "pushl %%ebx\n\t"
-          "cpuid\n\t"
-          "popl %%ebx\n\t"
-          : "=a"(f1a), "=c"(f1c), "=d"(f1d)
-          : "a"(1));
-    }
-    if (n >= 7) {
-      __asm__(
-          "pushl %%ebx\n\t"
-          "cpuid\n\t"
-          "movl %%ebx, %%eax\n\t"
-          "popl %%ebx"
-          : "=a"(f7b), "=c"(f7c)
-          : "a"(7), "c"(0)
-          : "edx");
-    }
-#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
-    U32 n;
-    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
-    if (n >= 1) {
-      U32 f1a;
-      __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
-    }
-    if (n >= 7) {
-      U32 f7a;
-      __asm__("cpuid"
-              : "=a"(f7a), "=b"(f7b), "=c"(f7c)
-              : "a"(7), "c"(0)
-              : "edx");
-    }
-#endif
-    {
-        ZSTD_cpuid_t cpuid;
-        cpuid.f1c = f1c;
-        cpuid.f1d = f1d;
-        cpuid.f7b = f7b;
-        cpuid.f7c = f7c;
-        return cpuid;
-    }
-}
-
-#define X(name, r, bit)                                                        \
-  MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) {                 \
-    return ((cpuid.r) & (1U << bit)) != 0;                                     \
-  }
-
-/* cpuid(1): Processor Info and Feature Bits. */
-#define C(name, bit) X(name, f1c, bit)
-  C(sse3, 0)
-  C(pclmuldq, 1)
-  C(dtes64, 2)
-  C(monitor, 3)
-  C(dscpl, 4)
-  C(vmx, 5)
-  C(smx, 6)
-  C(eist, 7)
-  C(tm2, 8)
-  C(ssse3, 9)
-  C(cnxtid, 10)
-  C(fma, 12)
-  C(cx16, 13)
-  C(xtpr, 14)
-  C(pdcm, 15)
-  C(pcid, 17)
-  C(dca, 18)
-  C(sse41, 19)
-  C(sse42, 20)
-  C(x2apic, 21)
-  C(movbe, 22)
-  C(popcnt, 23)
-  C(tscdeadline, 24)
-  C(aes, 25)
-  C(xsave, 26)
-  C(osxsave, 27)
-  C(avx, 28)
-  C(f16c, 29)
-  C(rdrand, 30)
-#undef C
-#define D(name, bit) X(name, f1d, bit)
-  D(fpu, 0)
-  D(vme, 1)
-  D(de, 2)
-  D(pse, 3)
-  D(tsc, 4)
-  D(msr, 5)
-  D(pae, 6)
-  D(mce, 7)
-  D(cx8, 8)
-  D(apic, 9)
-  D(sep, 11)
-  D(mtrr, 12)
-  D(pge, 13)
-  D(mca, 14)
-  D(cmov, 15)
-  D(pat, 16)
-  D(pse36, 17)
-  D(psn, 18)
-  D(clfsh, 19)
-  D(ds, 21)
-  D(acpi, 22)
-  D(mmx, 23)
-  D(fxsr, 24)
-  D(sse, 25)
-  D(sse2, 26)
-  D(ss, 27)
-  D(htt, 28)
-  D(tm, 29)
-  D(pbe, 31)
-#undef D
-
-/* cpuid(7): Extended Features. */
-#define B(name, bit) X(name, f7b, bit)
-  B(bmi1, 3)
-  B(hle, 4)
-  B(avx2, 5)
-  B(smep, 7)
-  B(bmi2, 8)
-  B(erms, 9)
-  B(invpcid, 10)
-  B(rtm, 11)
-  B(mpx, 14)
-  B(avx512f, 16)
-  B(avx512dq, 17)
-  B(rdseed, 18)
-  B(adx, 19)
-  B(smap, 20)
-  B(avx512ifma, 21)
-  B(pcommit, 22)
-  B(clflushopt, 23)
-  B(clwb, 24)
-  B(avx512pf, 26)
-  B(avx512er, 27)
-  B(avx512cd, 28)
-  B(sha, 29)
-  B(avx512bw, 30)
-  B(avx512vl, 31)
-#undef B
-#define C(name, bit) X(name, f7c, bit)
-  C(prefetchwt1, 0)
-  C(avx512vbmi, 1)
-#undef C
-
-#undef X
-
-#endif /* ZSTD_COMMON_CPU_H */
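The X/B/C/D macros above expand into one `ZSTD_cpuid_<feature>()` predicate per feature bit. A usage sketch relying only on functions generated in this header; since cpu.h pulls in the internal "mem.h", this only compiles inside the zstd source tree:

```c
/* Sketch: querying the feature predicates generated by the macros in cpu.h. */
#include <stdio.h>
#include "cpu.h"   /* internal zstd header; include path is whatever the build uses */

int main(void) {
    const ZSTD_cpuid_t cpuid = ZSTD_cpuid();   /* run CPUID once, cache the four feature words */
    printf("sse2: %d\n", ZSTD_cpuid_sse2(cpuid));
    printf("avx2: %d\n", ZSTD_cpuid_avx2(cpuid));
    printf("bmi2: %d\n", ZSTD_cpuid_bmi2(cpuid));
    return 0;
}
```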
diff --git a/vendor/github.com/DataDog/zstd/debug.c b/vendor/github.com/DataDog/zstd/debug.c
deleted file mode 100644
index 3ebdd1c..0000000
--- a/vendor/github.com/DataDog/zstd/debug.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/* ******************************************************************
-   debug
-   Part of FSE library
-   Copyright (C) 2013-present, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-
-
-/*
- * This module only hosts one global variable
- * which can be used to dynamically influence the verbosity of traces,
- * such as DEBUGLOG and RAWLOG
- */
-
-#include "debug.h"
-
-int g_debuglevel = DEBUGLEVEL;
diff --git a/vendor/github.com/DataDog/zstd/debug.h b/vendor/github.com/DataDog/zstd/debug.h
deleted file mode 100644
index b4fc89d..0000000
--- a/vendor/github.com/DataDog/zstd/debug.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* ******************************************************************
-   debug
-   Part of FSE library
-   Copyright (C) 2013-present, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-
-
-/*
- * The purpose of this header is to enable debug functions.
- * They group together assert(), DEBUGLOG() and RAWLOG() for run-time,
- * and DEBUG_STATIC_ASSERT() for compile-time.
- *
- * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
- *
- * Level 1 enables assert() only.
- * Starting level 2, traces can be generated and pushed to stderr.
- * The higher the level, the more verbose the traces.
- *
- * It's possible to dynamically adjust the level using the variable g_debuglevel,
- * which is only declared if DEBUGLEVEL>=2,
- * and is a global variable, not multi-thread protected (use with care)
- */
-
-#ifndef DEBUG_H_12987983217
-#define DEBUG_H_12987983217
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/* static assert is triggered at compile time, leaving no runtime artefact.
- * static assert only works with compile-time constants.
- * Also, this variant can only be used inside a function. */
-#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])
-
-
-/* DEBUGLEVEL is expected to be defined externally,
- * typically through compiler command line.
- * Value must be a number. */
-#ifndef DEBUGLEVEL
-#  define DEBUGLEVEL 0
-#endif
-
-
-/* DEBUGFILE can be defined externally,
- * typically through compiler command line.
- * note : currently useless.
- * Value must be stderr or stdout */
-#ifndef DEBUGFILE
-#  define DEBUGFILE stderr
-#endif
-
-
-/* recommended values for DEBUGLEVEL :
- * 0 : release mode, no debug, all run-time checks disabled
- * 1 : enables assert() only, no display
- * 2 : reserved, for currently active debug path
- * 3 : events once per object lifetime (CCtx, CDict, etc.)
- * 4 : events once per frame
- * 5 : events once per block
- * 6 : events once per sequence (verbose)
- * 7+: events at every position (*very* verbose)
- *
- * It's generally inconvenient to output traces > 5;
- * in that case, it's possible to selectively trigger high verbosity levels
- * by modifying g_debuglevel.
- */
-
-#if (DEBUGLEVEL>=1)
-#  include <assert.h>
-#else
-#  ifndef assert   /* assert may be already defined, due to prior #include <assert.h> */
-#    define assert(condition) ((void)0)   /* disable assert (default) */
-#  endif
-#endif
-
-#if (DEBUGLEVEL>=2)
-#  include <stdio.h>
-extern int g_debuglevel; /* the variable is only declared,
-                            it actually lives in debug.c,
-                            and is shared by the whole process.
-                            It's not thread-safe.
-                            It's useful when enabling very verbose levels
-                            on selective conditions (such as position in src) */
-
-#  define RAWLOG(l, ...) {                                      \
-                if (l<=g_debuglevel) {                          \
-                    fprintf(stderr, __VA_ARGS__);               \
-            }   }
-#  define DEBUGLOG(l, ...) {                                    \
-                if (l<=g_debuglevel) {                          \
-                    fprintf(stderr, __FILE__ ": " __VA_ARGS__); \
-                    fprintf(stderr, " \n");                     \
-            }   }
-#else
-#  define RAWLOG(l, ...)      {}    /* disabled */
-#  define DEBUGLOG(l, ...)    {}    /* disabled */
-#endif
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* DEBUG_H_12987983217 */
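A sketch of how these macros are meant to be used: build with `-DDEBUGLEVEL=5` (or higher) so the guarded fprintf calls exist at all, then filter at run time through g_debuglevel. This is an internal header, so the example only compiles inside the zstd source tree and needs debug.c linked in when DEBUGLEVEL >= 2:

```c
/* Sketch: using DEBUGLOG()/RAWLOG() from debug.h inside the zstd source tree. */
#include "debug.h"   /* internal zstd header; link debug.c when DEBUGLEVEL >= 2 */

static void decode_block(int blockNb)
{
    DEBUGLOG(5, "decoding block %d", blockNb);   /* prefixed with __FILE__, emitted at level >= 5 */
    /* ... actual work ... */
    RAWLOG(6, "block %d done\n", blockNb);       /* raw trace: no file prefix, no added newline */
}

int main(void)
{
#if (DEBUGLEVEL >= 2)
    g_debuglevel = 6;    /* raise run-time verbosity; not thread-safe, debug builds only */
#endif
    decode_block(1);
    return 0;
}
```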
diff --git a/vendor/github.com/DataDog/zstd/divsufsort.c b/vendor/github.com/DataDog/zstd/divsufsort.c
deleted file mode 100644
index ead9220..0000000
--- a/vendor/github.com/DataDog/zstd/divsufsort.c
+++ /dev/null
@@ -1,1913 +0,0 @@
-/*
- * divsufsort.c for libdivsufsort-lite
- * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*- Compiler specifics -*/
-#ifdef __clang__
-#pragma clang diagnostic ignored "-Wshorten-64-to-32"
-#endif
-
-#if defined(_MSC_VER)
-#  pragma warning(disable : 4244)
-#  pragma warning(disable : 4127)    /* C4127 : Condition expression is constant */
-#endif
-
-
-/*- Dependencies -*/
-#include <assert.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "divsufsort.h"
-
-/*- Constants -*/
-#if defined(INLINE)
-# undef INLINE
-#endif
-#if !defined(INLINE)
-# define INLINE __inline
-#endif
-#if defined(ALPHABET_SIZE) && (ALPHABET_SIZE < 1)
-# undef ALPHABET_SIZE
-#endif
-#if !defined(ALPHABET_SIZE)
-# define ALPHABET_SIZE (256)
-#endif
-#define BUCKET_A_SIZE (ALPHABET_SIZE)
-#define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE)
-#if defined(SS_INSERTIONSORT_THRESHOLD)
-# if SS_INSERTIONSORT_THRESHOLD < 1
-#  undef SS_INSERTIONSORT_THRESHOLD
-#  define SS_INSERTIONSORT_THRESHOLD (1)
-# endif
-#else
-# define SS_INSERTIONSORT_THRESHOLD (8)
-#endif
-#if defined(SS_BLOCKSIZE)
-# if SS_BLOCKSIZE < 0
-#  undef SS_BLOCKSIZE
-#  define SS_BLOCKSIZE (0)
-# elif 32768 <= SS_BLOCKSIZE
-#  undef SS_BLOCKSIZE
-#  define SS_BLOCKSIZE (32767)
-# endif
-#else
-# define SS_BLOCKSIZE (1024)
-#endif
-/* minstacksize = log(SS_BLOCKSIZE) / log(3) * 2 */
-#if SS_BLOCKSIZE == 0
-# define SS_MISORT_STACKSIZE (96)
-#elif SS_BLOCKSIZE <= 4096
-# define SS_MISORT_STACKSIZE (16)
-#else
-# define SS_MISORT_STACKSIZE (24)
-#endif
-#define SS_SMERGE_STACKSIZE (32)
-#define TR_INSERTIONSORT_THRESHOLD (8)
-#define TR_STACKSIZE (64)
-
-
-/*- Macros -*/
-#ifndef SWAP
-# define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0)
-#endif /* SWAP */
-#ifndef MIN
-# define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
-#endif /* MIN */
-#ifndef MAX
-# define MAX(_a, _b) (((_a) > (_b)) ? (_a) : (_b))
-#endif /* MAX */
-#define STACK_PUSH(_a, _b, _c, _d)\
-  do {\
-    assert(ssize < STACK_SIZE);\
-    stack[ssize].a = (_a), stack[ssize].b = (_b),\
-    stack[ssize].c = (_c), stack[ssize++].d = (_d);\
-  } while(0)
-#define STACK_PUSH5(_a, _b, _c, _d, _e)\
-  do {\
-    assert(ssize < STACK_SIZE);\
-    stack[ssize].a = (_a), stack[ssize].b = (_b),\
-    stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\
-  } while(0)
-#define STACK_POP(_a, _b, _c, _d)\
-  do {\
-    assert(0 <= ssize);\
-    if(ssize == 0) { return; }\
-    (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
-    (_c) = stack[ssize].c, (_d) = stack[ssize].d;\
-  } while(0)
-#define STACK_POP5(_a, _b, _c, _d, _e)\
-  do {\
-    assert(0 <= ssize);\
-    if(ssize == 0) { return; }\
-    (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
-    (_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\
-  } while(0)
-#define BUCKET_A(_c0) bucket_A[(_c0)]
-#if ALPHABET_SIZE == 256
-#define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)])
-#define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)])
-#else
-#define BUCKET_B(_c0, _c1) (bucket_B[(_c1) * ALPHABET_SIZE + (_c0)])
-#define BUCKET_BSTAR(_c0, _c1) (bucket_B[(_c0) * ALPHABET_SIZE + (_c1)])
-#endif
-
-
-/*- Private Functions -*/
-
-static const int lg_table[256]= {
- -1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
-  5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
-  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
-  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
-  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
-  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
-  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
-  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
-};
-
-#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
-
-static INLINE
-int
-ss_ilg(int n) {
-#if SS_BLOCKSIZE == 0
-  return (n & 0xffff0000) ?
-          ((n & 0xff000000) ?
-            24 + lg_table[(n >> 24) & 0xff] :
-            16 + lg_table[(n >> 16) & 0xff]) :
-          ((n & 0x0000ff00) ?
-             8 + lg_table[(n >>  8) & 0xff] :
-             0 + lg_table[(n >>  0) & 0xff]);
-#elif SS_BLOCKSIZE < 256
-  return lg_table[n];
-#else
-  return (n & 0xff00) ?
-          8 + lg_table[(n >> 8) & 0xff] :
-          0 + lg_table[(n >> 0) & 0xff];
-#endif
-}
-
-#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
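lg_table is a 256-entry floor(log2) table and ss_ilg() stitches it into a 16- or 32-bit integer log depending on SS_BLOCKSIZE. A standalone check of the same idea, using the 32-bit branch and regenerating the table instead of copying it:

```c
/* Standalone check of the table-driven floor(log2) used by ss_ilg(). */
#include <assert.h>
#include <stdio.h>

static int lg_table[256];

static int ilg(int n) {             /* floor(log2(n)) for 1 <= n < 2^31, widest branch of ss_ilg() */
    return (n & 0xffff0000)
        ? ((n & 0xff000000) ? 24 + lg_table[(n >> 24) & 0xff]
                            : 16 + lg_table[(n >> 16) & 0xff])
        : ((n & 0x0000ff00) ?  8 + lg_table[(n >>  8) & 0xff]
                            :  0 + lg_table[(n >>  0) & 0xff]);
}

int main(void) {
    int i;
    lg_table[0] = -1;                            /* matches the first entry of the deleted table */
    for (i = 1; i < 256; ++i) lg_table[i] = lg_table[i / 2] + 1;

    assert(ilg(1) == 0);
    assert(ilg(255) == 7);
    assert(ilg(256) == 8);
    assert(ilg(65536) == 16);
    assert(ilg((1 << 30) + 123) == 30);
    printf("table-driven floor(log2) checks passed\n");
    return 0;
}
```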
-
-#if SS_BLOCKSIZE != 0
-
-static const int sqq_table[256] = {
-  0,  16,  22,  27,  32,  35,  39,  42,  45,  48,  50,  53,  55,  57,  59,  61,
- 64,  65,  67,  69,  71,  73,  75,  76,  78,  80,  81,  83,  84,  86,  87,  89,
- 90,  91,  93,  94,  96,  97,  98,  99, 101, 102, 103, 104, 106, 107, 108, 109,
-110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
-128, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
-143, 144, 144, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155,
-156, 157, 158, 159, 160, 160, 161, 162, 163, 163, 164, 165, 166, 167, 167, 168,
-169, 170, 170, 171, 172, 173, 173, 174, 175, 176, 176, 177, 178, 178, 179, 180,
-181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189, 189, 190, 191,
-192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201,
-202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211,
-212, 212, 213, 214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221,
-221, 222, 222, 223, 224, 224, 225, 225, 226, 226, 227, 227, 228, 229, 229, 230,
-230, 231, 231, 232, 232, 233, 234, 234, 235, 235, 236, 236, 237, 237, 238, 238,
-239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 246, 246, 247,
-247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254, 255
-};
-
-static INLINE
-int
-ss_isqrt(int x) {
-  int y, e;
-
-  if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; }
-  e = (x & 0xffff0000) ?
-        ((x & 0xff000000) ?
-          24 + lg_table[(x >> 24) & 0xff] :
-          16 + lg_table[(x >> 16) & 0xff]) :
-        ((x & 0x0000ff00) ?
-           8 + lg_table[(x >>  8) & 0xff] :
-           0 + lg_table[(x >>  0) & 0xff]);
-
-  if(e >= 16) {
-    y = sqq_table[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7);
-    if(e >= 24) { y = (y + 1 + x / y) >> 1; }
-    y = (y + 1 + x / y) >> 1;
-  } else if(e >= 8) {
-    y = (sqq_table[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1;
-  } else {
-    return sqq_table[x] >> 4;
-  }
-
-  return (x < (y * y)) ? y - 1 : y;
-}
-
-#endif /* SS_BLOCKSIZE != 0 */
-
-
-/*---------------------------------------------------------------------------*/
-
-/* Compares two suffixes. */
-static INLINE
-int
-ss_compare(const unsigned char *T,
-           const int *p1, const int *p2,
-           int depth) {
-  const unsigned char *U1, *U2, *U1n, *U2n;
-
-  for(U1 = T + depth + *p1,
-      U2 = T + depth + *p2,
-      U1n = T + *(p1 + 1) + 2,
-      U2n = T + *(p2 + 1) + 2;
-      (U1 < U1n) && (U2 < U2n) && (*U1 == *U2);
-      ++U1, ++U2) {
-  }
-
-  return U1 < U1n ?
-        (U2 < U2n ? *U1 - *U2 : 1) :
-        (U2 < U2n ? -1 : 0);
-}
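A simplified analogue of the comparison above: compare two suffixes of T after skipping the first `depth` bytes. The real ss_compare() additionally bounds each suffix by the substring end stored next to it in PA; here the full suffixes are used, so this only illustrates the three-way return convention:

```c
/* Simplified analogue of ss_compare(): three-way compare of two suffixes at a given depth. */
#include <stdio.h>
#include <string.h>

static int suffix_compare(const unsigned char *T, size_t n,
                          size_t p1, size_t p2, size_t depth) {
    const unsigned char *U1 = T + p1 + depth, *U1n = T + n;
    const unsigned char *U2 = T + p2 + depth, *U2n = T + n;
    while (U1 < U1n && U2 < U2n && *U1 == *U2) { ++U1; ++U2; }
    return U1 < U1n ? (U2 < U2n ? *U1 - *U2 : 1)    /* shorter suffix that is a prefix sorts first */
                    : (U2 < U2n ? -1 : 0);
}

int main(void) {
    const unsigned char T[] = "banana";
    /* suffix "anana" (offset 1) vs suffix "ana" (offset 3): prints a positive value */
    printf("%d\n", suffix_compare(T, strlen((const char *)T), 1, 3, 0));
    return 0;
}
```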
-
-
-/*---------------------------------------------------------------------------*/
-
-#if (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1)
-
-/* Insertionsort for small size groups */
-static
-void
-ss_insertionsort(const unsigned char *T, const int *PA,
-                 int *first, int *last, int depth) {
-  int *i, *j;
-  int t;
-  int r;
-
-  for(i = last - 2; first <= i; --i) {
-    for(t = *i, j = i + 1; 0 < (r = ss_compare(T, PA + t, PA + *j, depth));) {
-      do { *(j - 1) = *j; } while((++j < last) && (*j < 0));
-      if(last <= j) { break; }
-    }
-    if(r == 0) { *j = ~*j; }
-    *(j - 1) = t;
-  }
-}
-
-#endif /* (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) */
-
-
-/*---------------------------------------------------------------------------*/
-
-#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
-
-static INLINE
-void
-ss_fixdown(const unsigned char *Td, const int *PA,
-           int *SA, int i, int size) {
-  int j, k;
-  int v;
-  int c, d, e;
-
-  for(v = SA[i], c = Td[PA[v]]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
-    d = Td[PA[SA[k = j++]]];
-    if(d < (e = Td[PA[SA[j]]])) { k = j; d = e; }
-    if(d <= c) { break; }
-  }
-  SA[i] = v;
-}
-
-/* Simple top-down heapsort. */
-static
-void
-ss_heapsort(const unsigned char *Td, const int *PA, int *SA, int size) {
-  int i, m;
-  int t;
-
-  m = size;
-  if((size % 2) == 0) {
-    m--;
-    if(Td[PA[SA[m / 2]]] < Td[PA[SA[m]]]) { SWAP(SA[m], SA[m / 2]); }
-  }
-
-  for(i = m / 2 - 1; 0 <= i; --i) { ss_fixdown(Td, PA, SA, i, m); }
-  if((size % 2) == 0) { SWAP(SA[0], SA[m]); ss_fixdown(Td, PA, SA, 0, m); }
-  for(i = m - 1; 0 < i; --i) {
-    t = SA[0], SA[0] = SA[i];
-    ss_fixdown(Td, PA, SA, 0, i);
-    SA[i] = t;
-  }
-}
-
-
-/*---------------------------------------------------------------------------*/
-
-/* Returns the median of three elements. */
-static INLINE
-int *
-ss_median3(const unsigned char *Td, const int *PA,
-           int *v1, int *v2, int *v3) {
-  int *t;
-  if(Td[PA[*v1]] > Td[PA[*v2]]) { SWAP(v1, v2); }
-  if(Td[PA[*v2]] > Td[PA[*v3]]) {
-    if(Td[PA[*v1]] > Td[PA[*v3]]) { return v1; }
-    else { return v3; }
-  }
-  return v2;
-}
-
-/* Returns the median of five elements. */
-static INLINE
-int *
-ss_median5(const unsigned char *Td, const int *PA,
-           int *v1, int *v2, int *v3, int *v4, int *v5) {
-  int *t;
-  if(Td[PA[*v2]] > Td[PA[*v3]]) { SWAP(v2, v3); }
-  if(Td[PA[*v4]] > Td[PA[*v5]]) { SWAP(v4, v5); }
-  if(Td[PA[*v2]] > Td[PA[*v4]]) { SWAP(v2, v4); SWAP(v3, v5); }
-  if(Td[PA[*v1]] > Td[PA[*v3]]) { SWAP(v1, v3); }
-  if(Td[PA[*v1]] > Td[PA[*v4]]) { SWAP(v1, v4); SWAP(v3, v5); }
-  if(Td[PA[*v3]] > Td[PA[*v4]]) { return v4; }
-  return v3;
-}
-
-/* Returns the pivot element. */
-static INLINE
-int *
-ss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) {
-  int *middle;
-  int t;
-
-  t = last - first;
-  middle = first + t / 2;
-
-  if(t <= 512) {
-    if(t <= 32) {
-      return ss_median3(Td, PA, first, middle, last - 1);
-    } else {
-      t >>= 2;
-      return ss_median5(Td, PA, first, first + t, middle, last - 1 - t, last - 1);
-    }
-  }
-  t >>= 3;
-  first  = ss_median3(Td, PA, first, first + t, first + (t << 1));
-  middle = ss_median3(Td, PA, middle - t, middle, middle + t);
-  last   = ss_median3(Td, PA, last - 1 - (t << 1), last - 1 - t, last - 1);
-  return ss_median3(Td, PA, first, middle, last);
-}
-
-
-/*---------------------------------------------------------------------------*/
-
-/* Binary partition for substrings. */
-static INLINE
-int *
-ss_partition(const int *PA,
-                    int *first, int *last, int depth) {
-  int *a, *b;
-  int t;
-  for(a = first - 1, b = last;;) {
-    for(; (++a < b) && ((PA[*a] + depth) >= (PA[*a + 1] + 1));) { *a = ~*a; }
-    for(; (a < --b) && ((PA[*b] + depth) <  (PA[*b + 1] + 1));) { }
-    if(b <= a) { break; }
-    t = ~*b;
-    *b = *a;
-    *a = t;
-  }
-  if(first < a) { *first = ~*first; }
-  return a;
-}
-
-/* Multikey introsort for medium size groups. */
-static
-void
-ss_mintrosort(const unsigned char *T, const int *PA,
-              int *first, int *last,
-              int depth) {
-#define STACK_SIZE SS_MISORT_STACKSIZE
-  struct { int *a, *b, c; int d; } stack[STACK_SIZE];
-  const unsigned char *Td;
-  int *a, *b, *c, *d, *e, *f;
-  int s, t;
-  int ssize;
-  int limit;
-  int v, x = 0;
-
-  for(ssize = 0, limit = ss_ilg(last - first);;) {
-
-    if((last - first) <= SS_INSERTIONSORT_THRESHOLD) {
-#if 1 < SS_INSERTIONSORT_THRESHOLD
-      if(1 < (last - first)) { ss_insertionsort(T, PA, first, last, depth); }
-#endif
-      STACK_POP(first, last, depth, limit);
-      continue;
-    }
-
-    Td = T + depth;
-    if(limit-- == 0) { ss_heapsort(Td, PA, first, last - first); }
-    if(limit < 0) {
-      for(a = first + 1, v = Td[PA[*first]]; a < last; ++a) {
-        if((x = Td[PA[*a]]) != v) {
-          if(1 < (a - first)) { break; }
-          v = x;
-          first = a;
-        }
-      }
-      if(Td[PA[*first] - 1] < v) {
-        first = ss_partition(PA, first, a, depth);
-      }
-      if((a - first) <= (last - a)) {
-        if(1 < (a - first)) {
-          STACK_PUSH(a, last, depth, -1);
-          last = a, depth += 1, limit = ss_ilg(a - first);
-        } else {
-          first = a, limit = -1;
-        }
-      } else {
-        if(1 < (last - a)) {
-          STACK_PUSH(first, a, depth + 1, ss_ilg(a - first));
-          first = a, limit = -1;
-        } else {
-          last = a, depth += 1, limit = ss_ilg(a - first);
-        }
-      }
-      continue;
-    }
-
-    /* choose pivot */
-    a = ss_pivot(Td, PA, first, last);
-    v = Td[PA[*a]];
-    SWAP(*first, *a);
-
-    /* partition */
-    for(b = first; (++b < last) && ((x = Td[PA[*b]]) == v);) { }
-    if(((a = b) < last) && (x < v)) {
-      for(; (++b < last) && ((x = Td[PA[*b]]) <= v);) {
-        if(x == v) { SWAP(*b, *a); ++a; }
-      }
-    }
-    for(c = last; (b < --c) && ((x = Td[PA[*c]]) == v);) { }
-    if((b < (d = c)) && (x > v)) {
-      for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
-        if(x == v) { SWAP(*c, *d); --d; }
-      }
-    }
-    for(; b < c;) {
-      SWAP(*b, *c);
-      for(; (++b < c) && ((x = Td[PA[*b]]) <= v);) {
-        if(x == v) { SWAP(*b, *a); ++a; }
-      }
-      for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
-        if(x == v) { SWAP(*c, *d); --d; }
-      }
-    }
-
-    if(a <= d) {
-      c = b - 1;
-
-      if((s = a - first) > (t = b - a)) { s = t; }
-      for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
-      if((s = d - c) > (t = last - d - 1)) { s = t; }
-      for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
-
-      a = first + (b - a), c = last - (d - c);
-      b = (v <= Td[PA[*a] - 1]) ? a : ss_partition(PA, a, c, depth);
-
-      if((a - first) <= (last - c)) {
-        if((last - c) <= (c - b)) {
-          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
-          STACK_PUSH(c, last, depth, limit);
-          last = a;
-        } else if((a - first) <= (c - b)) {
-          STACK_PUSH(c, last, depth, limit);
-          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
-          last = a;
-        } else {
-          STACK_PUSH(c, last, depth, limit);
-          STACK_PUSH(first, a, depth, limit);
-          first = b, last = c, depth += 1, limit = ss_ilg(c - b);
-        }
-      } else {
-        if((a - first) <= (c - b)) {
-          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
-          STACK_PUSH(first, a, depth, limit);
-          first = c;
-        } else if((last - c) <= (c - b)) {
-          STACK_PUSH(first, a, depth, limit);
-          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
-          first = c;
-        } else {
-          STACK_PUSH(first, a, depth, limit);
-          STACK_PUSH(c, last, depth, limit);
-          first = b, last = c, depth += 1, limit = ss_ilg(c - b);
-        }
-      }
-    } else {
-      limit += 1;
-      if(Td[PA[*first] - 1] < v) {
-        first = ss_partition(PA, first, last, depth);
-        limit = ss_ilg(last - first);
-      }
-      depth += 1;
-    }
-  }
-#undef STACK_SIZE
-}
-
-#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
-
-
-/*---------------------------------------------------------------------------*/
-
-#if SS_BLOCKSIZE != 0
-
-static INLINE
-void
-ss_blockswap(int *a, int *b, int n) {
-  int t;
-  for(; 0 < n; --n, ++a, ++b) {
-    t = *a, *a = *b, *b = t;
-  }
-}
-
-static INLINE
-void
-ss_rotate(int *first, int *middle, int *last) {
-  int *a, *b, t;
-  int l, r;
-  l = middle - first, r = last - middle;
-  for(; (0 < l) && (0 < r);) {
-    if(l == r) { ss_blockswap(first, middle, l); break; }
-    if(l < r) {
-      a = last - 1, b = middle - 1;
-      t = *a;
-      do {
-        *a-- = *b, *b-- = *a;
-        if(b < first) {
-          *a = t;
-          last = a;
-          if((r -= l + 1) <= l) { break; }
-          a -= 1, b = middle - 1;
-          t = *a;
-        }
-      } while(1);
-    } else {
-      a = first, b = middle;
-      t = *a;
-      do {
-        *a++ = *b, *b++ = *a;
-        if(last <= b) {
-          *a = t;
-          first = a + 1;
-          if((l -= r + 1) <= r) { break; }
-          a += 1, b = middle;
-          t = *a;
-        }
-      } while(1);
-    }
-  }
-}
-
-
-/*---------------------------------------------------------------------------*/
-
-static
-void
-ss_inplacemerge(const unsigned char *T, const int *PA,
-                int *first, int *middle, int *last,
-                int depth) {
-  const int *p;
-  int *a, *b;
-  int len, half;
-  int q, r;
-  int x;
-
-  for(;;) {
-    if(*(last - 1) < 0) { x = 1; p = PA + ~*(last - 1); }
-    else                { x = 0; p = PA +  *(last - 1); }
-    for(a = first, len = middle - first, half = len >> 1, r = -1;
-        0 < len;
-        len = half, half >>= 1) {
-      b = a + half;
-      q = ss_compare(T, PA + ((0 <= *b) ? *b : ~*b), p, depth);
-      if(q < 0) {
-        a = b + 1;
-        half -= (len & 1) ^ 1;
-      } else {
-        r = q;
-      }
-    }
-    if(a < middle) {
-      if(r == 0) { *a = ~*a; }
-      ss_rotate(a, middle, last);
-      last -= middle - a;
-      middle = a;
-      if(first == middle) { break; }
-    }
-    --last;
-    if(x != 0) { while(*--last < 0) { } }
-    if(middle == last) { break; }
-  }
-}
-
-
-/*---------------------------------------------------------------------------*/
-
-/* Merge-forward with internal buffer. */
-static
-void
-ss_mergeforward(const unsigned char *T, const int *PA,
-                int *first, int *middle, int *last,
-                int *buf, int depth) {
-  int *a, *b, *c, *bufend;
-  int t;
-  int r;
-
-  bufend = buf + (middle - first) - 1;
-  ss_blockswap(buf, first, middle - first);
-
-  for(t = *(a = first), b = buf, c = middle;;) {
-    r = ss_compare(T, PA + *b, PA + *c, depth);
-    if(r < 0) {
-      do {
-        *a++ = *b;
-        if(bufend <= b) { *bufend = t; return; }
-        *b++ = *a;
-      } while(*b < 0);
-    } else if(r > 0) {
-      do {
-        *a++ = *c, *c++ = *a;
-        if(last <= c) {
-          while(b < bufend) { *a++ = *b, *b++ = *a; }
-          *a = *b, *b = t;
-          return;
-        }
-      } while(*c < 0);
-    } else {
-      *c = ~*c;
-      do {
-        *a++ = *b;
-        if(bufend <= b) { *bufend = t; return; }
-        *b++ = *a;
-      } while(*b < 0);
-
-      do {
-        *a++ = *c, *c++ = *a;
-        if(last <= c) {
-          while(b < bufend) { *a++ = *b, *b++ = *a; }
-          *a = *b, *b = t;
-          return;
-        }
-      } while(*c < 0);
-    }
-  }
-}
-
-/* Merge-backward with internal buffer. */
-static
-void
-ss_mergebackward(const unsigned char *T, const int *PA,
-                 int *first, int *middle, int *last,
-                 int *buf, int depth) {
-  const int *p1, *p2;
-  int *a, *b, *c, *bufend;
-  int t;
-  int r;
-  int x;
-
-  bufend = buf + (last - middle) - 1;
-  ss_blockswap(buf, middle, last - middle);
-
-  x = 0;
-  if(*bufend < 0)       { p1 = PA + ~*bufend; x |= 1; }
-  else                  { p1 = PA +  *bufend; }
-  if(*(middle - 1) < 0) { p2 = PA + ~*(middle - 1); x |= 2; }
-  else                  { p2 = PA +  *(middle - 1); }
-  for(t = *(a = last - 1), b = bufend, c = middle - 1;;) {
-    r = ss_compare(T, p1, p2, depth);
-    if(0 < r) {
-      if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }
-      *a-- = *b;
-      if(b <= buf) { *buf = t; break; }
-      *b-- = *a;
-      if(*b < 0) { p1 = PA + ~*b; x |= 1; }
-      else       { p1 = PA +  *b; }
-    } else if(r < 0) {
-      if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }
-      *a-- = *c, *c-- = *a;
-      if(c < first) {
-        while(buf < b) { *a-- = *b, *b-- = *a; }
-        *a = *b, *b = t;
-        break;
-      }
-      if(*c < 0) { p2 = PA + ~*c; x |= 2; }
-      else       { p2 = PA +  *c; }
-    } else {
-      if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }
-      *a-- = ~*b;
-      if(b <= buf) { *buf = t; break; }
-      *b-- = *a;
-      if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }
-      *a-- = *c, *c-- = *a;
-      if(c < first) {
-        while(buf < b) { *a-- = *b, *b-- = *a; }
-        *a = *b, *b = t;
-        break;
-      }
-      if(*b < 0) { p1 = PA + ~*b; x |= 1; }
-      else       { p1 = PA +  *b; }
-      if(*c < 0) { p2 = PA + ~*c; x |= 2; }
-      else       { p2 = PA +  *c; }
-    }
-  }
-}
-
-/* D&C based merge. */
-static
-void
-ss_swapmerge(const unsigned char *T, const int *PA,
-             int *first, int *middle, int *last,
-             int *buf, int bufsize, int depth) {
-#define STACK_SIZE SS_SMERGE_STACKSIZE
-#define GETIDX(a) ((0 <= (a)) ? (a) : (~(a)))
-#define MERGE_CHECK(a, b, c)\
-  do {\
-    if(((c) & 1) ||\
-       (((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\
-      *(a) = ~*(a);\
-    }\
-    if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\
-      *(b) = ~*(b);\
-    }\
-  } while(0)
-  struct { int *a, *b, *c; int d; } stack[STACK_SIZE];
-  int *l, *r, *lm, *rm;
-  int m, len, half;
-  int ssize;
-  int check, next;
-
-  for(check = 0, ssize = 0;;) {
-    if((last - middle) <= bufsize) {
-      if((first < middle) && (middle < last)) {
-        ss_mergebackward(T, PA, first, middle, last, buf, depth);
-      }
-      MERGE_CHECK(first, last, check);
-      STACK_POP(first, middle, last, check);
-      continue;
-    }
-
-    if((middle - first) <= bufsize) {
-      if(first < middle) {
-        ss_mergeforward(T, PA, first, middle, last, buf, depth);
-      }
-      MERGE_CHECK(first, last, check);
-      STACK_POP(first, middle, last, check);
-      continue;
-    }
-
-    for(m = 0, len = MIN(middle - first, last - middle), half = len >> 1;
-        0 < len;
-        len = half, half >>= 1) {
-      if(ss_compare(T, PA + GETIDX(*(middle + m + half)),
-                       PA + GETIDX(*(middle - m - half - 1)), depth) < 0) {
-        m += half + 1;
-        half -= (len & 1) ^ 1;
-      }
-    }
-
-    if(0 < m) {
-      lm = middle - m, rm = middle + m;
-      ss_blockswap(lm, middle, m);
-      l = r = middle, next = 0;
-      if(rm < last) {
-        if(*rm < 0) {
-          *rm = ~*rm;
-          if(first < lm) { for(; *--l < 0;) { } next |= 4; }
-          next |= 1;
-        } else if(first < lm) {
-          for(; *r < 0; ++r) { }
-          next |= 2;
-        }
-      }
-
-      if((l - first) <= (last - r)) {
-        STACK_PUSH(r, rm, last, (next & 3) | (check & 4));
-        middle = lm, last = l, check = (check & 3) | (next & 4);
-      } else {
-        if((next & 2) && (r == middle)) { next ^= 6; }
-        STACK_PUSH(first, lm, l, (check & 3) | (next & 4));
-        first = r, middle = rm, check = (next & 3) | (check & 4);
-      }
-    } else {
-      if(ss_compare(T, PA + GETIDX(*(middle - 1)), PA + *middle, depth) == 0) {
-        *middle = ~*middle;
-      }
-      MERGE_CHECK(first, last, check);
-      STACK_POP(first, middle, last, check);
-    }
-  }
-#undef STACK_SIZE
-}
-
-#endif /* SS_BLOCKSIZE != 0 */
-
-
-/*---------------------------------------------------------------------------*/
-
-/* Substring sort */
-static
-void
-sssort(const unsigned char *T, const int *PA,
-       int *first, int *last,
-       int *buf, int bufsize,
-       int depth, int n, int lastsuffix) {
-  int *a;
-#if SS_BLOCKSIZE != 0
-  int *b, *middle, *curbuf;
-  int j, k, curbufsize, limit;
-#endif
-  int i;
-
-  if(lastsuffix != 0) { ++first; }
-
-#if SS_BLOCKSIZE == 0
-  ss_mintrosort(T, PA, first, last, depth);
-#else
-  if((bufsize < SS_BLOCKSIZE) &&
-      (bufsize < (last - first)) &&
-      (bufsize < (limit = ss_isqrt(last - first)))) {
-    if(SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; }
-    buf = middle = last - limit, bufsize = limit;
-  } else {
-    middle = last, limit = 0;
-  }
-  for(a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) {
-#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
-    ss_mintrosort(T, PA, a, a + SS_BLOCKSIZE, depth);
-#elif 1 < SS_BLOCKSIZE
-    ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth);
-#endif
-    curbufsize = last - (a + SS_BLOCKSIZE);
-    curbuf = a + SS_BLOCKSIZE;
-    if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; }
-    for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= k, k <<= 1, j >>= 1) {
-      ss_swapmerge(T, PA, b - k, b, b + k, curbuf, curbufsize, depth);
-    }
-  }
-#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
-  ss_mintrosort(T, PA, a, middle, depth);
-#elif 1 < SS_BLOCKSIZE
-  ss_insertionsort(T, PA, a, middle, depth);
-#endif
-  for(k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) {
-    if(i & 1) {
-      ss_swapmerge(T, PA, a - k, a, middle, buf, bufsize, depth);
-      a -= k;
-    }
-  }
-  if(limit != 0) {
-#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
-    ss_mintrosort(T, PA, middle, last, depth);
-#elif 1 < SS_BLOCKSIZE
-    ss_insertionsort(T, PA, middle, last, depth);
-#endif
-    ss_inplacemerge(T, PA, first, middle, last, depth);
-  }
-#endif
-
-  if(lastsuffix != 0) {
-    /* Insert last type B* suffix. */
-    int PAi[2]; PAi[0] = PA[*(first - 1)], PAi[1] = n - 2;
-    for(a = first, i = *(first - 1);
-        (a < last) && ((*a < 0) || (0 < ss_compare(T, &(PAi[0]), PA + *a, depth)));
-        ++a) {
-      *(a - 1) = *a;
-    }
-    *(a - 1) = i;
-  }
-}
-
-
-/*---------------------------------------------------------------------------*/
-
-static INLINE
-int
-tr_ilg(int n) {
-  return (n & 0xffff0000) ?
-          ((n & 0xff000000) ?
-            24 + lg_table[(n >> 24) & 0xff] :
-            16 + lg_table[(n >> 16) & 0xff]) :
-          ((n & 0x0000ff00) ?
-             8 + lg_table[(n >>  8) & 0xff] :
-             0 + lg_table[(n >>  0) & 0xff]);
-}
-
-
-/*---------------------------------------------------------------------------*/
-
-/* Simple insertionsort for small size groups. */
-static
-void
-tr_insertionsort(const int *ISAd, int *first, int *last) {
-  int *a, *b;
-  int t, r;
-
-  for(a = first + 1; a < last; ++a) {
-    for(t = *a, b = a - 1; 0 > (r = ISAd[t] - ISAd[*b]);) {
-      do { *(b + 1) = *b; } while((first <= --b) && (*b < 0));
-      if(b < first) { break; }
-    }
-    if(r == 0) { *b = ~*b; }
-    *(b + 1) = t;
-  }
-}
-
-
-/*---------------------------------------------------------------------------*/
-
-static INLINE
-void
-tr_fixdown(const int *ISAd, int *SA, int i, int size) {
-  int j, k;
-  int v;
-  int c, d, e;
-
-  for(v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
-    d = ISAd[SA[k = j++]];
-    if(d < (e = ISAd[SA[j]])) { k = j; d = e; }
-    if(d <= c) { break; }
-  }
-  SA[i] = v;
-}
-
-/* Simple top-down heapsort. */
-static
-void
-tr_heapsort(const int *ISAd, int *SA, int size) {
-  int i, m;
-  int t;
-
-  m = size;
-  if((size % 2) == 0) {
-    m--;
-    if(ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); }
-  }
-
-  for(i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); }
-  if((size % 2) == 0) { SWAP(SA[0], SA[m]); tr_fixdown(ISAd, SA, 0, m); }
-  for(i = m - 1; 0 < i; --i) {
-    t = SA[0], SA[0] = SA[i];
-    tr_fixdown(ISAd, SA, 0, i);
-    SA[i] = t;
-  }
-}
-
-
-/*---------------------------------------------------------------------------*/
-
-/* Returns the median of three elements. */
-static INLINE
-int *
-tr_median3(const int *ISAd, int *v1, int *v2, int *v3) {
-  int *t;
-  if(ISAd[*v1] > ISAd[*v2]) { SWAP(v1, v2); }
-  if(ISAd[*v2] > ISAd[*v3]) {
-    if(ISAd[*v1] > ISAd[*v3]) { return v1; }
-    else { return v3; }
-  }
-  return v2;
-}
-
-/* Returns the median of five elements. */
-static INLINE
-int *
-tr_median5(const int *ISAd,
-           int *v1, int *v2, int *v3, int *v4, int *v5) {
-  int *t;
-  if(ISAd[*v2] > ISAd[*v3]) { SWAP(v2, v3); }
-  if(ISAd[*v4] > ISAd[*v5]) { SWAP(v4, v5); }
-  if(ISAd[*v2] > ISAd[*v4]) { SWAP(v2, v4); SWAP(v3, v5); }
-  if(ISAd[*v1] > ISAd[*v3]) { SWAP(v1, v3); }
-  if(ISAd[*v1] > ISAd[*v4]) { SWAP(v1, v4); SWAP(v3, v5); }
-  if(ISAd[*v3] > ISAd[*v4]) { return v4; }
-  return v3;
-}
-
-/* Returns the pivot element. */
-static INLINE
-int *
-tr_pivot(const int *ISAd, int *first, int *last) {
-  int *middle;
-  int t;
-
-  t = last - first;
-  middle = first + t / 2;
-
-  if(t <= 512) {
-    if(t <= 32) {
-      return tr_median3(ISAd, first, middle, last - 1);
-    } else {
-      t >>= 2;
-      return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1);
-    }
-  }
-  t >>= 3;
-  first  = tr_median3(ISAd, first, first + t, first + (t << 1));
-  middle = tr_median3(ISAd, middle - t, middle, middle + t);
-  last   = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1);
-  return tr_median3(ISAd, first, middle, last);
-}
-
-
-/*---------------------------------------------------------------------------*/
-
-typedef struct _trbudget_t trbudget_t;
-struct _trbudget_t {
-  int chance;
-  int remain;
-  int incval;
-  int count;
-};
-
-static INLINE
-void
-trbudget_init(trbudget_t *budget, int chance, int incval) {
-  budget->chance = chance;
-  budget->remain = budget->incval = incval;
-}
-
-static INLINE
-int
-trbudget_check(trbudget_t *budget, int size) {
-  if(size <= budget->remain) { budget->remain -= size; return 1; }
-  if(budget->chance == 0) { budget->count += size; return 0; }
-  budget->remain += budget->incval - size;
-  budget->chance -= 1;
-  return 1;
-}
-
-
-/*---------------------------------------------------------------------------*/
-
-static INLINE
-void
-tr_partition(const int *ISAd,
-             int *first, int *middle, int *last,
-             int **pa, int **pb, int v) {
-  int *a, *b, *c, *d, *e, *f;
-  int t, s;
-  int x = 0;
-
-  for(b = middle - 1; (++b < last) && ((x = ISAd[*b]) == v);) { }
-  if(((a = b) < last) && (x < v)) {
-    for(; (++b < last) && ((x = ISAd[*b]) <= v);) {
-      if(x == v) { SWAP(*b, *a); ++a; }
-    }
-  }
-  for(c = last; (b < --c) && ((x = ISAd[*c]) == v);) { }
-  if((b < (d = c)) && (x > v)) {
-    for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
-      if(x == v) { SWAP(*c, *d); --d; }
-    }
-  }
-  for(; b < c;) {
-    SWAP(*b, *c);
-    for(; (++b < c) && ((x = ISAd[*b]) <= v);) {
-      if(x == v) { SWAP(*b, *a); ++a; }
-    }
-    for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
-      if(x == v) { SWAP(*c, *d); --d; }
-    }
-  }
-
-  if(a <= d) {
-    c = b - 1;
-    if((s = a - first) > (t = b - a)) { s = t; }
-    for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
-    if((s = d - c) > (t = last - d - 1)) { s = t; }
-    for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
-    first += (b - a), last -= (d - c);
-  }
-  *pa = first, *pb = last;
-}
-
-static
-void
-tr_copy(int *ISA, const int *SA,
-        int *first, int *a, int *b, int *last,
-        int depth) {
-  /* sort suffixes of middle partition
-     by using sorted order of suffixes of left and right partition. */
-  int *c, *d, *e;
-  int s, v;
-
-  v = b - SA - 1;
-  for(c = first, d = a - 1; c <= d; ++c) {
-    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
-      *++d = s;
-      ISA[s] = d - SA;
-    }
-  }
-  for(c = last - 1, e = d + 1, d = b; e < d; --c) {
-    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
-      *--d = s;
-      ISA[s] = d - SA;
-    }
-  }
-}
-
-static
-void
-tr_partialcopy(int *ISA, const int *SA,
-               int *first, int *a, int *b, int *last,
-               int depth) {
-  int *c, *d, *e;
-  int s, v;
-  int rank, lastrank, newrank = -1;
-
-  v = b - SA - 1;
-  lastrank = -1;
-  for(c = first, d = a - 1; c <= d; ++c) {
-    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
-      *++d = s;
-      rank = ISA[s + depth];
-      if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
-      ISA[s] = newrank;
-    }
-  }
-
-  lastrank = -1;
-  for(e = d; first <= e; --e) {
-    rank = ISA[*e];
-    if(lastrank != rank) { lastrank = rank; newrank = e - SA; }
-    if(newrank != rank) { ISA[*e] = newrank; }
-  }
-
-  lastrank = -1;
-  for(c = last - 1, e = d + 1, d = b; e < d; --c) {
-    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
-      *--d = s;
-      rank = ISA[s + depth];
-      if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
-      ISA[s] = newrank;
-    }
-  }
-}
-
-static
-void
-tr_introsort(int *ISA, const int *ISAd,
-             int *SA, int *first, int *last,
-             trbudget_t *budget) {
-#define STACK_SIZE TR_STACKSIZE
-  struct { const int *a; int *b, *c; int d, e; }stack[STACK_SIZE];
-  int *a, *b, *c;
-  int t;
-  int v, x = 0;
-  int incr = ISAd - ISA;
-  int limit, next;
-  int ssize, trlink = -1;
-
-  for(ssize = 0, limit = tr_ilg(last - first);;) {
-
-    if(limit < 0) {
-      if(limit == -1) {
-        /* tandem repeat partition */
-        tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1);
-
-        /* update ranks */
-        if(a < last) {
-          for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
-        }
-        if(b < last) {
-          for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; }
-        }
-
-        /* push */
-        if(1 < (b - a)) {
-          STACK_PUSH5(NULL, a, b, 0, 0);
-          STACK_PUSH5(ISAd - incr, first, last, -2, trlink);
-          trlink = ssize - 2;
-        }
-        if((a - first) <= (last - b)) {
-          if(1 < (a - first)) {
-            STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink);
-            last = a, limit = tr_ilg(a - first);
-          } else if(1 < (last - b)) {
-            first = b, limit = tr_ilg(last - b);
-          } else {
-            STACK_POP5(ISAd, first, last, limit, trlink);
-          }
-        } else {
-          if(1 < (last - b)) {
-            STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink);
-            first = b, limit = tr_ilg(last - b);
-          } else if(1 < (a - first)) {
-            last = a, limit = tr_ilg(a - first);
-          } else {
-            STACK_POP5(ISAd, first, last, limit, trlink);
-          }
-        }
-      } else if(limit == -2) {
-        /* tandem repeat copy */
-        a = stack[--ssize].b, b = stack[ssize].c;
-        if(stack[ssize].d == 0) {
-          tr_copy(ISA, SA, first, a, b, last, ISAd - ISA);
-        } else {
-          if(0 <= trlink) { stack[trlink].d = -1; }
-          tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA);
-        }
-        STACK_POP5(ISAd, first, last, limit, trlink);
-      } else {
-        /* sorted partition */
-        if(0 <= *first) {
-          a = first;
-          do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a));
-          first = a;
-        }
-        if(first < last) {
-          a = first; do { *a = ~*a; } while(*++a < 0);
-          next = (ISA[*a] != ISAd[*a]) ? tr_ilg(a - first + 1) : -1;
-          if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } }
-
-          /* push */
-          if(trbudget_check(budget, a - first)) {
-            if((a - first) <= (last - a)) {
-              STACK_PUSH5(ISAd, a, last, -3, trlink);
-              ISAd += incr, last = a, limit = next;
-            } else {
-              if(1 < (last - a)) {
-                STACK_PUSH5(ISAd + incr, first, a, next, trlink);
-                first = a, limit = -3;
-              } else {
-                ISAd += incr, last = a, limit = next;
-              }
-            }
-          } else {
-            if(0 <= trlink) { stack[trlink].d = -1; }
-            if(1 < (last - a)) {
-              first = a, limit = -3;
-            } else {
-              STACK_POP5(ISAd, first, last, limit, trlink);
-            }
-          }
-        } else {
-          STACK_POP5(ISAd, first, last, limit, trlink);
-        }
-      }
-      continue;
-    }
-
-    if((last - first) <= TR_INSERTIONSORT_THRESHOLD) {
-      tr_insertionsort(ISAd, first, last);
-      limit = -3;
-      continue;
-    }
-
-    if(limit-- == 0) {
-      tr_heapsort(ISAd, first, last - first);
-      for(a = last - 1; first < a; a = b) {
-        for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; }
-      }
-      limit = -3;
-      continue;
-    }
-
-    /* choose pivot */
-    a = tr_pivot(ISAd, first, last);
-    SWAP(*first, *a);
-    v = ISAd[*first];
-
-    /* partition */
-    tr_partition(ISAd, first, first + 1, last, &a, &b, v);
-    if((last - first) != (b - a)) {
-      next = (ISA[*a] != v) ? tr_ilg(b - a) : -1;
-
-      /* update ranks */
-      for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
-      if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } }
-
-      /* push */
-      if((1 < (b - a)) && (trbudget_check(budget, b - a))) {
-        if((a - first) <= (last - b)) {
-          if((last - b) <= (b - a)) {
-            if(1 < (a - first)) {
-              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
-              STACK_PUSH5(ISAd, b, last, limit, trlink);
-              last = a;
-            } else if(1 < (last - b)) {
-              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
-              first = b;
-            } else {
-              ISAd += incr, first = a, last = b, limit = next;
-            }
-          } else if((a - first) <= (b - a)) {
-            if(1 < (a - first)) {
-              STACK_PUSH5(ISAd, b, last, limit, trlink);
-              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
-              last = a;
-            } else {
-              STACK_PUSH5(ISAd, b, last, limit, trlink);
-              ISAd += incr, first = a, last = b, limit = next;
-            }
-          } else {
-            STACK_PUSH5(ISAd, b, last, limit, trlink);
-            STACK_PUSH5(ISAd, first, a, limit, trlink);
-            ISAd += incr, first = a, last = b, limit = next;
-          }
-        } else {
-          if((a - first) <= (b - a)) {
-            if(1 < (last - b)) {
-              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
-              STACK_PUSH5(ISAd, first, a, limit, trlink);
-              first = b;
-            } else if(1 < (a - first)) {
-              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
-              last = a;
-            } else {
-              ISAd += incr, first = a, last = b, limit = next;
-            }
-          } else if((last - b) <= (b - a)) {
-            if(1 < (last - b)) {
-              STACK_PUSH5(ISAd, first, a, limit, trlink);
-              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
-              first = b;
-            } else {
-              STACK_PUSH5(ISAd, first, a, limit, trlink);
-              ISAd += incr, first = a, last = b, limit = next;
-            }
-          } else {
-            STACK_PUSH5(ISAd, first, a, limit, trlink);
-            STACK_PUSH5(ISAd, b, last, limit, trlink);
-            ISAd += incr, first = a, last = b, limit = next;
-          }
-        }
-      } else {
-        if((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; }
-        if((a - first) <= (last - b)) {
-          if(1 < (a - first)) {
-            STACK_PUSH5(ISAd, b, last, limit, trlink);
-            last = a;
-          } else if(1 < (last - b)) {
-            first = b;
-          } else {
-            STACK_POP5(ISAd, first, last, limit, trlink);
-          }
-        } else {
-          if(1 < (last - b)) {
-            STACK_PUSH5(ISAd, first, a, limit, trlink);
-            first = b;
-          } else if(1 < (a - first)) {
-            last = a;
-          } else {
-            STACK_POP5(ISAd, first, last, limit, trlink);
-          }
-        }
-      }
-    } else {
-      if(trbudget_check(budget, last - first)) {
-        limit = tr_ilg(last - first), ISAd += incr;
-      } else {
-        if(0 <= trlink) { stack[trlink].d = -1; }
-        STACK_POP5(ISAd, first, last, limit, trlink);
-      }
-    }
-  }
-#undef STACK_SIZE
-}
-
-
-
-/*---------------------------------------------------------------------------*/
-
-/* Tandem repeat sort */
-static
-void
-trsort(int *ISA, int *SA, int n, int depth) {
-  int *ISAd;
-  int *first, *last;
-  trbudget_t budget;
-  int t, skip, unsorted;
-
-  trbudget_init(&budget, tr_ilg(n) * 2 / 3, n);
-/*  trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */
-  for(ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) {
-    first = SA;
-    skip = 0;
-    unsorted = 0;
-    do {
-      if((t = *first) < 0) { first -= t; skip += t; }
-      else {
-        if(skip != 0) { *(first + skip) = skip; skip = 0; }
-        last = SA + ISA[t] + 1;
-        if(1 < (last - first)) {
-          budget.count = 0;
-          tr_introsort(ISA, ISAd, SA, first, last, &budget);
-          if(budget.count != 0) { unsorted += budget.count; }
-          else { skip = first - last; }
-        } else if((last - first) == 1) {
-          skip = -1;
-        }
-        first = last;
-      }
-    } while(first < (SA + n));
-    if(skip != 0) { *(first + skip) = skip; }
-    if(unsorted == 0) { break; }
-  }
-}
-
-
-/*---------------------------------------------------------------------------*/
-
-/* Sorts suffixes of type B*. */
-static
-int
-sort_typeBstar(const unsigned char *T, int *SA,
-               int *bucket_A, int *bucket_B,
-               int n, int openMP) {
-  int *PAb, *ISAb, *buf;
-#ifdef LIBBSC_OPENMP
-  int *curbuf;
-  int l;
-#endif
-  int i, j, k, t, m, bufsize;
-  int c0, c1;
-#ifdef LIBBSC_OPENMP
-  int d0, d1;
-#endif
-  (void)openMP;
-
-  /* Initialize bucket arrays. */
-  for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
-  for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }
-
-  /* Count the number of occurrences of the first one or two characters of each
-     type A, B and B* suffix. Moreover, store the beginning position of all
-     type B* suffixes into the array SA. */
-  for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
-    /* type A suffix. */
-    do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
-    if(0 <= i) {
-      /* type B* suffix. */
-      ++BUCKET_BSTAR(c0, c1);
-      SA[--m] = i;
-      /* type B suffix. */
-      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
-        ++BUCKET_B(c0, c1);
-      }
-    }
-  }
-  m = n - m;
-/*
-note:
-  A type B* suffix is lexicographically smaller than a type B suffix that
-  begins with the same first two characters.
-*/
-
-  /* Calculate the index of start/end point of each bucket. */
-  for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
-    t = i + BUCKET_A(c0);
-    BUCKET_A(c0) = i + j; /* start point */
-    i = t + BUCKET_B(c0, c0);
-    for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
-      j += BUCKET_BSTAR(c0, c1);
-      BUCKET_BSTAR(c0, c1) = j; /* end point */
-      i += BUCKET_B(c0, c1);
-    }
-  }
-
-  if(0 < m) {
-    /* Sort the type B* suffixes by their first two characters. */
-    PAb = SA + n - m; ISAb = SA + m;
-    for(i = m - 2; 0 <= i; --i) {
-      t = PAb[i], c0 = T[t], c1 = T[t + 1];
-      SA[--BUCKET_BSTAR(c0, c1)] = i;
-    }
-    t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
-    SA[--BUCKET_BSTAR(c0, c1)] = m - 1;
-
-    /* Sort the type B* substrings using sssort. */
-#ifdef LIBBSC_OPENMP
-    if (openMP)
-    {
-        buf = SA + m;
-        c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
-#pragma omp parallel default(shared) private(bufsize, curbuf, k, l, d0, d1)
-        {
-          bufsize = (n - (2 * m)) / omp_get_num_threads();
-          curbuf = buf + omp_get_thread_num() * bufsize;
-          k = 0;
-          for(;;) {
-            #pragma omp critical(sssort_lock)
-            {
-              if(0 < (l = j)) {
-                d0 = c0, d1 = c1;
-                do {
-                  k = BUCKET_BSTAR(d0, d1);
-                  if(--d1 <= d0) {
-                    d1 = ALPHABET_SIZE - 1;
-                    if(--d0 < 0) { break; }
-                  }
-                } while(((l - k) <= 1) && (0 < (l = k)));
-                c0 = d0, c1 = d1, j = k;
-              }
-            }
-            if(l == 0) { break; }
-            sssort(T, PAb, SA + k, SA + l,
-                   curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
-          }
-        }
-    }
-    else
-    {
-        buf = SA + m, bufsize = n - (2 * m);
-        for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
-          for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
-            i = BUCKET_BSTAR(c0, c1);
-            if(1 < (j - i)) {
-              sssort(T, PAb, SA + i, SA + j,
-                     buf, bufsize, 2, n, *(SA + i) == (m - 1));
-            }
-          }
-        }
-    }
-#else
-    buf = SA + m, bufsize = n - (2 * m);
-    for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
-      for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
-        i = BUCKET_BSTAR(c0, c1);
-        if(1 < (j - i)) {
-          sssort(T, PAb, SA + i, SA + j,
-                 buf, bufsize, 2, n, *(SA + i) == (m - 1));
-        }
-      }
-    }
-#endif
-
-    /* Compute ranks of type B* substrings. */
-    for(i = m - 1; 0 <= i; --i) {
-      if(0 <= SA[i]) {
-        j = i;
-        do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
-        SA[i + 1] = i - j;
-        if(i <= 0) { break; }
-      }
-      j = i;
-      do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
-      ISAb[SA[i]] = j;
-    }
-
-    /* Construct the inverse suffix array of type B* suffixes using trsort. */
-    trsort(ISAb, SA, m, 1);
-
-    /* Set the sorted order of type B* suffixes. */
-    for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
-      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
-      if(0 <= i) {
-        t = i;
-        for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
-        SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
-      }
-    }
-
-    /* Calculate the index of start/end point of each bucket. */
-    BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
-    for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
-      i = BUCKET_A(c0 + 1) - 1;
-      for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
-        t = i - BUCKET_B(c0, c1);
-        BUCKET_B(c0, c1) = i; /* end point */
-
-        /* Move all type B* suffixes to the correct position. */
-        for(i = t, j = BUCKET_BSTAR(c0, c1);
-            j <= k;
-            --i, --k) { SA[i] = SA[k]; }
-      }
-      BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
-      BUCKET_B(c0, c0) = i; /* end point */
-    }
-  }
-
-  return m;
-}
-
-/* Constructs the suffix array by using the sorted order of type B* suffixes. */
-static
-void
-construct_SA(const unsigned char *T, int *SA,
-             int *bucket_A, int *bucket_B,
-             int n, int m) {
-  int *i, *j, *k;
-  int s;
-  int c0, c1, c2;
-
-  if(0 < m) {
-    /* Construct the sorted order of type B suffixes by using
-       the sorted order of type B* suffixes. */
-    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
-      /* Scan the suffix array from right to left. */
-      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
-          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
-          i <= j;
-          --j) {
-        if(0 < (s = *j)) {
-          assert(T[s] == c1);
-          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
-          assert(T[s - 1] <= T[s]);
-          *j = ~s;
-          c0 = T[--s];
-          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
-          if(c0 != c2) {
-            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
-            k = SA + BUCKET_B(c2 = c0, c1);
-          }
-          assert(k < j); assert(k != NULL);
-          *k-- = s;
-        } else {
-          assert(((s == 0) && (T[s] == c1)) || (s < 0));
-          *j = ~s;
-        }
-      }
-    }
-  }
-
-  /* Construct the suffix array by using
-     the sorted order of type B suffixes. */
-  k = SA + BUCKET_A(c2 = T[n - 1]);
-  *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
-  /* Scan the suffix array from left to right. */
-  for(i = SA, j = SA + n; i < j; ++i) {
-    if(0 < (s = *i)) {
-      assert(T[s - 1] >= T[s]);
-      c0 = T[--s];
-      if((s == 0) || (T[s - 1] < c0)) { s = ~s; }
-      if(c0 != c2) {
-        BUCKET_A(c2) = k - SA;
-        k = SA + BUCKET_A(c2 = c0);
-      }
-      assert(i < k);
-      *k++ = s;
-    } else {
-      assert(s < 0);
-      *i = ~s;
-    }
-  }
-}
-
-/* Constructs the Burrows-Wheeler transformed string directly
-   by using the sorted order of type B* suffixes. */
-static
-int
-construct_BWT(const unsigned char *T, int *SA,
-              int *bucket_A, int *bucket_B,
-              int n, int m) {
-  int *i, *j, *k, *orig;
-  int s;
-  int c0, c1, c2;
-
-  if(0 < m) {
-    /* Construct the sorted order of type B suffixes by using
-       the sorted order of type B* suffixes. */
-    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
-      /* Scan the suffix array from right to left. */
-      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
-          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
-          i <= j;
-          --j) {
-        if(0 < (s = *j)) {
-          assert(T[s] == c1);
-          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
-          assert(T[s - 1] <= T[s]);
-          c0 = T[--s];
-          *j = ~((int)c0);
-          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
-          if(c0 != c2) {
-            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
-            k = SA + BUCKET_B(c2 = c0, c1);
-          }
-          assert(k < j); assert(k != NULL);
-          *k-- = s;
-        } else if(s != 0) {
-          *j = ~s;
-#ifndef NDEBUG
-        } else {
-          assert(T[s] == c1);
-#endif
-        }
-      }
-    }
-  }
-
-  /* Construct the BWTed string by using
-     the sorted order of type B suffixes. */
-  k = SA + BUCKET_A(c2 = T[n - 1]);
-  *k++ = (T[n - 2] < c2) ? ~((int)T[n - 2]) : (n - 1);
-  /* Scan the suffix array from left to right. */
-  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
-    if(0 < (s = *i)) {
-      assert(T[s - 1] >= T[s]);
-      c0 = T[--s];
-      *i = c0;
-      if((0 < s) && (T[s - 1] < c0)) { s = ~((int)T[s - 1]); }
-      if(c0 != c2) {
-        BUCKET_A(c2) = k - SA;
-        k = SA + BUCKET_A(c2 = c0);
-      }
-      assert(i < k);
-      *k++ = s;
-    } else if(s != 0) {
-      *i = ~s;
-    } else {
-      orig = i;
-    }
-  }
-
-  return orig - SA;
-}
-
-/* Constructs the Burrows-Wheeler transformed string directly by using the
-   sorted order of type B* suffixes, recording secondary indexes as it goes. */
-static
-int
-construct_BWT_indexes(const unsigned char *T, int *SA,
-                      int *bucket_A, int *bucket_B,
-                      int n, int m,
-                      unsigned char * num_indexes, int * indexes) {
-  int *i, *j, *k, *orig;
-  int s;
-  int c0, c1, c2;
-
-  int mod = n / 8;
-  {
-      mod |= mod >> 1;  mod |= mod >> 2;
-      mod |= mod >> 4;  mod |= mod >> 8;
-      mod |= mod >> 16; mod >>= 1;
-
-      *num_indexes = (unsigned char)((n - 1) / (mod + 1));
-  }
-
-  if(0 < m) {
-    /* Construct the sorted order of type B suffixes by using
-       the sorted order of type B* suffixes. */
-    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
-      /* Scan the suffix array from right to left. */
-      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
-          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
-          i <= j;
-          --j) {
-        if(0 < (s = *j)) {
-          assert(T[s] == c1);
-          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
-          assert(T[s - 1] <= T[s]);
-
-          if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = j - SA;
-
-          c0 = T[--s];
-          *j = ~((int)c0);
-          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
-          if(c0 != c2) {
-            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
-            k = SA + BUCKET_B(c2 = c0, c1);
-          }
-          assert(k < j); assert(k != NULL);
-          *k-- = s;
-        } else if(s != 0) {
-          *j = ~s;
-#ifndef NDEBUG
-        } else {
-          assert(T[s] == c1);
-#endif
-        }
-      }
-    }
-  }
-
-  /* Construct the BWTed string by using
-     the sorted order of type B suffixes. */
-  k = SA + BUCKET_A(c2 = T[n - 1]);
-  if (T[n - 2] < c2) {
-    if (((n - 1) & mod) == 0) indexes[(n - 1) / (mod + 1) - 1] = k - SA;
-    *k++ = ~((int)T[n - 2]);
-  }
-  else {
-    *k++ = n - 1;
-  }
-
-  /* Scan the suffix array from left to right. */
-  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
-    if(0 < (s = *i)) {
-      assert(T[s - 1] >= T[s]);
-
-      if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = i - SA;
-
-      c0 = T[--s];
-      *i = c0;
-      if(c0 != c2) {
-        BUCKET_A(c2) = k - SA;
-        k = SA + BUCKET_A(c2 = c0);
-      }
-      assert(i < k);
-      if((0 < s) && (T[s - 1] < c0)) {
-          if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = k - SA;
-          *k++ = ~((int)T[s - 1]);
-      } else
-        *k++ = s;
-    } else if(s != 0) {
-      *i = ~s;
-    } else {
-      orig = i;
-    }
-  }
-
-  return orig - SA;
-}
-
-
-/*---------------------------------------------------------------------------*/
-
-/*- Function -*/
-
-int
-divsufsort(const unsigned char *T, int *SA, int n, int openMP) {
-  int *bucket_A, *bucket_B;
-  int m;
-  int err = 0;
-
-  /* Check arguments. */
-  if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
-  else if(n == 0) { return 0; }
-  else if(n == 1) { SA[0] = 0; return 0; }
-  else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; }
-
-  bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int));
-  bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int));
-
-  /* Suffixsort. */
-  if((bucket_A != NULL) && (bucket_B != NULL)) {
-    m = sort_typeBstar(T, SA, bucket_A, bucket_B, n, openMP);
-    construct_SA(T, SA, bucket_A, bucket_B, n, m);
-  } else {
-    err = -2;
-  }
-
-  free(bucket_B);
-  free(bucket_A);
-
-  return err;
-}
-
-int
-divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP) {
-  int *B;
-  int *bucket_A, *bucket_B;
-  int m, pidx, i;
-
-  /* Check arguments. */
-  if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
-  else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }
-
-  if((B = A) == NULL) { B = (int *)malloc((size_t)(n + 1) * sizeof(int)); }
-  bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int));
-  bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int));
-
-  /* Burrows-Wheeler Transform. */
-  if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
-    m = sort_typeBstar(T, B, bucket_A, bucket_B, n, openMP);
-
-    if (num_indexes == NULL || indexes == NULL) {
-        pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);
-    } else {
-        pidx = construct_BWT_indexes(T, B, bucket_A, bucket_B, n, m, num_indexes, indexes);
-    }
-
-    /* Copy to output string. */
-    U[0] = T[n - 1];
-    for(i = 0; i < pidx; ++i) { U[i + 1] = (unsigned char)B[i]; }
-    for(i += 1; i < n; ++i) { U[i] = (unsigned char)B[i]; }
-    pidx += 1;
-  } else {
-    pidx = -2;
-  }
-
-  free(bucket_B);
-  free(bucket_A);
-  if(A == NULL) { free(B); }
-
-  return pidx;
-}
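The classification performed at the top of sort_typeBstar above can be stated compactly: scanning right to left, suffix n-1 is type A; suffix i is type A when T[i] > T[i+1], type B when T[i] < T[i+1], and inherits the type of suffix i+1 when the two characters are equal; a type B suffix immediately followed by a type A suffix is the B* kind that gets bucketed and sorted first. A small standalone sketch of that rule, for illustration only and not part of the library:

```c
#include <stdio.h>
#include <string.h>

enum { TYPE_A, TYPE_B, TYPE_BSTAR };

/* Classify each suffix of T[0..n-1] the way sort_typeBstar scans them. */
static void classify(const unsigned char *T, int n, int *type) {
    int i;
    type[n - 1] = TYPE_A;                                 /* last suffix is type A */
    for (i = n - 2; i >= 0; --i) {
        if (T[i] > T[i + 1])      type[i] = TYPE_A;
        else if (T[i] < T[i + 1]) type[i] = TYPE_B;
        else                      type[i] = type[i + 1];  /* equal characters: inherit */
    }
    for (i = 0; i < n - 1; ++i)                           /* B followed by A => B* */
        if (type[i] == TYPE_B && type[i + 1] == TYPE_A) type[i] = TYPE_BSTAR;
}

int main(void) {
    const unsigned char T[] = "abracadabra";
    int n = (int)strlen((const char *)T), i;
    int type[sizeof T];
    classify(T, n, type);
    for (i = 0; i < n; ++i)
        printf("%c  %s\n", T[i],
               type[i] == TYPE_A ? "A" : type[i] == TYPE_BSTAR ? "B*" : "B");
    return 0;
}
```

Running it on "abracadabra" marks positions 1, 3, 5 and 8 as B*; those are the suffixes that sort_typeBstar stores into SA and hands to sssort and trsort.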
diff --git a/vendor/github.com/DataDog/zstd/divsufsort.h b/vendor/github.com/DataDog/zstd/divsufsort.h
deleted file mode 100644
index 5440994..0000000
--- a/vendor/github.com/DataDog/zstd/divsufsort.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * divsufsort.h for libdivsufsort-lite
- * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _DIVSUFSORT_H
-#define _DIVSUFSORT_H 1
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-
-/*- Prototypes -*/
-
-/**
- * Constructs the suffix array of a given string.
- * @param T [0..n-1] The input string.
- * @param SA [0..n-1] The output array of suffixes.
- * @param n The length of the given string.
- * @param openMP enables OpenMP optimization.
- * @return 0 if no error occurred, -1 or -2 otherwise.
- */
-int
-divsufsort(const unsigned char *T, int *SA, int n, int openMP);
-
-/**
- * Constructs the Burrows-Wheeler transformed string of a given string.
- * @param T [0..n-1] The input string.
- * @param U [0..n-1] The output string. (can be T)
- * @param A [0..n-1] The temporary array. (can be NULL)
- * @param n The length of the given string.
- * @param num_indexes The length of secondary indexes array. (can be NULL)
- * @param indexes The secondary indexes array. (can be NULL)
- * @param openMP enables OpenMP optimization.
- * @return The primary index if no error occurred, -1 or -2 otherwise.
- */
-int
-divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP);
-
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif /* __cplusplus */
-
-#endif /* _DIVSUFSORT_H */
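The two prototypes documented above are the whole public surface of this unit. A minimal caller sketch, assuming divsufsort.h is on the include path and divsufsort.c is compiled into the program; error handling is kept to the bare minimum:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "divsufsort.h"

int main(void) {
    const unsigned char T[] = "abracadabra";
    int n = (int)strlen((const char *)T);
    int *SA = (int *)malloc(n * sizeof(int));
    unsigned char *U = (unsigned char *)malloc(n);
    int i, pidx;

    if (SA == NULL || U == NULL) return 1;

    /* Suffix array: SA[i] is the start position of the i-th smallest suffix. */
    if (divsufsort(T, SA, n, 0 /* openMP off */) != 0) return 1;
    for (i = 0; i < n; ++i)
        printf("SA[%2d] = %2d  %s\n", i, SA[i], (const char *)T + SA[i]);

    /* Direct BWT; the temporary array and secondary indexes are optional (NULL). */
    pidx = divbwt(T, U, NULL, n, NULL, NULL, 0);
    if (pidx < 0) return 1;
    printf("primary index = %d, BWT = %.*s\n", pidx, n, (const char *)U);

    free(U);
    free(SA);
    return 0;
}
```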
diff --git a/vendor/github.com/DataDog/zstd/entropy_common.c b/vendor/github.com/DataDog/zstd/entropy_common.c
deleted file mode 100644
index b12944e..0000000
--- a/vendor/github.com/DataDog/zstd/entropy_common.c
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
-   Common functions of New Generation Entropy library
-   Copyright (C) 2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-*************************************************************************** */
-
-/* *************************************
-*  Dependencies
-***************************************/
-#include "mem.h"
-#include "error_private.h"       /* ERR_*, ERROR */
-#define FSE_STATIC_LINKING_ONLY  /* FSE_MIN_TABLELOG */
-#include "fse.h"
-#define HUF_STATIC_LINKING_ONLY  /* HUF_TABLELOG_ABSOLUTEMAX */
-#include "huf.h"
-
-
-/*===   Version   ===*/
-unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
-
-
-/*===   Error Management   ===*/
-unsigned FSE_isError(size_t code) { return ERR_isError(code); }
-const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
-
-unsigned HUF_isError(size_t code) { return ERR_isError(code); }
-const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
-
-
-/*-**************************************************************
-*  FSE NCount encoding-decoding
-****************************************************************/
-size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
-                 const void* headerBuffer, size_t hbSize)
-{
-    const BYTE* const istart = (const BYTE*) headerBuffer;
-    const BYTE* const iend = istart + hbSize;
-    const BYTE* ip = istart;
-    int nbBits;
-    int remaining;
-    int threshold;
-    U32 bitStream;
-    int bitCount;
-    unsigned charnum = 0;
-    int previous0 = 0;
-
-    if (hbSize < 4) {
-        /* This function only works when hbSize >= 4 */
-        char buffer[4];
-        memset(buffer, 0, sizeof(buffer));
-        memcpy(buffer, headerBuffer, hbSize);
-        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
-                                                    buffer, sizeof(buffer));
-            if (FSE_isError(countSize)) return countSize;
-            if (countSize > hbSize) return ERROR(corruption_detected);
-            return countSize;
-    }   }
-    assert(hbSize >= 4);
-
-    /* init */
-    memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
-    bitStream = MEM_readLE32(ip);
-    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
-    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
-    bitStream >>= 4;
-    bitCount = 4;
-    *tableLogPtr = nbBits;
-    remaining = (1<<nbBits)+1;
-    threshold = 1<<nbBits;
-    nbBits++;
-
-    while ((remaining>1) & (charnum<=*maxSVPtr)) {
-        if (previous0) {
-            unsigned n0 = charnum;
-            while ((bitStream & 0xFFFF) == 0xFFFF) {
-                n0 += 24;
-                if (ip < iend-5) {
-                    ip += 2;
-                    bitStream = MEM_readLE32(ip) >> bitCount;
-                } else {
-                    bitStream >>= 16;
-                    bitCount   += 16;
-            }   }
-            while ((bitStream & 3) == 3) {
-                n0 += 3;
-                bitStream >>= 2;
-                bitCount += 2;
-            }
-            n0 += bitStream & 3;
-            bitCount += 2;
-            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
-            while (charnum < n0) normalizedCounter[charnum++] = 0;
-            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
-                assert((bitCount >> 3) <= 3); /* For first condition to work */
-                ip += bitCount>>3;
-                bitCount &= 7;
-                bitStream = MEM_readLE32(ip) >> bitCount;
-            } else {
-                bitStream >>= 2;
-        }   }
-        {   int const max = (2*threshold-1) - remaining;
-            int count;
-
-            if ((bitStream & (threshold-1)) < (U32)max) {
-                count = bitStream & (threshold-1);
-                bitCount += nbBits-1;
-            } else {
-                count = bitStream & (2*threshold-1);
-                if (count >= threshold) count -= max;
-                bitCount += nbBits;
-            }
-
-            count--;   /* extra accuracy */
-            remaining -= count < 0 ? -count : count;   /* -1 means +1 */
-            normalizedCounter[charnum++] = (short)count;
-            previous0 = !count;
-            while (remaining < threshold) {
-                nbBits--;
-                threshold >>= 1;
-            }
-
-            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
-                ip += bitCount>>3;
-                bitCount &= 7;
-            } else {
-                bitCount -= (int)(8 * (iend - 4 - ip));
-                ip = iend - 4;
-            }
-            bitStream = MEM_readLE32(ip) >> (bitCount & 31);
-    }   }   /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
-    if (remaining != 1) return ERROR(corruption_detected);
-    if (bitCount > 32) return ERROR(corruption_detected);
-    *maxSVPtr = charnum-1;
-
-    ip += (bitCount+7)>>3;
-    return ip-istart;
-}
-
-
-/*! HUF_readStats() :
-    Read compact Huffman tree, saved by HUF_writeCTable().
-    `huffWeight` is destination buffer.
-    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
-    @return : size read from `src`, or an error code.
-    Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
-*/
-size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
-                     U32* nbSymbolsPtr, U32* tableLogPtr,
-                     const void* src, size_t srcSize)
-{
-    U32 weightTotal;
-    const BYTE* ip = (const BYTE*) src;
-    size_t iSize;
-    size_t oSize;
-
-    if (!srcSize) return ERROR(srcSize_wrong);
-    iSize = ip[0];
-    /* memset(huffWeight, 0, hwSize);   *//* is not necessary, even though some analyzers complain ... */
-
-    if (iSize >= 128) {  /* special header */
-        oSize = iSize - 127;
-        iSize = ((oSize+1)/2);
-        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-        if (oSize >= hwSize) return ERROR(corruption_detected);
-        ip += 1;
-        {   U32 n;
-            for (n=0; n<oSize; n+=2) {
-                huffWeight[n]   = ip[n/2] >> 4;
-                huffWeight[n+1] = ip[n/2] & 15;
-    }   }   }
-    else  {   /* header compressed with FSE (normal case) */
-        FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)];  /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */
-        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-        oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6);   /* max (hwSize-1) values decoded, as last one is implied */
-        if (FSE_isError(oSize)) return oSize;
-    }
-
-    /* collect weight stats */
-    memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
-    weightTotal = 0;
-    {   U32 n; for (n=0; n<oSize; n++) {
-            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
-            rankStats[huffWeight[n]]++;
-            weightTotal += (1 << huffWeight[n]) >> 1;
-    }   }
-    if (weightTotal == 0) return ERROR(corruption_detected);
-
-    /* get last non-null symbol weight (implied, total must be 2^n) */
-    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;
-        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
-        *tableLogPtr = tableLog;
-        /* determine last weight */
-        {   U32 const total = 1 << tableLog;
-            U32 const rest = total - weightTotal;
-            U32 const verif = 1 << BIT_highbit32(rest);
-            U32 const lastWeight = BIT_highbit32(rest) + 1;
-            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
-            huffWeight[oSize] = (BYTE)lastWeight;
-            rankStats[lastWeight]++;
-    }   }
-
-    /* check tree construction validity */
-    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
-
-    /* results */
-    *nbSymbolsPtr = (U32)(oSize+1);
-    return iSize+1;
-}
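The tail of HUF_readStats above recovers the one weight that is never stored: each weight w contributes 2^(w-1) to the total, and the grand total must land exactly on a power of two. A standalone sketch of that recovery, where highbit32 is a simple portable stand-in for BIT_highbit32 and the sample weights are made up:

```c
#include <stdio.h>

/* Index of the highest set bit; portable stand-in for BIT_highbit32. */
static unsigned highbit32(unsigned v) {
    unsigned r = 0;
    while (v >>= 1) r++;
    return r;
}

/* Given every stored weight, return the implied last weight, or -1 if the
   stream is inconsistent (the remainder is not a clean power of two). */
static int implied_last_weight(const unsigned char *weights, unsigned count) {
    unsigned total = 0, n, tableLog, rest;
    for (n = 0; n < count; n++)
        total += (1u << weights[n]) >> 1;   /* weight w adds 2^(w-1); weight 0 adds nothing */
    if (total == 0) return -1;
    tableLog = highbit32(total) + 1;        /* smallest log with 2^tableLog > total */
    rest = (1u << tableLog) - total;
    if (rest != (1u << highbit32(rest))) return -1;
    return (int)(highbit32(rest) + 1);
}

int main(void) {
    const unsigned char stored[] = { 3, 3, 2, 2 };   /* hypothetical decoded weights */
    /* 4 + 4 + 2 + 2 = 12, the next power of two is 16, so the last weight is 3 (adds 4). */
    printf("implied last weight = %d\n", implied_last_weight(stored, 4));
    return 0;
}
```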
diff --git a/vendor/github.com/DataDog/zstd/error_private.c b/vendor/github.com/DataDog/zstd/error_private.c
deleted file mode 100644
index 7c1bb67..0000000
--- a/vendor/github.com/DataDog/zstd/error_private.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-/* The purpose of this file is to have a single list of error strings embedded in binary */
-
-#include "error_private.h"
-
-const char* ERR_getErrorString(ERR_enum code)
-{
-#ifdef ZSTD_STRIP_ERROR_STRINGS
-    (void)code;
-    return "Error strings stripped";
-#else
-    static const char* const notErrorCode = "Unspecified error code";
-    switch( code )
-    {
-    case PREFIX(no_error): return "No error detected";
-    case PREFIX(GENERIC):  return "Error (generic)";
-    case PREFIX(prefix_unknown): return "Unknown frame descriptor";
-    case PREFIX(version_unsupported): return "Version not supported";
-    case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
-    case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
-    case PREFIX(corruption_detected): return "Corrupted block detected";
-    case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
-    case PREFIX(parameter_unsupported): return "Unsupported parameter";
-    case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
-    case PREFIX(init_missing): return "Context should be init first";
-    case PREFIX(memory_allocation): return "Allocation error : not enough memory";
-    case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
-    case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
-    case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
-    case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
-    case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
-    case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
-    case PREFIX(dictionary_wrong): return "Dictionary mismatch";
-    case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
-    case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
-    case PREFIX(srcSize_wrong): return "Src size is incorrect";
-    case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
-        /* following error codes are not stable and may be removed or changed in a future version */
-    case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
-    case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
-    case PREFIX(maxCode):
-    default: return notErrorCode;
-    }
-#endif
-}
diff --git a/vendor/github.com/DataDog/zstd/error_private.h b/vendor/github.com/DataDog/zstd/error_private.h
deleted file mode 100644
index 0d2fa7e..0000000
--- a/vendor/github.com/DataDog/zstd/error_private.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-/* Note : this module is expected to remain private, do not expose it */
-
-#ifndef ERROR_H_MODULE
-#define ERROR_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/* ****************************************
-*  Dependencies
-******************************************/
-#include <stddef.h>        /* size_t */
-#include "zstd_errors.h"  /* enum list */
-
-
-/* ****************************************
-*  Compiler-specific
-******************************************/
-#if defined(__GNUC__)
-#  define ERR_STATIC static __attribute__((unused))
-#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#  define ERR_STATIC static inline
-#elif defined(_MSC_VER)
-#  define ERR_STATIC static __inline
-#else
-#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
-#endif
-
-
-/*-****************************************
-*  Customization (error_public.h)
-******************************************/
-typedef ZSTD_ErrorCode ERR_enum;
-#define PREFIX(name) ZSTD_error_##name
-
-
-/*-****************************************
-*  Error codes handling
-******************************************/
-#undef ERROR   /* reported already defined on VS 2015 (Rich Geldreich) */
-#define ERROR(name) ZSTD_ERROR(name)
-#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
-
-ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
-
-ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
-
-
-/*-****************************************
-*  Error Strings
-******************************************/
-
-const char* ERR_getErrorString(ERR_enum code);   /* error_private.c */
-
-ERR_STATIC const char* ERR_getErrorName(size_t code)
-{
-    return ERR_getErrorString(ERR_getErrorCode(code));
-}
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ERROR_H_MODULE */
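The ERROR()/ERR_isError() pair above packs an error code into a size_t by negating it, so any return value in the top maxCode slots below SIZE_MAX is an error and everything else is a valid size. A self-contained sketch of the same scheme with a made-up enum; the names and values below are illustrative, not zstd's:

```c
#include <stdio.h>
#include <stddef.h>

/* Illustrative error list; zstd's real one lives in zstd_errors.h. */
typedef enum { DEMO_no_error = 0, DEMO_srcSize_wrong = 7, DEMO_maxCode = 20 } DEMO_ErrorCode;

#define DEMO_ERROR(name) ((size_t)-DEMO_##name)   /* same negation trick as ZSTD_ERROR(name) */

static unsigned demo_isError(size_t code) { return code > DEMO_ERROR(maxCode); }
static DEMO_ErrorCode demo_getErrorCode(size_t code) {
    return demo_isError(code) ? (DEMO_ErrorCode)(0 - code) : DEMO_no_error;
}

/* A function whose return value is either a byte count or an encoded error. */
static size_t demo_decode(size_t srcSize) {
    if (srcSize == 0) return DEMO_ERROR(srcSize_wrong);  /* wraps to just below SIZE_MAX */
    return srcSize;                                      /* a plausible "bytes consumed" result */
}

int main(void) {
    size_t r = demo_decode(0);
    if (demo_isError(r))
        printf("error code %d\n", (int)demo_getErrorCode(r));   /* prints 7 */
    return 0;
}
```

The benefit of the scheme is that success and failure share one unsigned return type, which is exactly how the FSE/HUF entry points earlier in this vendor tree report both sizes and errors.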
diff --git a/vendor/github.com/DataDog/zstd/errors.go b/vendor/github.com/DataDog/zstd/errors.go
deleted file mode 100644
index 38db0d5..0000000
--- a/vendor/github.com/DataDog/zstd/errors.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package zstd
-
-/*
-#define ZSTD_STATIC_LINKING_ONLY
-#include "zstd.h"
-*/
-import "C"
-
-// ErrorCode is an error returned by the zstd library.
-type ErrorCode int
-
-// Error returns the error string given by zstd
-func (e ErrorCode) Error() string {
-	return C.GoString(C.ZSTD_getErrorName(C.size_t(e)))
-}
-
-func cIsError(code int) bool {
-	return int(C.ZSTD_isError(C.size_t(code))) != 0
-}
-
-// getError returns an error for the return code, or nil if it's not an error
-func getError(code int) error {
-	if code < 0 && cIsError(code) {
-		return ErrorCode(code)
-	}
-	return nil
-}
-
-// IsDstSizeTooSmallError returns whether the error corresponds to the zstd standard dstSize_tooSmall error
-func IsDstSizeTooSmallError(e error) bool {
-	if e != nil && e.Error() == "Destination buffer is too small" {
-		return true
-	}
-	return false
-}
diff --git a/vendor/github.com/DataDog/zstd/fastcover.c b/vendor/github.com/DataDog/zstd/fastcover.c
deleted file mode 100644
index 941bb5a..0000000
--- a/vendor/github.com/DataDog/zstd/fastcover.c
+++ /dev/null
@@ -1,747 +0,0 @@
-/*-*************************************
-*  Dependencies
-***************************************/
-#include <stdio.h>  /* fprintf */
-#include <stdlib.h> /* malloc, free, qsort */
-#include <string.h> /* memset */
-#include <time.h>   /* clock */
-
-#include "mem.h" /* read */
-#include "pool.h"
-#include "threading.h"
-#include "cover.h"
-#include "zstd_internal.h" /* includes zstd.h */
-#ifndef ZDICT_STATIC_LINKING_ONLY
-#define ZDICT_STATIC_LINKING_ONLY
-#endif
-#include "zdict.h"
-
-
-/*-*************************************
-*  Constants
-***************************************/
-#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
-#define FASTCOVER_MAX_F 31
-#define FASTCOVER_MAX_ACCEL 10
-#define DEFAULT_SPLITPOINT 0.75
-#define DEFAULT_F 20
-#define DEFAULT_ACCEL 1
-
-
-/*-*************************************
-*  Console display
-***************************************/
-static int g_displayLevel = 2;
-#define DISPLAY(...)                                                           \
-  {                                                                            \
-    fprintf(stderr, __VA_ARGS__);                                              \
-    fflush(stderr);                                                            \
-  }
-#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \
-  if (displayLevel >= l) {                                                     \
-    DISPLAY(__VA_ARGS__);                                                      \
-  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
-#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
-
-#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \
-  if (displayLevel >= l) {                                                     \
-    if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) {             \
-      g_time = clock();                                                        \
-      DISPLAY(__VA_ARGS__);                                                    \
-    }                                                                          \
-  }
-#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
-static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
-static clock_t g_time = 0;
-
-
-/*-*************************************
-* Hash Functions
-***************************************/
-static const U64 prime6bytes = 227718039650203ULL;
-static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
-static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
-
-static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
-static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
-static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
-
-
-/**
- * Hash the d-byte value pointed to by p and mod 2^f
- */
-static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 h, unsigned d) {
-  if (d == 6) {
-    return ZSTD_hash6Ptr(p, h) & ((1 << h) - 1);
-  }
-  return ZSTD_hash8Ptr(p, h) & ((1 << h) - 1);
-}
-
-
-/*-*************************************
-* Acceleration
-***************************************/
-typedef struct {
-  unsigned finalize;    /* Percentage of training samples used for ZDICT_finalizeDictionary */
-  unsigned skip;        /* Number of dmer skipped between each dmer counted in computeFrequency */
-} FASTCOVER_accel_t;
-
-
-static const FASTCOVER_accel_t FASTCOVER_defaultAccelParameters[FASTCOVER_MAX_ACCEL+1] = {
-  { 100, 0 },   /* accel = 0, should not happen because accel = 0 defaults to accel = 1 */
-  { 100, 0 },   /* accel = 1 */
-  { 50, 1 },   /* accel = 2 */
-  { 34, 2 },   /* accel = 3 */
-  { 25, 3 },   /* accel = 4 */
-  { 20, 4 },   /* accel = 5 */
-  { 17, 5 },   /* accel = 6 */
-  { 14, 6 },   /* accel = 7 */
-  { 13, 7 },   /* accel = 8 */
-  { 11, 8 },   /* accel = 9 */
-  { 10, 9 },   /* accel = 10 */
-};
-
-
-/*-*************************************
-* Context
-***************************************/
-typedef struct {
-  const BYTE *samples;
-  size_t *offsets;
-  const size_t *samplesSizes;
-  size_t nbSamples;
-  size_t nbTrainSamples;
-  size_t nbTestSamples;
-  size_t nbDmers;
-  U32 *freqs;
-  unsigned d;
-  unsigned f;
-  FASTCOVER_accel_t accelParams;
-} FASTCOVER_ctx_t;
-
-
-/*-*************************************
-*  Helper functions
-***************************************/
-/**
- * Selects the best segment in an epoch.
- * Segments are scored according to the function:
- *
- * Let F(d) be the frequency of all dmers with hash value d.
- * Let S_i be the hash value of the dmer at position i of segment S, which has length k.
- *
- *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
- *
- * Once the dmer with hash value d is in the dictionary we set F(d) = 0.
- */
-static COVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx,
-                                              U32 *freqs, U32 begin, U32 end,
-                                              ZDICT_cover_params_t parameters,
-                                              U16* segmentFreqs) {
-  /* Constants */
-  const U32 k = parameters.k;
-  const U32 d = parameters.d;
-  const U32 f = ctx->f;
-  const U32 dmersInK = k - d + 1;
-
-  /* Try each segment (activeSegment) and save the best (bestSegment) */
-  COVER_segment_t bestSegment = {0, 0, 0};
-  COVER_segment_t activeSegment;
-
-  /* Reset the activeDmers in the segment */
-  /* The activeSegment starts at the beginning of the epoch. */
-  activeSegment.begin = begin;
-  activeSegment.end = begin;
-  activeSegment.score = 0;
-
-  /* Slide the activeSegment through the whole epoch.
-   * Save the best segment in bestSegment.
-   */
-  while (activeSegment.end < end) {
-    /* Get hash value of current dmer */
-    const size_t idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d);
-
-    /* Add frequency of this index to score if this is the first occurrence of index in active segment */
-    if (segmentFreqs[idx] == 0) {
-      activeSegment.score += freqs[idx];
-    }
-    /* Increment end of segment and segmentFreqs*/
-    activeSegment.end += 1;
-    segmentFreqs[idx] += 1;
-    /* If the window is now too large, drop the first position */
-    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
-      /* Get hash value of the dmer to be eliminated from active segment */
-      const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
-      segmentFreqs[delIndex] -= 1;
-      /* Subtract frequency of this index from score if this is the last occurrence of this index in active segment */
-      if (segmentFreqs[delIndex] == 0) {
-        activeSegment.score -= freqs[delIndex];
-      }
-      /* Increment start of segment */
-      activeSegment.begin += 1;
-    }
-
-    /* If this segment is the best so far save it */
-    if (activeSegment.score > bestSegment.score) {
-      bestSegment = activeSegment;
-    }
-  }
-
-  /* Zero out rest of segmentFreqs array */
-  while (activeSegment.begin < end) {
-    const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
-    segmentFreqs[delIndex] -= 1;
-    activeSegment.begin += 1;
-  }
-
-  {
-    /*  Zero the frequency of hash value of each dmer covered by the chosen segment. */
-    U32 pos;
-    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
-      const size_t i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d);
-      freqs[i] = 0;
-    }
-  }
-
-  return bestSegment;
-}
-
-
-static int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters,
-                                     size_t maxDictSize, unsigned f,
-                                     unsigned accel) {
-  /* k, d, and f are required parameters */
-  if (parameters.d == 0 || parameters.k == 0) {
-    return 0;
-  }
-  /* d has to be 6 or 8 */
-  if (parameters.d != 6 && parameters.d != 8) {
-    return 0;
-  }
-  /* k <= maxDictSize */
-  if (parameters.k > maxDictSize) {
-    return 0;
-  }
-  /* d <= k */
-  if (parameters.d > parameters.k) {
-    return 0;
-  }
-  /* 0 < f <= FASTCOVER_MAX_F*/
-  if (f > FASTCOVER_MAX_F || f == 0) {
-    return 0;
-  }
-  /* 0 < splitPoint <= 1 */
-  if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) {
-    return 0;
-  }
-  /* 0 < accel <= 10 */
-  if (accel > 10 || accel == 0) {
-    return 0;
-  }
-  return 1;
-}
-
-
-/**
- * Clean up a context initialized with `FASTCOVER_ctx_init()`.
- */
-static void
-FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx)
-{
-    if (!ctx) return;
-
-    free(ctx->freqs);
-    ctx->freqs = NULL;
-
-    free(ctx->offsets);
-    ctx->offsets = NULL;
-}
-
-
-/**
- * Calculate the frequency of the hash value of each dmer in ctx->samples
- */
-static void
-FASTCOVER_computeFrequency(U32* freqs, const FASTCOVER_ctx_t* ctx)
-{
-    const unsigned f = ctx->f;
-    const unsigned d = ctx->d;
-    const unsigned skip = ctx->accelParams.skip;
-    const unsigned readLength = MAX(d, 8);
-    size_t i;
-    assert(ctx->nbTrainSamples >= 5);
-    assert(ctx->nbTrainSamples <= ctx->nbSamples);
-    for (i = 0; i < ctx->nbTrainSamples; i++) {
-        size_t start = ctx->offsets[i];  /* start of current dmer */
-        size_t const currSampleEnd = ctx->offsets[i+1];
-        while (start + readLength <= currSampleEnd) {
-            const size_t dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d);
-            freqs[dmerIndex]++;
-            start = start + skip + 1;
-        }
-    }
-}
-
-
-/**
- * Prepare a context for dictionary building.
- * The context is only dependent on the parameter `d` and can be used multiple
- * times.
- * Returns 0 on success or error code on error.
- * The context must be destroyed with `FASTCOVER_ctx_destroy()`.
- */
-static size_t
-FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx,
-                   const void* samplesBuffer,
-                   const size_t* samplesSizes, unsigned nbSamples,
-                   unsigned d, double splitPoint, unsigned f,
-                   FASTCOVER_accel_t accelParams)
-{
-    const BYTE* const samples = (const BYTE*)samplesBuffer;
-    const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
-    /* Split samples into testing and training sets */
-    const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
-    const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
-    const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
-    const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
-
-    /* Checks */
-    if (totalSamplesSize < MAX(d, sizeof(U64)) ||
-        totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) {
-        DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
-                    (unsigned)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20));
-        return ERROR(srcSize_wrong);
-    }
-
-    /* Check if there are at least 5 training samples */
-    if (nbTrainSamples < 5) {
-        DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid\n", nbTrainSamples);
-        return ERROR(srcSize_wrong);
-    }
-
-    /* Check if there's a testing sample */
-    if (nbTestSamples < 1) {
-        DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.\n", nbTestSamples);
-        return ERROR(srcSize_wrong);
-    }
-
-    /* Zero the context */
-    memset(ctx, 0, sizeof(*ctx));
-    DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
-                    (unsigned)trainingSamplesSize);
-    DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
-                    (unsigned)testSamplesSize);
-
-    ctx->samples = samples;
-    ctx->samplesSizes = samplesSizes;
-    ctx->nbSamples = nbSamples;
-    ctx->nbTrainSamples = nbTrainSamples;
-    ctx->nbTestSamples = nbTestSamples;
-    ctx->nbDmers = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
-    ctx->d = d;
-    ctx->f = f;
-    ctx->accelParams = accelParams;
-
-    /* The offsets of each file */
-    ctx->offsets = (size_t*)calloc((nbSamples + 1), sizeof(size_t));
-    if (ctx->offsets == NULL) {
-        DISPLAYLEVEL(1, "Failed to allocate scratch buffers \n");
-        FASTCOVER_ctx_destroy(ctx);
-        return ERROR(memory_allocation);
-    }
-
-    /* Fill offsets from the samplesSizes */
-    {   U32 i;
-        ctx->offsets[0] = 0;
-        assert(nbSamples >= 5);
-        for (i = 1; i <= nbSamples; ++i) {
-            ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
-        }
-    }
-
-    /* Initialize frequency array of size 2^f */
-    ctx->freqs = (U32*)calloc(((U64)1 << f), sizeof(U32));
-    if (ctx->freqs == NULL) {
-        DISPLAYLEVEL(1, "Failed to allocate frequency table \n");
-        FASTCOVER_ctx_destroy(ctx);
-        return ERROR(memory_allocation);
-    }
-
-    DISPLAYLEVEL(2, "Computing frequencies\n");
-    FASTCOVER_computeFrequency(ctx->freqs, ctx);
-
-    return 0;
-}
-
-
-/**
- * Given the prepared context build the dictionary.
- */
-static size_t
-FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx,
-                          U32* freqs,
-                          void* dictBuffer, size_t dictBufferCapacity,
-                          ZDICT_cover_params_t parameters,
-                          U16* segmentFreqs)
-{
-  BYTE *const dict = (BYTE *)dictBuffer;
-  size_t tail = dictBufferCapacity;
-  /* Divide the data into epochs. We will select one segment from each epoch. */
-  const COVER_epoch_info_t epochs = COVER_computeEpochs(
-      (U32)dictBufferCapacity, (U32)ctx->nbDmers, parameters.k, 1);
-  const size_t maxZeroScoreRun = 10;
-  size_t zeroScoreRun = 0;
-  size_t epoch;
-  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
-                (U32)epochs.num, (U32)epochs.size);
-  /* Loop through the epochs until there are no more segments or the dictionary
-   * is full.
-   */
-  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
-    const U32 epochBegin = (U32)(epoch * epochs.size);
-    const U32 epochEnd = epochBegin + epochs.size;
-    size_t segmentSize;
-    /* Select a segment */
-    COVER_segment_t segment = FASTCOVER_selectSegment(
-        ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs);
-
-    /* If the segment covers no dmers, then we are out of content.
-     * There may be new content in other epochs, so continue for some time.
-     */
-    if (segment.score == 0) {
-      if (++zeroScoreRun >= maxZeroScoreRun) {
-          break;
-      }
-      continue;
-    }
-    zeroScoreRun = 0;
-
-    /* Trim the segment if necessary and if it is too small then we are done */
-    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
-    if (segmentSize < parameters.d) {
-      break;
-    }
-
-    /* We fill the dictionary from the back to allow the best segments to be
-     * referenced with the smallest offsets.
-     */
-    tail -= segmentSize;
-    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
-    DISPLAYUPDATE(
-        2, "\r%u%%       ",
-        (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
-  }
-  DISPLAYLEVEL(2, "\r%79s\r", "");
-  return tail;
-}
-
-/**
- * Parameters for FASTCOVER_tryParameters().
- */
-typedef struct FASTCOVER_tryParameters_data_s {
-    const FASTCOVER_ctx_t* ctx;
-    COVER_best_t* best;
-    size_t dictBufferCapacity;
-    ZDICT_cover_params_t parameters;
-} FASTCOVER_tryParameters_data_t;
-
-
-/**
- * Tries a set of parameters and updates the COVER_best_t with the results.
- * This function is thread safe if zstd is compiled with multithreaded support.
- * It takes its parameters as an *OWNING* opaque pointer to support threading.
- */
-static void FASTCOVER_tryParameters(void *opaque)
-{
-  /* Save parameters as local variables */
-  FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t *)opaque;
-  const FASTCOVER_ctx_t *const ctx = data->ctx;
-  const ZDICT_cover_params_t parameters = data->parameters;
-  size_t dictBufferCapacity = data->dictBufferCapacity;
-  size_t totalCompressedSize = ERROR(GENERIC);
-  /* Initialize array to keep track of frequency of dmer within activeSegment */
-  U16* segmentFreqs = (U16 *)calloc(((U64)1 << ctx->f), sizeof(U16));
-  /* Allocate space for hash table, dict, and freqs */
-  BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
-  COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
-  U32 *freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32));
-  if (!segmentFreqs || !dict || !freqs) {
-    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
-    goto _cleanup;
-  }
-  /* Copy the frequencies because we need to modify them */
-  memcpy(freqs, ctx->freqs, ((U64)1 << ctx->f) * sizeof(U32));
-  /* Build the dictionary */
-  { const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity,
-                                                    parameters, segmentFreqs);
-
-    const unsigned nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100);
-    selection = COVER_selectDict(dict + tail, dictBufferCapacity - tail,
-         ctx->samples, ctx->samplesSizes, nbFinalizeSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,
-         totalCompressedSize);
-
-    if (COVER_dictSelectionIsError(selection)) {
-      DISPLAYLEVEL(1, "Failed to select dictionary\n");
-      goto _cleanup;
-    }
-  }
-_cleanup:
-  free(dict);
-  COVER_best_finish(data->best, parameters, selection);
-  free(data);
-  free(segmentFreqs);
-  COVER_dictSelectionFree(selection);
-  free(freqs);
-}
-
-
-static void
-FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams,
-                               ZDICT_cover_params_t* coverParams)
-{
-    coverParams->k = fastCoverParams.k;
-    coverParams->d = fastCoverParams.d;
-    coverParams->steps = fastCoverParams.steps;
-    coverParams->nbThreads = fastCoverParams.nbThreads;
-    coverParams->splitPoint = fastCoverParams.splitPoint;
-    coverParams->zParams = fastCoverParams.zParams;
-    coverParams->shrinkDict = fastCoverParams.shrinkDict;
-}
-
-
-static void
-FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams,
-                                   ZDICT_fastCover_params_t* fastCoverParams,
-                                   unsigned f, unsigned accel)
-{
-    fastCoverParams->k = coverParams.k;
-    fastCoverParams->d = coverParams.d;
-    fastCoverParams->steps = coverParams.steps;
-    fastCoverParams->nbThreads = coverParams.nbThreads;
-    fastCoverParams->splitPoint = coverParams.splitPoint;
-    fastCoverParams->f = f;
-    fastCoverParams->accel = accel;
-    fastCoverParams->zParams = coverParams.zParams;
-    fastCoverParams->shrinkDict = coverParams.shrinkDict;
-}
-
-
-ZDICTLIB_API size_t
-ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity,
-                                const void* samplesBuffer,
-                                const size_t* samplesSizes, unsigned nbSamples,
-                                ZDICT_fastCover_params_t parameters)
-{
-    BYTE* const dict = (BYTE*)dictBuffer;
-    FASTCOVER_ctx_t ctx;
-    ZDICT_cover_params_t coverParams;
-    FASTCOVER_accel_t accelParams;
-    /* Initialize global data */
-    g_displayLevel = parameters.zParams.notificationLevel;
-    /* Assign splitPoint and f if not provided */
-    parameters.splitPoint = 1.0;
-    parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f;
-    parameters.accel = parameters.accel == 0 ? DEFAULT_ACCEL : parameters.accel;
-    /* Convert to cover parameter */
-    memset(&coverParams, 0 , sizeof(coverParams));
-    FASTCOVER_convertToCoverParams(parameters, &coverParams);
-    /* Checks */
-    if (!FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f,
-                                   parameters.accel)) {
-      DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n");
-      return ERROR(parameter_outOfBound);
-    }
-    if (nbSamples == 0) {
-      DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n");
-      return ERROR(srcSize_wrong);
-    }
-    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
-      DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
-                   ZDICT_DICTSIZE_MIN);
-      return ERROR(dstSize_tooSmall);
-    }
-    /* Assign corresponding FASTCOVER_accel_t to accelParams*/
-    accelParams = FASTCOVER_defaultAccelParameters[parameters.accel];
-    /* Initialize context */
-    {
-      size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
-                            coverParams.d, parameters.splitPoint, parameters.f,
-                            accelParams);
-      if (ZSTD_isError(initVal)) {
-        DISPLAYLEVEL(1, "Failed to initialize context\n");
-        return initVal;
-      }
-    }
-    COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel);
-    /* Build the dictionary */
-    DISPLAYLEVEL(2, "Building dictionary\n");
-    {
-      /* Initialize array to keep track of frequency of dmer within activeSegment */
-      U16* segmentFreqs = (U16 *)calloc(((U64)1 << parameters.f), sizeof(U16));
-      const size_t tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer,
-                                                dictBufferCapacity, coverParams, segmentFreqs);
-      const unsigned nbFinalizeSamples = (unsigned)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100);
-      const size_t dictionarySize = ZDICT_finalizeDictionary(
-          dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
-          samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams);
-      if (!ZSTD_isError(dictionarySize)) {
-          DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
-                      (unsigned)dictionarySize);
-      }
-      FASTCOVER_ctx_destroy(&ctx);
-      free(segmentFreqs);
-      return dictionarySize;
-    }
-}
-
-
-ZDICTLIB_API size_t
-ZDICT_optimizeTrainFromBuffer_fastCover(
-                    void* dictBuffer, size_t dictBufferCapacity,
-                    const void* samplesBuffer,
-                    const size_t* samplesSizes, unsigned nbSamples,
-                    ZDICT_fastCover_params_t* parameters)
-{
-    ZDICT_cover_params_t coverParams;
-    FASTCOVER_accel_t accelParams;
-    /* constants */
-    const unsigned nbThreads = parameters->nbThreads;
-    const double splitPoint =
-        parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint;
-    const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
-    const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
-    const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
-    const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
-    const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
-    const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
-    const unsigned kIterations =
-        (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
-    const unsigned f = parameters->f == 0 ? DEFAULT_F : parameters->f;
-    const unsigned accel = parameters->accel == 0 ? DEFAULT_ACCEL : parameters->accel;
-    const unsigned shrinkDict = 0;
-    /* Local variables */
-    const int displayLevel = parameters->zParams.notificationLevel;
-    unsigned iteration = 1;
-    unsigned d;
-    unsigned k;
-    COVER_best_t best;
-    POOL_ctx *pool = NULL;
-    int warned = 0;
-    /* Checks */
-    if (splitPoint <= 0 || splitPoint > 1) {
-      LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n");
-      return ERROR(parameter_outOfBound);
-    }
-    if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) {
-      LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect accel\n");
-      return ERROR(parameter_outOfBound);
-    }
-    if (kMinK < kMaxD || kMaxK < kMinK) {
-      LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n");
-      return ERROR(parameter_outOfBound);
-    }
-    if (nbSamples == 0) {
-      LOCALDISPLAYLEVEL(displayLevel, 1, "FASTCOVER must have at least one input file\n");
-      return ERROR(srcSize_wrong);
-    }
-    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
-      LOCALDISPLAYLEVEL(displayLevel, 1, "dictBufferCapacity must be at least %u\n",
-                   ZDICT_DICTSIZE_MIN);
-      return ERROR(dstSize_tooSmall);
-    }
-    if (nbThreads > 1) {
-      pool = POOL_create(nbThreads, 1);
-      if (!pool) {
-        return ERROR(memory_allocation);
-      }
-    }
-    /* Initialization */
-    COVER_best_init(&best);
-    memset(&coverParams, 0 , sizeof(coverParams));
-    FASTCOVER_convertToCoverParams(*parameters, &coverParams);
-    accelParams = FASTCOVER_defaultAccelParameters[accel];
-    /* Turn down global display level to clean up display at level 2 and below */
-    g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
-    /* Loop through d first because each new value needs a new context */
-    LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
-                      kIterations);
-    for (d = kMinD; d <= kMaxD; d += 2) {
-      /* Initialize the context for this value of d */
-      FASTCOVER_ctx_t ctx;
-      LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
-      {
-        size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams);
-        if (ZSTD_isError(initVal)) {
-          LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
-          COVER_best_destroy(&best);
-          POOL_free(pool);
-          return initVal;
-        }
-      }
-      if (!warned) {
-        COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel);
-        warned = 1;
-      }
-      /* Loop through k reusing the same context */
-      for (k = kMinK; k <= kMaxK; k += kStepSize) {
-        /* Prepare the arguments */
-        FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t *)malloc(
-            sizeof(FASTCOVER_tryParameters_data_t));
-        LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
-        if (!data) {
-          LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
-          COVER_best_destroy(&best);
-          FASTCOVER_ctx_destroy(&ctx);
-          POOL_free(pool);
-          return ERROR(memory_allocation);
-        }
-        data->ctx = &ctx;
-        data->best = &best;
-        data->dictBufferCapacity = dictBufferCapacity;
-        data->parameters = coverParams;
-        data->parameters.k = k;
-        data->parameters.d = d;
-        data->parameters.splitPoint = splitPoint;
-        data->parameters.steps = kSteps;
-        data->parameters.shrinkDict = shrinkDict;
-        data->parameters.zParams.notificationLevel = g_displayLevel;
-        /* Check the parameters */
-        if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity,
-                                       data->ctx->f, accel)) {
-          DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n");
-          free(data);
-          continue;
-        }
-        /* Call the function and pass ownership of data to it */
-        COVER_best_start(&best);
-        if (pool) {
-          POOL_add(pool, &FASTCOVER_tryParameters, data);
-        } else {
-          FASTCOVER_tryParameters(data);
-        }
-        /* Print status */
-        LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%%       ",
-                           (unsigned)((iteration * 100) / kIterations));
-        ++iteration;
-      }
-      COVER_best_wait(&best);
-      FASTCOVER_ctx_destroy(&ctx);
-    }
-    LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
-    /* Fill the output buffer and parameters with output of the best parameters */
-    {
-      const size_t dictSize = best.dictSize;
-      if (ZSTD_isError(best.compressedSize)) {
-        const size_t compressedSize = best.compressedSize;
-        COVER_best_destroy(&best);
-        POOL_free(pool);
-        return compressedSize;
-      }
-      FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel);
-      memcpy(dictBuffer, best.dict, dictSize);
-      COVER_best_destroy(&best);
-      POOL_free(pool);
-      return dictSize;
-    }
-
-}
diff --git a/vendor/github.com/DataDog/zstd/fse.h b/vendor/github.com/DataDog/zstd/fse.h
deleted file mode 100644
index 811c670..0000000
--- a/vendor/github.com/DataDog/zstd/fse.h
+++ /dev/null
@@ -1,708 +0,0 @@
-/* ******************************************************************
-   FSE : Finite State Entropy codec
-   Public Prototypes declaration
-   Copyright (C) 2013-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#ifndef FSE_H
-#define FSE_H
-
-
-/*-*****************************************
-*  Dependencies
-******************************************/
-#include <stddef.h>    /* size_t, ptrdiff_t */
-
-
-/*-*****************************************
-*  FSE_PUBLIC_API : control library symbols visibility
-******************************************/
-#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
-#  define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
-#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
-#  define FSE_PUBLIC_API __declspec(dllexport)
-#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
-#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
-#else
-#  define FSE_PUBLIC_API
-#endif
-
-/*------   Version   ------*/
-#define FSE_VERSION_MAJOR    0
-#define FSE_VERSION_MINOR    9
-#define FSE_VERSION_RELEASE  0
-
-#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
-#define FSE_QUOTE(str) #str
-#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
-#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
-
-#define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
-FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */
-
-
-/*-****************************************
-*  FSE simple functions
-******************************************/
-/*! FSE_compress() :
-    Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
-    'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
-    @return : size of compressed data (<= dstCapacity).
-    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
-                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
-                     if FSE_isError(return), compression failed (more details using FSE_getErrorName())
-*/
-FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
-                             const void* src, size_t srcSize);
-
-/*! FSE_decompress():
-    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
-    into already allocated destination buffer 'dst', of size 'dstCapacity'.
-    @return : size of regenerated data (<= maxDstSize),
-              or an error code, which can be tested using FSE_isError() .
-
-    ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
-    Why ? : making this distinction requires a header.
-    Header management is intentionally delegated to the user layer, which can better manage special cases.
-*/
-FSE_PUBLIC_API size_t FSE_decompress(void* dst,  size_t dstCapacity,
-                               const void* cSrc, size_t cSrcSize);
-
-
-/*-*****************************************
-*  Tool functions
-******************************************/
-FSE_PUBLIC_API size_t FSE_compressBound(size_t size);       /* maximum compressed size */
-
-/* Error Management */
-FSE_PUBLIC_API unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */
-FSE_PUBLIC_API const char* FSE_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
-
-
-/*-*****************************************
-*  FSE advanced functions
-******************************************/
-/*! FSE_compress2() :
-    Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
-    Both parameters can be defined as '0' to mean : use default value
-    @return : size of compressed data
-    Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!!
-                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
-                     if FSE_isError(return), it's an error code.
-*/
-FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
-
-
-/*-*****************************************
-*  FSE detailed API
-******************************************/
-/*!
-FSE_compress() does the following:
-1. count symbol occurrence from source[] into table count[] (see hist.h)
-2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
-3. save normalized counters to memory buffer using writeNCount()
-4. build encoding table 'CTable' from normalized counters
-5. encode the data stream using encoding table 'CTable'
-
-FSE_decompress() does the following:
-1. read normalized counters with readNCount()
-2. build decoding table 'DTable' from normalized counters
-3. decode the data stream using decoding table 'DTable'
-
-The following API allows targeting specific sub-functions for advanced tasks.
-For example, it's possible to compress several blocks using the same 'CTable',
-or to save and provide normalized distribution using external method.
-*/
-
-/* *** COMPRESSION *** */
-
-/*! FSE_optimalTableLog():
-    dynamically downsize 'tableLog' when conditions are met.
-    It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
-    @return : recommended tableLog (necessarily <= 'maxTableLog') */
-FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
-
-/*! FSE_normalizeCount():
-    normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
-    'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
-    @return : tableLog,
-              or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
-                    const unsigned* count, size_t srcSize, unsigned maxSymbolValue);
-
-/*! FSE_NCountWriteBound():
-    Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
-    Typically useful for allocation purpose. */
-FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
-
-/*! FSE_writeNCount():
-    Compactly save 'normalizedCounter' into 'buffer'.
-    @return : size of the compressed table,
-              or an errorCode, which can be tested using FSE_isError(). */
-FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
-                                 const short* normalizedCounter,
-                                 unsigned maxSymbolValue, unsigned tableLog);
-
-/*! Constructor and Destructor of FSE_CTable.
-    Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
-typedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */
-FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
-FSE_PUBLIC_API void        FSE_freeCTable (FSE_CTable* ct);
-
-/*! FSE_buildCTable():
-    Builds `ct`, which must be already allocated, using FSE_createCTable().
-    @return : 0, or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
-
-/*! FSE_compress_usingCTable():
-    Compress `src` using `ct` into `dst` which must be already allocated.
-    @return : size of compressed data (<= `dstCapacity`),
-              or 0 if compressed data could not fit into `dst`,
-              or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
-
-/*!
-Tutorial :
-----------
-The first step is to count all symbols. FSE_count() does this job very fast.
-Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
-'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
-maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
-FSE_count() will return the number of occurrences of the most frequent symbol.
-This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
-If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
-
-The next step is to normalize the frequencies.
-FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
-It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
-You can use 'tableLog'==0 to mean "use default tableLog value".
-If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
-which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
-
-The result of FSE_normalizeCount() will be saved into a table,
-called 'normalizedCounter', which is a table of signed short.
-'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
-The return value is tableLog if everything proceeded as expected.
-It is 0 if there is a single symbol within distribution.
-If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
-
-'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
-'buffer' must be already allocated.
-For guaranteed success, buffer size must be at least FSE_headerBound().
-The result of the function is the number of bytes written into 'buffer'.
-If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
-
-'normalizedCounter' can then be used to create the compression table 'CTable'.
-The space required by 'CTable' must be already allocated, using FSE_createCTable().
-You can then use FSE_buildCTable() to fill 'CTable'.
-If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
-
-'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
-Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
-The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
-If it returns '0', compressed data could not fit into 'dst'.
-If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
-*/
-
-
-/* *** DECOMPRESSION *** */
-
-/*! FSE_readNCount():
-    Read compactly saved 'normalizedCounter' from 'rBuffer'.
-    @return : size read from 'rBuffer',
-              or an errorCode, which can be tested using FSE_isError().
-              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
-FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
-                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
-                           const void* rBuffer, size_t rBuffSize);
-
-/*! Constructor and Destructor of FSE_DTable.
-    Note that its size depends on 'tableLog' */
-typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
-FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
-FSE_PUBLIC_API void        FSE_freeDTable(FSE_DTable* dt);
-
-/*! FSE_buildDTable():
-    Builds 'dt', which must be already allocated, using FSE_createDTable().
-    return : 0, or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
-
-/*! FSE_decompress_usingDTable():
-    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
-    into `dst` which must be already allocated.
-    @return : size of regenerated data (necessarily <= `dstCapacity`),
-              or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
-
-/*!
-Tutorial :
-----------
-(Note : these functions only decompress FSE-compressed blocks.
- If block is uncompressed, use memcpy() instead
- If block is a single repeated byte, use memset() instead )
-
-The first step is to obtain the normalized frequencies of symbols.
-This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
-'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
-In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
-or size the table to handle worst case situations (typically 256).
-FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
-The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
-Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
-If there is an error, the function will return an error code, which can be tested using FSE_isError().
-
-The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
-This is performed by the function FSE_buildDTable().
-The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
-If there is an error, the function will return an error code, which can be tested using FSE_isError().
-
-`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
-`cSrcSize` must be strictly correct, otherwise decompression will fail.
-FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
-If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
-*/
-
-#endif  /* FSE_H */
-
-#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
-#define FSE_H_FSE_STATIC_LINKING_ONLY
-
-/* *** Dependency *** */
-#include "bitstream.h"
-
-
-/* *****************************************
-*  Static allocation
-*******************************************/
-/* FSE buffer bounds */
-#define FSE_NCOUNTBOUND 512
-#define FSE_BLOCKBOUND(size) (size + (size>>7))
-#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
-
-/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
-#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
-#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
-
-/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
-#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
-#define FSE_DTABLE_SIZE(maxTableLog)                   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
-
-
-/* *****************************************
- *  FSE advanced API
- ***************************************** */
-
-unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
-/**< same as FSE_optimalTableLog(), which uses `minus==2` */
-
-/* FSE_compress_wksp() :
- * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
- * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
- */
-#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
-size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
-
-size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
-/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
-
-size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
-/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
-
-/* FSE_buildCTable_wksp() :
- * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
- * `wkspSize` must be >= `(1<<tableLog)`.
- */
-size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
-
-size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
-/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
-
-size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
-/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
-
-size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);
-/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */
-
-typedef enum {
-   FSE_repeat_none,  /**< Cannot use the previous table */
-   FSE_repeat_check, /**< Can use the previous table but it must be checked */
-   FSE_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
- } FSE_repeat;
-
-/* *****************************************
-*  FSE symbol compression API
-*******************************************/
-/*!
-   This API consists of small unitary functions, which highly benefit from being inlined.
-   Hence their bodies are included in the next section.
-*/
-typedef struct {
-    ptrdiff_t   value;
-    const void* stateTable;
-    const void* symbolTT;
-    unsigned    stateLog;
-} FSE_CState_t;
-
-static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);
-
-static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);
-
-static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);
-
-/**<
-These functions are inner components of FSE_compress_usingCTable().
-They allow the creation of custom streams, mixing multiple tables and bit sources.
-
-A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
-So the first symbol you will encode is the last you will decode, like a LIFO stack.
-
-You will need a few variables to track your CStream. They are :
-
-FSE_CTable    ct;         // Provided by FSE_buildCTable()
-BIT_CStream_t bitStream;  // bitStream tracking structure
-FSE_CState_t  state;      // State tracking structure (can have several)
-
-
-The first thing to do is to init bitStream and state.
-    size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
-    FSE_initCState(&state, ct);
-
-Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
-You can then encode your input data, byte after byte.
-FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
-Remember decoding will be done in reverse direction.
-    FSE_encodeByte(&bitStream, &state, symbol);
-
-At any time, you can also add any bit sequence.
-Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
-    BIT_addBits(&bitStream, bitField, nbBits);
-
-The above methods don't commit data to memory, they just store it into local register, for speed.
-Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
-Writing data to memory is a manual operation, performed by the flushBits function.
-    BIT_flushBits(&bitStream);
-
-Your last FSE encoding operation shall be to flush your last state value(s).
-    FSE_flushState(&bitStream, &state);
-
-Finally, you must close the bitStream.
-The function returns the size of CStream in bytes.
-If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
-If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
-    size_t size = BIT_closeCStream(&bitStream);
-*/
-
-
-/* *****************************************
-*  FSE symbol decompression API
-*******************************************/
-typedef struct {
-    size_t      state;
-    const void* table;   /* precise table may vary, depending on U16 */
-} FSE_DState_t;
-
-
-static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
-
-static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
-
-static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
-
-/**<
-Let's now decompose FSE_decompress_usingDTable() into its unitary components.
-You will decode FSE-encoded symbols from the bitStream,
-and also any other bitFields you put in, **in reverse order**.
-
-You will need a few variables to track your bitStream. They are :
-
-BIT_DStream_t DStream;    // Stream context
-FSE_DState_t  DState;     // State context. Multiple ones are possible
-FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()
-
-The first thing to do is to init the bitStream.
-    errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
-
-You should then retrieve your initial state(s)
-(in reverse flushing order if you have several ones) :
-    errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
-
-You can then decode your data, symbol after symbol.
-For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
-Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
-    unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
-
-You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
-Note : maximum allowed nbBits is 25, for 32-bits compatibility
-    size_t bitField = BIT_readBits(&DStream, nbBits);
-
-All above operations only read from local register (which size depends on size_t).
-Refueling the register from memory is manually performed by the reload method.
-    endSignal = FSE_reloadDStream(&DStream);
-
-BIT_reloadDStream() result tells if there is still some more data to read from DStream.
-BIT_DStream_unfinished : there is still some data left into the DStream.
-BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
-BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
-BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.
-
-When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
-to properly detect the exact end of stream.
-After each decoded symbol, check if DStream is fully consumed using this simple test :
-    BIT_reloadDStream(&DStream) >= BIT_DStream_completed
-
-When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
-Checking if DStream has reached its end is performed by :
-    BIT_endOfDStream(&DStream);
-Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
-    FSE_endOfDState(&DState);
-*/
-
-
-/* *****************************************
-*  FSE unsafe API
-*******************************************/
-static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
-/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
-
-
-/* *****************************************
-*  Implementation of inlined functions
-*******************************************/
-typedef struct {
-    int deltaFindState;
-    U32 deltaNbBits;
-} FSE_symbolCompressionTransform; /* total 8 bytes */
-
-MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
-{
-    const void* ptr = ct;
-    const U16* u16ptr = (const U16*) ptr;
-    const U32 tableLog = MEM_read16(ptr);
-    statePtr->value = (ptrdiff_t)1<<tableLog;
-    statePtr->stateTable = u16ptr+2;
-    statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
-    statePtr->stateLog = tableLog;
-}
-
-
-/*! FSE_initCState2() :
-*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
-*   uses the smallest state value possible, saving the cost of this symbol */
-MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
-{
-    FSE_initCState(statePtr, ct);
-    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
-        const U16* stateTable = (const U16*)(statePtr->stateTable);
-        U32 nbBitsOut  = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
-        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
-        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
-    }
-}
-
-MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
-{
-    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
-    const U16* const stateTable = (const U16*)(statePtr->stateTable);
-    U32 const nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
-    BIT_addBits(bitC, statePtr->value, nbBitsOut);
-    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
-}
-
-MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
-{
-    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
-    BIT_flushBits(bitC);
-}
-
-
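For symmetry, here is a matching encode-loop sketch using FSE_initCState2(), FSE_encodeSymbol() and FSE_flushCState() as defined above, with the same includes as the previous sketch. Again this is only an illustration: the `encode_all` name and its parameters are placeholders, and a single state is used instead of the two interleaved states the real encoder employs. Symbols are consumed from the end of the input, which is why the decoder later regenerates them in the original order.

    static size_t encode_all(void* dst, size_t dstCapacity,
                             const unsigned char* src, size_t srcSize,
                             const FSE_CTable* ct)
    {
        BIT_CStream_t bitC;
        FSE_CState_t  CState;
        const unsigned char* ip = src + srcSize;   /* encode backwards */
        if (srcSize == 0) return 0;
        if (FSE_isError(BIT_initCStream(&bitC, dst, dstCapacity))) return 0;
        FSE_initCState2(&CState, ct, *--ip);   /* first symbol is folded into the initial state */
        while (ip > src) {
            FSE_encodeSymbol(&bitC, &CState, *--ip);
            BIT_flushBits(&bitC);              /* keep the local bit container from overflowing */
        }
        FSE_flushCState(&bitC, &CState);       /* store the final state value */
        return BIT_closeCStream(&bitC);        /* 0 means dst was too small */
    }
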
-/* FSE_getMaxNbBits() :
- * Approximate maximum cost of a symbol, in bits.
- * Fractional costs are rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
- * note 1 : assume symbolValue is valid (<= maxSymbolValue)
- * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
-MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
-{
-    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
-    return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
-}
-
-/* FSE_bitCost() :
- * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
- * note 1 : assume symbolValue is valid (<= maxSymbolValue)
- * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
-MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
-{
-    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
-    U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
-    U32 const threshold = (minNbBits+1) << 16;
-    assert(tableLog < 16);
-    assert(accuracyLog < 31-tableLog);  /* ensure enough room for renormalization double shift */
-    {   U32 const tableSize = 1 << tableLog;
-        U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
-        U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog;   /* linear interpolation (very approximate) */
-        U32 const bitMultiplier = 1 << accuracyLog;
-        assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
-        assert(normalizedDeltaFromThreshold <= bitMultiplier);
-        return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
-    }
-}
-
-
-/* ======    Decompression    ====== */
-
-typedef struct {
-    U16 tableLog;
-    U16 fastMode;
-} FSE_DTableHeader;   /* sizeof U32 */
-
-typedef struct
-{
-    unsigned short newState;
-    unsigned char  symbol;
-    unsigned char  nbBits;
-} FSE_decode_t;   /* size == U32 */
-
-MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
-{
-    const void* ptr = dt;
-    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
-    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
-    BIT_reloadDStream(bitD);
-    DStatePtr->table = dt + 1;
-}
-
-MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
-{
-    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    return DInfo.symbol;
-}
-
-MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
-{
-    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    U32 const nbBits = DInfo.nbBits;
-    size_t const lowBits = BIT_readBits(bitD, nbBits);
-    DStatePtr->state = DInfo.newState + lowBits;
-}
-
-MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
-{
-    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    U32 const nbBits = DInfo.nbBits;
-    BYTE const symbol = DInfo.symbol;
-    size_t const lowBits = BIT_readBits(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-/*! FSE_decodeSymbolFast() :
-    unsafe, only works if no symbol has a probability > 50% */
-MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
-{
-    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    U32 const nbBits = DInfo.nbBits;
-    BYTE const symbol = DInfo.symbol;
-    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
-{
-    return DStatePtr->state == 0;
-}
-
-
-
-#ifndef FSE_COMMONDEFS_ONLY
-
-/* **************************************************************
-*  Tuning parameters
-****************************************************************/
-/*!MEMORY_USAGE :
-*  Memory usage formula : N -> 2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB; 16 -> 64KB; 20 -> 1MB; etc.)
-*  Increasing memory usage improves the compression ratio.
-*  Reducing memory usage can improve speed, due to cache effects.
-*  The recommended maximum value is 14, for 16KB, which fits nicely into the Intel x86 L1 cache. */
-#ifndef FSE_MAX_MEMORY_USAGE
-#  define FSE_MAX_MEMORY_USAGE 14
-#endif
-#ifndef FSE_DEFAULT_MEMORY_USAGE
-#  define FSE_DEFAULT_MEMORY_USAGE 13
-#endif
-
-/*!FSE_MAX_SYMBOL_VALUE :
-*  Maximum symbol value authorized.
-*  Required for proper stack allocation */
-#ifndef FSE_MAX_SYMBOL_VALUE
-#  define FSE_MAX_SYMBOL_VALUE 255
-#endif
-
-/* **************************************************************
-*  template functions type & suffix
-****************************************************************/
-#define FSE_FUNCTION_TYPE BYTE
-#define FSE_FUNCTION_EXTENSION
-#define FSE_DECODE_TYPE FSE_decode_t
-
-
-#endif   /* !FSE_COMMONDEFS_ONLY */
-
-
-/* ***************************************************************
-*  Constants
-*****************************************************************/
-#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
-#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
-#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
-#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
-#define FSE_MIN_TABLELOG 5
-
-#define FSE_TABLELOG_ABSOLUTE_MAX 15
-#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
-#  error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
-#endif
-
-#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
-
-
-#endif /* FSE_STATIC_LINKING_ONLY */
-
-
-#if defined (__cplusplus)
-}
-#endif
diff --git a/vendor/github.com/DataDog/zstd/fse_compress.c b/vendor/github.com/DataDog/zstd/fse_compress.c
deleted file mode 100644
index 68b47e1..0000000
--- a/vendor/github.com/DataDog/zstd/fse_compress.c
+++ /dev/null
@@ -1,721 +0,0 @@
-/* ******************************************************************
-   FSE : Finite State Entropy encoder
-   Copyright (C) 2013-present, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-/* **************************************************************
-*  Includes
-****************************************************************/
-#include <stdlib.h>     /* malloc, free, qsort */
-#include <string.h>     /* memcpy, memset */
-#include "compiler.h"
-#include "mem.h"        /* U32, U16, etc. */
-#include "debug.h"      /* assert, DEBUGLOG */
-#include "hist.h"       /* HIST_count_wksp */
-#include "bitstream.h"
-#define FSE_STATIC_LINKING_ONLY
-#include "fse.h"
-#include "error_private.h"
-
-
-/* **************************************************************
-*  Error Management
-****************************************************************/
-#define FSE_isError ERR_isError
-
-
-/* **************************************************************
-*  Templates
-****************************************************************/
-/*
-  Designed to be included,
-  for type-specific functions (template emulation in C).
-  The objective is to write these functions only once, for improved maintenance.
-*/
-
-/* safety checks */
-#ifndef FSE_FUNCTION_EXTENSION
-#  error "FSE_FUNCTION_EXTENSION must be defined"
-#endif
-#ifndef FSE_FUNCTION_TYPE
-#  error "FSE_FUNCTION_TYPE must be defined"
-#endif
-
-/* Function names */
-#define FSE_CAT(X,Y) X##Y
-#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
-#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
-
-
-/* Function templates */
-
-/* FSE_buildCTable_wksp() :
- * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
- * wkspSize should be sized to handle the worst-case situation, which is `(1<<max_tableLog) * sizeof(FSE_FUNCTION_TYPE)`
- * workSpace must also be properly aligned to FSE_FUNCTION_TYPE requirements
- */
-size_t FSE_buildCTable_wksp(FSE_CTable* ct,
-                      const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
-                            void* workSpace, size_t wkspSize)
-{
-    U32 const tableSize = 1 << tableLog;
-    U32 const tableMask = tableSize - 1;
-    void* const ptr = ct;
-    U16* const tableU16 = ( (U16*) ptr) + 2;
-    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
-    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
-    U32 const step = FSE_TABLESTEP(tableSize);
-    U32 cumul[FSE_MAX_SYMBOL_VALUE+2];
-
-    FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace;
-    U32 highThreshold = tableSize-1;
-
-    /* CTable header */
-    if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);
-    tableU16[-2] = (U16) tableLog;
-    tableU16[-1] = (U16) maxSymbolValue;
-    assert(tableLog < 16);   /* required for threshold strategy to work */
-
-    /* For explanations on how to distribute symbol values over the table :
-     * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
-
-     #ifdef __clang_analyzer__
-     memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize);   /* useless initialization, just to keep scan-build happy */
-     #endif
-
-    /* symbol start positions */
-    {   U32 u;
-        cumul[0] = 0;
-        for (u=1; u <= maxSymbolValue+1; u++) {
-            if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
-                cumul[u] = cumul[u-1] + 1;
-                tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
-            } else {
-                cumul[u] = cumul[u-1] + normalizedCounter[u-1];
-        }   }
-        cumul[maxSymbolValue+1] = tableSize+1;
-    }
-
-    /* Spread symbols */
-    {   U32 position = 0;
-        U32 symbol;
-        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
-            int nbOccurrences;
-            int const freq = normalizedCounter[symbol];
-            for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
-                tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
-                position = (position + step) & tableMask;
-                while (position > highThreshold)
-                    position = (position + step) & tableMask;   /* Low proba area */
-        }   }
-
-        assert(position==0);  /* Must have initialized all positions */
-    }
-
-    /* Build table */
-    {   U32 u; for (u=0; u<tableSize; u++) {
-        FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
-        tableU16[cumul[s]++] = (U16) (tableSize+u);   /* TableU16 : sorted by symbol order; gives next state value */
-    }   }
-
-    /* Build Symbol Transformation Table */
-    {   unsigned total = 0;
-        unsigned s;
-        for (s=0; s<=maxSymbolValue; s++) {
-            switch (normalizedCounter[s])
-            {
-            case  0:
-                /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
-                symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
-                break;
-
-            case -1:
-            case  1:
-                symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
-                symbolTT[s].deltaFindState = total - 1;
-                total ++;
-                break;
-            default :
-                {
-                    U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
-                    U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
-                    symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
-                    symbolTT[s].deltaFindState = total - normalizedCounter[s];
-                    total +=  normalizedCounter[s];
-    }   }   }   }
-
-#if 0  /* debug : symbol costs */
-    DEBUGLOG(5, "\n --- table statistics : ");
-    {   U32 symbol;
-        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
-            DEBUGLOG(5, "%3u: w=%3i,   maxBits=%u, fracBits=%.2f",
-                symbol, normalizedCounter[symbol],
-                FSE_getMaxNbBits(symbolTT, symbol),
-                (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
-        }
-    }
-#endif
-
-    return 0;
-}
-
-
-size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
-    FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE];   /* memset() is not necessary, even if the static analyzer complains about it */
-    return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol));
-}
-
-
-
-#ifndef FSE_COMMONDEFS_ONLY
-
-
-/*-**************************************************************
-*  FSE NCount encoding
-****************************************************************/
-size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
-{
-    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
-    return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */
-}
-
-static size_t
-FSE_writeNCount_generic (void* header, size_t headerBufferSize,
-                   const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
-                         unsigned writeIsSafe)
-{
-    BYTE* const ostart = (BYTE*) header;
-    BYTE* out = ostart;
-    BYTE* const oend = ostart + headerBufferSize;
-    int nbBits;
-    const int tableSize = 1 << tableLog;
-    int remaining;
-    int threshold;
-    U32 bitStream = 0;
-    int bitCount = 0;
-    unsigned symbol = 0;
-    unsigned const alphabetSize = maxSymbolValue + 1;
-    int previousIs0 = 0;
-
-    /* Table Size */
-    bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
-    bitCount  += 4;
-
-    /* Init */
-    remaining = tableSize+1;   /* +1 for extra accuracy */
-    threshold = tableSize;
-    nbBits = tableLog+1;
-
-    while ((symbol < alphabetSize) && (remaining>1)) {  /* stops at 1 */
-        if (previousIs0) {
-            unsigned start = symbol;
-            while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
-            if (symbol == alphabetSize) break;   /* incorrect distribution */
-            while (symbol >= start+24) {
-                start+=24;
-                bitStream += 0xFFFFU << bitCount;
-                if ((!writeIsSafe) && (out > oend-2))
-                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
-                out[0] = (BYTE) bitStream;
-                out[1] = (BYTE)(bitStream>>8);
-                out+=2;
-                bitStream>>=16;
-            }
-            while (symbol >= start+3) {
-                start+=3;
-                bitStream += 3 << bitCount;
-                bitCount += 2;
-            }
-            bitStream += (symbol-start) << bitCount;
-            bitCount += 2;
-            if (bitCount>16) {
-                if ((!writeIsSafe) && (out > oend - 2))
-                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
-                out[0] = (BYTE)bitStream;
-                out[1] = (BYTE)(bitStream>>8);
-                out += 2;
-                bitStream >>= 16;
-                bitCount -= 16;
-        }   }
-        {   int count = normalizedCounter[symbol++];
-            int const max = (2*threshold-1) - remaining;
-            remaining -= count < 0 ? -count : count;
-            count++;   /* +1 for extra accuracy */
-            if (count>=threshold)
-                count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
-            bitStream += count << bitCount;
-            bitCount  += nbBits;
-            bitCount  -= (count<max);
-            previousIs0  = (count==1);
-            if (remaining<1) return ERROR(GENERIC);
-            while (remaining<threshold) { nbBits--; threshold>>=1; }
-        }
-        if (bitCount>16) {
-            if ((!writeIsSafe) && (out > oend - 2))
-                return ERROR(dstSize_tooSmall);   /* Buffer overflow */
-            out[0] = (BYTE)bitStream;
-            out[1] = (BYTE)(bitStream>>8);
-            out += 2;
-            bitStream >>= 16;
-            bitCount -= 16;
-    }   }
-
-    if (remaining != 1)
-        return ERROR(GENERIC);  /* incorrect normalized distribution */
-    assert(symbol <= alphabetSize);
-
-    /* flush remaining bitStream */
-    if ((!writeIsSafe) && (out > oend - 2))
-        return ERROR(dstSize_tooSmall);   /* Buffer overflow */
-    out[0] = (BYTE)bitStream;
-    out[1] = (BYTE)(bitStream>>8);
-    out+= (bitCount+7) /8;
-
-    return (out-ostart);
-}
-
-
-size_t FSE_writeNCount (void* buffer, size_t bufferSize,
-                  const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
-    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */
-    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */
-
-    if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
-        return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
-
-    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
-}
-
-
-/*-**************************************************************
-*  FSE Compression Code
-****************************************************************/
-
-FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
-{
-    size_t size;
-    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
-    size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
-    return (FSE_CTable*)malloc(size);
-}
-
-void FSE_freeCTable (FSE_CTable* ct) { free(ct); }
-
-/* provides the minimum logSize to safely represent a distribution */
-static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
-{
-    U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
-    U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
-    U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
-    assert(srcSize > 1); /* Not supported, RLE should be used instead */
-    return minBits;
-}
-
-unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
-{
-    U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
-    U32 tableLog = maxTableLog;
-    U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
-    assert(srcSize > 1); /* Not supported, RLE should be used instead */
-    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
-    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* Accuracy can be reduced */
-    if (minBits > tableLog) tableLog = minBits;   /* Need a minimum to safely represent all symbol values */
-    if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
-    if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
-    return tableLog;
-}
-
-unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
-{
-    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
-}
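
As a worked example of the clamping above (not part of the original source, and using the defaults defined in fse.h earlier in this diff: FSE_DEFAULT_TABLELOG = 11, FSE_MIN_TABLELOG = 5, FSE_MAX_TABLELOG = 12): for srcSize = 1000 and maxSymbolValue = 255, maxBitsSrc = BIT_highbit32(999) - 2 = 9 - 2 = 7, which first reduces the accuracy from the default 11 down to 7; FSE_minTableLog() yields min(BIT_highbit32(1000) + 1, BIT_highbit32(255) + 2) = min(10, 9) = 9, which raises it back to 9; since 9 already lies within [5, 12], FSE_optimalTableLog(0, 1000, 255) returns 9.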
-
-
-/* Secondary normalization method.
-   To be used when primary method fails. */
-
-static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue)
-{
-    short const NOT_YET_ASSIGNED = -2;
-    U32 s;
-    U32 distributed = 0;
-    U32 ToDistribute;
-
-    /* Init */
-    U32 const lowThreshold = (U32)(total >> tableLog);
-    U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
-
-    for (s=0; s<=maxSymbolValue; s++) {
-        if (count[s] == 0) {
-            norm[s]=0;
-            continue;
-        }
-        if (count[s] <= lowThreshold) {
-            norm[s] = -1;
-            distributed++;
-            total -= count[s];
-            continue;
-        }
-        if (count[s] <= lowOne) {
-            norm[s] = 1;
-            distributed++;
-            total -= count[s];
-            continue;
-        }
-
-        norm[s]=NOT_YET_ASSIGNED;
-    }
-    ToDistribute = (1 << tableLog) - distributed;
-
-    if (ToDistribute == 0)
-        return 0;
-
-    if ((total / ToDistribute) > lowOne) {
-        /* risk of rounding to zero */
-        lowOne = (U32)((total * 3) / (ToDistribute * 2));
-        for (s=0; s<=maxSymbolValue; s++) {
-            if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
-                norm[s] = 1;
-                distributed++;
-                total -= count[s];
-                continue;
-        }   }
-        ToDistribute = (1 << tableLog) - distributed;
-    }
-
-    if (distributed == maxSymbolValue+1) {
-        /* all values are pretty poor;
-           probably incompressible data (should have already been detected);
-           find max, then give all remaining points to max */
-        U32 maxV = 0, maxC = 0;
-        for (s=0; s<=maxSymbolValue; s++)
-            if (count[s] > maxC) { maxV=s; maxC=count[s]; }
-        norm[maxV] += (short)ToDistribute;
-        return 0;
-    }
-
-    if (total == 0) {
-        /* all of the symbols were low enough for the lowOne or lowThreshold */
-        for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
-            if (norm[s] > 0) { ToDistribute--; norm[s]++; }
-        return 0;
-    }
-
-    {   U64 const vStepLog = 62 - tableLog;
-        U64 const mid = (1ULL << (vStepLog-1)) - 1;
-        U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid) / total;   /* scale on remaining */
-        U64 tmpTotal = mid;
-        for (s=0; s<=maxSymbolValue; s++) {
-            if (norm[s]==NOT_YET_ASSIGNED) {
-                U64 const end = tmpTotal + (count[s] * rStep);
-                U32 const sStart = (U32)(tmpTotal >> vStepLog);
-                U32 const sEnd = (U32)(end >> vStepLog);
-                U32 const weight = sEnd - sStart;
-                if (weight < 1)
-                    return ERROR(GENERIC);
-                norm[s] = (short)weight;
-                tmpTotal = end;
-    }   }   }
-
-    return 0;
-}
-
-
-size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
-                           const unsigned* count, size_t total,
-                           unsigned maxSymbolValue)
-{
-    /* Sanity checks */
-    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
-    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported size */
-    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported size */
-    if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC);   /* Too small tableLog, compression potentially impossible */
-
-    {   static U32 const rtbTable[] = {     0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
-        U64 const scale = 62 - tableLog;
-        U64 const step = ((U64)1<<62) / total;   /* <== here, one division ! */
-        U64 const vStep = 1ULL<<(scale-20);
-        int stillToDistribute = 1<<tableLog;
-        unsigned s;
-        unsigned largest=0;
-        short largestP=0;
-        U32 lowThreshold = (U32)(total >> tableLog);
-
-        for (s=0; s<=maxSymbolValue; s++) {
-            if (count[s] == total) return 0;   /* rle special case */
-            if (count[s] == 0) { normalizedCounter[s]=0; continue; }
-            if (count[s] <= lowThreshold) {
-                normalizedCounter[s] = -1;
-                stillToDistribute--;
-            } else {
-                short proba = (short)((count[s]*step) >> scale);
-                if (proba<8) {
-                    U64 restToBeat = vStep * rtbTable[proba];
-                    proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
-                }
-                if (proba > largestP) { largestP=proba; largest=s; }
-                normalizedCounter[s] = proba;
-                stillToDistribute -= proba;
-        }   }
-        if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
-            /* corner case, need another normalization method */
-            size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
-            if (FSE_isError(errorCode)) return errorCode;
-        }
-        else normalizedCounter[largest] += (short)stillToDistribute;
-    }
-
-#if 0
-    {   /* Print Table (debug) */
-        U32 s;
-        U32 nTotal = 0;
-        for (s=0; s<=maxSymbolValue; s++)
-            RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
-        for (s=0; s<=maxSymbolValue; s++)
-            nTotal += abs(normalizedCounter[s]);
-        if (nTotal != (1U<<tableLog))
-            RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
-        getchar();
-    }
-#endif
-
-    return tableLog;
-}
-
-
-/* fake FSE_CTable, for raw (uncompressed) input */
-size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
-{
-    const unsigned tableSize = 1 << nbBits;
-    const unsigned tableMask = tableSize - 1;
-    const unsigned maxSymbolValue = tableMask;
-    void* const ptr = ct;
-    U16* const tableU16 = ( (U16*) ptr) + 2;
-    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1);   /* assumption : tableLog >= 1 */
-    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
-    unsigned s;
-
-    /* Sanity checks */
-    if (nbBits < 1) return ERROR(GENERIC);             /* min size */
-
-    /* header */
-    tableU16[-2] = (U16) nbBits;
-    tableU16[-1] = (U16) maxSymbolValue;
-
-    /* Build table */
-    for (s=0; s<tableSize; s++)
-        tableU16[s] = (U16)(tableSize + s);
-
-    /* Build Symbol Transformation Table */
-    {   const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
-        for (s=0; s<=maxSymbolValue; s++) {
-            symbolTT[s].deltaNbBits = deltaNbBits;
-            symbolTT[s].deltaFindState = s-1;
-    }   }
-
-    return 0;
-}
-
-/* fake FSE_CTable, for rle input (always same symbol) */
-size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
-{
-    void* ptr = ct;
-    U16* tableU16 = ( (U16*) ptr) + 2;
-    void* FSCTptr = (U32*)ptr + 2;
-    FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
-
-    /* header */
-    tableU16[-2] = (U16) 0;
-    tableU16[-1] = (U16) symbolValue;
-
-    /* Build table */
-    tableU16[0] = 0;
-    tableU16[1] = 0;   /* just in case */
-
-    /* Build Symbol Transformation Table */
-    symbolTT[symbolValue].deltaNbBits = 0;
-    symbolTT[symbolValue].deltaFindState = 0;
-
-    return 0;
-}
-
-
-static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
-                           const void* src, size_t srcSize,
-                           const FSE_CTable* ct, const unsigned fast)
-{
-    const BYTE* const istart = (const BYTE*) src;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* ip=iend;
-
-    BIT_CStream_t bitC;
-    FSE_CState_t CState1, CState2;
-
-    /* init */
-    if (srcSize <= 2) return 0;
-    { size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
-      if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }
-
-#define FSE_FLUSHBITS(s)  (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
-
-    if (srcSize & 1) {
-        FSE_initCState2(&CState1, ct, *--ip);
-        FSE_initCState2(&CState2, ct, *--ip);
-        FSE_encodeSymbol(&bitC, &CState1, *--ip);
-        FSE_FLUSHBITS(&bitC);
-    } else {
-        FSE_initCState2(&CState2, ct, *--ip);
-        FSE_initCState2(&CState1, ct, *--ip);
-    }
-
-    /* join to mod 4 */
-    srcSize -= 2;
-    if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) {  /* test bit 2 */
-        FSE_encodeSymbol(&bitC, &CState2, *--ip);
-        FSE_encodeSymbol(&bitC, &CState1, *--ip);
-        FSE_FLUSHBITS(&bitC);
-    }
-
-    /* 2 or 4 encoding per loop */
-    while ( ip>istart ) {
-
-        FSE_encodeSymbol(&bitC, &CState2, *--ip);
-
-        if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 )   /* this test must be static */
-            FSE_FLUSHBITS(&bitC);
-
-        FSE_encodeSymbol(&bitC, &CState1, *--ip);
-
-        if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) {  /* this test must be static */
-            FSE_encodeSymbol(&bitC, &CState2, *--ip);
-            FSE_encodeSymbol(&bitC, &CState1, *--ip);
-        }
-
-        FSE_FLUSHBITS(&bitC);
-    }
-
-    FSE_flushCState(&bitC, &CState2);
-    FSE_flushCState(&bitC, &CState1);
-    return BIT_closeCStream(&bitC);
-}
-
-size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
-                           const void* src, size_t srcSize,
-                           const FSE_CTable* ct)
-{
-    unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
-
-    if (fast)
-        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
-    else
-        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
-}
-
-
-size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
-
-#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
-#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
-
-/* FSE_compress_wksp() :
- * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
- * `wkspSize` must be large enough to satisfy the `FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)` check below.
- */
-size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
-{
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* op = ostart;
-    BYTE* const oend = ostart + dstSize;
-
-    unsigned count[FSE_MAX_SYMBOL_VALUE+1];
-    S16   norm[FSE_MAX_SYMBOL_VALUE+1];
-    FSE_CTable* CTable = (FSE_CTable*)workSpace;
-    size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
-    void* scratchBuffer = (void*)(CTable + CTableSize);
-    size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));
-
-    /* init conditions */
-    if (wkspSize < FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
-    if (srcSize <= 1) return 0;  /* Not compressible */
-    if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
-    if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
-
-    /* Scan input and build symbol stats */
-    {   CHECK_V_F(maxCount, HIST_count_wksp(count, &maxSymbolValue, src, srcSize, scratchBuffer, scratchBufferSize) );
-        if (maxCount == srcSize) return 1;   /* only a single symbol in src : rle */
-        if (maxCount == 1) return 0;         /* each symbol present maximum once => not compressible */
-        if (maxCount < (srcSize >> 7)) return 0;   /* Heuristic : not compressible enough */
-    }
-
-    tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
-    CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) );
-
-    /* Write table description header */
-    {   CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
-        op += nc_err;
-    }
-
-    /* Compress */
-    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) );
-    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) );
-        if (cSize == 0) return 0;   /* not enough space for compressed data */
-        op += cSize;
-    }
-
-    /* check compressibility */
-    if ( (size_t)(op-ostart) >= srcSize-1 ) return 0;
-
-    return op-ostart;
-}
-
-typedef struct {
-    FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
-    BYTE scratchBuffer[1 << FSE_MAX_TABLELOG];
-} fseWkspMax_t;
-
-size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
-{
-    fseWkspMax_t scratchBuffer;
-    DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE));   /* compilation failures here means scratchBuffer is not large enough */
-    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
-    return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
-}
-
-size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG);
-}
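
To tie the pieces of this file together, here is a hedged round-trip sketch using only the public one-shot entry points (FSE_compressBound(), FSE_compress(), FSE_decompress()). It is illustrative only: the `roundtrip` name and buffers are placeholders, and the special return values 0 (not compressible) and 1 (RLE) are simply treated as "a caller would store the input uncompressed".

    #include <stdlib.h>   /* malloc, free */
    #include <string.h>   /* memcmp */
    #include "fse.h"

    /* returns 1 on success, 0 on failure (illustrative only) */
    static int roundtrip(const void* src, size_t srcSize)
    {
        size_t const bound = FSE_compressBound(srcSize);
        void* const cBuf = malloc(bound);
        void* const rBuf = malloc(srcSize);
        int ok = 0;
        if (cBuf && rBuf) {
            size_t const cSize = FSE_compress(cBuf, bound, src, srcSize);
            if (FSE_isError(cSize)) {
                ok = 0;            /* compression failed */
            } else if (cSize <= 1) {
                ok = 1;            /* 0 = not compressible, 1 = RLE : keep src as-is */
            } else {
                size_t const rSize = FSE_decompress(rBuf, srcSize, cBuf, cSize);
                ok = !FSE_isError(rSize) && (rSize == srcSize) && !memcmp(rBuf, src, srcSize);
            }
        }
        free(cBuf); free(rBuf);
        return ok;
    }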
-
-
-#endif   /* FSE_COMMONDEFS_ONLY */
diff --git a/vendor/github.com/DataDog/zstd/fse_decompress.c b/vendor/github.com/DataDog/zstd/fse_decompress.c
deleted file mode 100644
index 72bbead..0000000
--- a/vendor/github.com/DataDog/zstd/fse_decompress.c
+++ /dev/null
@@ -1,309 +0,0 @@
-/* ******************************************************************
-   FSE : Finite State Entropy decoder
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-
-/* **************************************************************
-*  Includes
-****************************************************************/
-#include <stdlib.h>     /* malloc, free, qsort */
-#include <string.h>     /* memcpy, memset */
-#include "bitstream.h"
-#include "compiler.h"
-#define FSE_STATIC_LINKING_ONLY
-#include "fse.h"
-#include "error_private.h"
-
-
-/* **************************************************************
-*  Error Management
-****************************************************************/
-#define FSE_isError ERR_isError
-#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
-
-/* check and forward error code */
-#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }
-
-
-/* **************************************************************
-*  Templates
-****************************************************************/
-/*
-  Designed to be included,
-  for type-specific functions (template emulation in C).
-  The objective is to write these functions only once, for improved maintenance.
-*/
-
-/* safety checks */
-#ifndef FSE_FUNCTION_EXTENSION
-#  error "FSE_FUNCTION_EXTENSION must be defined"
-#endif
-#ifndef FSE_FUNCTION_TYPE
-#  error "FSE_FUNCTION_TYPE must be defined"
-#endif
-
-/* Function names */
-#define FSE_CAT(X,Y) X##Y
-#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
-#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
-
-
-/* Function templates */
-FSE_DTable* FSE_createDTable (unsigned tableLog)
-{
-    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
-    return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
-}
-
-void FSE_freeDTable (FSE_DTable* dt)
-{
-    free(dt);
-}
-
-size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
-    void* const tdPtr = dt+1;   /* because *dt is unsigned (32 bits), dt+1 remains 32-bit aligned */
-    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
-    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
-
-    U32 const maxSV1 = maxSymbolValue + 1;
-    U32 const tableSize = 1 << tableLog;
-    U32 highThreshold = tableSize-1;
-
-    /* Sanity Checks */
-    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
-    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
-
-    /* Init, lay down lowprob symbols */
-    {   FSE_DTableHeader DTableH;
-        DTableH.tableLog = (U16)tableLog;
-        DTableH.fastMode = 1;
-        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
-            U32 s;
-            for (s=0; s<maxSV1; s++) {
-                if (normalizedCounter[s]==-1) {
-                    tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
-                    symbolNext[s] = 1;
-                } else {
-                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
-                    symbolNext[s] = normalizedCounter[s];
-        }   }   }
-        memcpy(dt, &DTableH, sizeof(DTableH));
-    }
-
-    /* Spread symbols */
-    {   U32 const tableMask = tableSize-1;
-        U32 const step = FSE_TABLESTEP(tableSize);
-        U32 s, position = 0;
-        for (s=0; s<maxSV1; s++) {
-            int i;
-            for (i=0; i<normalizedCounter[s]; i++) {
-                tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
-                position = (position + step) & tableMask;
-                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
-        }   }
-        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
-    }
-
-    /* Build Decoding table */
-    {   U32 u;
-        for (u=0; u<tableSize; u++) {
-            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
-            U32 const nextState = symbolNext[symbol]++;
-            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
-            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
-    }   }
-
-    return 0;
-}
-
-
-#ifndef FSE_COMMONDEFS_ONLY
-
-/*-*******************************************************
-*  Decompression (Byte symbols)
-*********************************************************/
-size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
-{
-    void* ptr = dt;
-    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
-    void* dPtr = dt + 1;
-    FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
-
-    DTableH->tableLog = 0;
-    DTableH->fastMode = 0;
-
-    cell->newState = 0;
-    cell->symbol = symbolValue;
-    cell->nbBits = 0;
-
-    return 0;
-}
-
-
-size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
-{
-    void* ptr = dt;
-    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
-    void* dPtr = dt + 1;
-    FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
-    const unsigned tableSize = 1 << nbBits;
-    const unsigned tableMask = tableSize - 1;
-    const unsigned maxSV1 = tableMask+1;
-    unsigned s;
-
-    /* Sanity checks */
-    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
-
-    /* Build Decoding Table */
-    DTableH->tableLog = (U16)nbBits;
-    DTableH->fastMode = 1;
-    for (s=0; s<maxSV1; s++) {
-        dinfo[s].newState = 0;
-        dinfo[s].symbol = (BYTE)s;
-        dinfo[s].nbBits = (BYTE)nbBits;
-    }
-
-    return 0;
-}
-
-FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
-          void* dst, size_t maxDstSize,
-    const void* cSrc, size_t cSrcSize,
-    const FSE_DTable* dt, const unsigned fast)
-{
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* op = ostart;
-    BYTE* const omax = op + maxDstSize;
-    BYTE* const olimit = omax-3;
-
-    BIT_DStream_t bitD;
-    FSE_DState_t state1;
-    FSE_DState_t state2;
-
-    /* Init */
-    CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
-
-    FSE_initDState(&state1, &bitD, dt);
-    FSE_initDState(&state2, &bitD, dt);
-
-#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
-
-    /* 4 symbols per loop */
-    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
-        op[0] = FSE_GETSYMBOL(&state1);
-
-        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            BIT_reloadDStream(&bitD);
-
-        op[1] = FSE_GETSYMBOL(&state2);
-
-        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
-
-        op[2] = FSE_GETSYMBOL(&state1);
-
-        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            BIT_reloadDStream(&bitD);
-
-        op[3] = FSE_GETSYMBOL(&state2);
-    }
-
-    /* tail */
-    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
-    while (1) {
-        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
-        *op++ = FSE_GETSYMBOL(&state1);
-        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
-            *op++ = FSE_GETSYMBOL(&state2);
-            break;
-        }
-
-        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
-        *op++ = FSE_GETSYMBOL(&state2);
-        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
-            *op++ = FSE_GETSYMBOL(&state1);
-            break;
-    }   }
-
-    return op-ostart;
-}
-
-
-size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
-                            const void* cSrc, size_t cSrcSize,
-                            const FSE_DTable* dt)
-{
-    const void* ptr = dt;
-    const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
-    const U32 fastMode = DTableH->fastMode;
-
-    /* select fast mode (static) */
-    if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
-    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
-}
-
-
-size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog)
-{
-    const BYTE* const istart = (const BYTE*)cSrc;
-    const BYTE* ip = istart;
-    short counting[FSE_MAX_SYMBOL_VALUE+1];
-    unsigned tableLog;
-    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
-
-    /* normal FSE decoding mode */
-    size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
-    if (FSE_isError(NCountLength)) return NCountLength;
-    //if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */
-    if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
-    ip += NCountLength;
-    cSrcSize -= NCountLength;
-
-    CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) );
-
-    return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace);   /* always return, even if it is an error code */
-}
-
-
-typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
-
-size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)
-{
-    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
-    return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG);
-}
-
-
-
-#endif   /* FSE_COMMONDEFS_ONLY */
diff --git a/vendor/github.com/DataDog/zstd/hist.c b/vendor/github.com/DataDog/zstd/hist.c
deleted file mode 100644
index 45b7bab..0000000
--- a/vendor/github.com/DataDog/zstd/hist.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/* ******************************************************************
-   hist : Histogram functions
-   part of Finite State Entropy project
-   Copyright (C) 2013-present, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-/* --- dependencies --- */
-#include "mem.h"             /* U32, BYTE, etc. */
-#include "debug.h"           /* assert, DEBUGLOG */
-#include "error_private.h"   /* ERROR */
-#include "hist.h"
-
-
-/* --- Error management --- */
-unsigned HIST_isError(size_t code) { return ERR_isError(code); }
-
-/*-**************************************************************
- *  Histogram functions
- ****************************************************************/
-unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
-                           const void* src, size_t srcSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-    const BYTE* const end = ip + srcSize;
-    unsigned maxSymbolValue = *maxSymbolValuePtr;
-    unsigned largestCount=0;
-
-    memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
-    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
-
-    while (ip<end) {
-        assert(*ip <= maxSymbolValue);
-        count[*ip++]++;
-    }
-
-    while (!count[maxSymbolValue]) maxSymbolValue--;
-    *maxSymbolValuePtr = maxSymbolValue;
-
-    {   U32 s;
-        for (s=0; s<=maxSymbolValue; s++)
-            if (count[s] > largestCount) largestCount = count[s];
-    }
-
-    return largestCount;
-}
-
-typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
-
-/* HIST_count_parallel_wksp() :
- * Stores the histogram into 4 intermediate tables, recombined at the end.
- * This design makes better use of out-of-order CPUs,
- * and is noticeably faster when some values are heavily repeated.
- * But it needs some additional workspace for the intermediate tables.
- * `workSpace` must be a table of at least HIST_WKSP_SIZE_U32 unsigned values.
- * @return : largest histogram frequency,
- *           or an error code (notably when a symbol value larger than *maxSymbolValuePtr is present in the source). */
-static size_t HIST_count_parallel_wksp(
-                                unsigned* count, unsigned* maxSymbolValuePtr,
-                                const void* source, size_t sourceSize,
-                                HIST_checkInput_e check,
-                                U32* const workSpace)
-{
-    const BYTE* ip = (const BYTE*)source;
-    const BYTE* const iend = ip+sourceSize;
-    unsigned maxSymbolValue = *maxSymbolValuePtr;
-    unsigned max=0;
-    U32* const Counting1 = workSpace;
-    U32* const Counting2 = Counting1 + 256;
-    U32* const Counting3 = Counting2 + 256;
-    U32* const Counting4 = Counting3 + 256;
-
-    memset(workSpace, 0, 4*256*sizeof(unsigned));
-
-    /* safety checks */
-    if (!sourceSize) {
-        memset(count, 0, maxSymbolValue + 1);
-        *maxSymbolValuePtr = 0;
-        return 0;
-    }
-    if (!maxSymbolValue) maxSymbolValue = 255;            /* 0 == default */
-
-    /* by stripes of 16 bytes */
-    {   U32 cached = MEM_read32(ip); ip += 4;
-        while (ip < iend-15) {
-            U32 c = cached; cached = MEM_read32(ip); ip += 4;
-            Counting1[(BYTE) c     ]++;
-            Counting2[(BYTE)(c>>8) ]++;
-            Counting3[(BYTE)(c>>16)]++;
-            Counting4[       c>>24 ]++;
-            c = cached; cached = MEM_read32(ip); ip += 4;
-            Counting1[(BYTE) c     ]++;
-            Counting2[(BYTE)(c>>8) ]++;
-            Counting3[(BYTE)(c>>16)]++;
-            Counting4[       c>>24 ]++;
-            c = cached; cached = MEM_read32(ip); ip += 4;
-            Counting1[(BYTE) c     ]++;
-            Counting2[(BYTE)(c>>8) ]++;
-            Counting3[(BYTE)(c>>16)]++;
-            Counting4[       c>>24 ]++;
-            c = cached; cached = MEM_read32(ip); ip += 4;
-            Counting1[(BYTE) c     ]++;
-            Counting2[(BYTE)(c>>8) ]++;
-            Counting3[(BYTE)(c>>16)]++;
-            Counting4[       c>>24 ]++;
-        }
-        ip-=4;
-    }
-
-    /* finish last symbols */
-    while (ip<iend) Counting1[*ip++]++;
-
-    if (check) {   /* verify stats will fit into destination table */
-        U32 s; for (s=255; s>maxSymbolValue; s--) {
-            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
-            if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
-    }   }
-
-    {   U32 s;
-        if (maxSymbolValue > 255) maxSymbolValue = 255;
-        for (s=0; s<=maxSymbolValue; s++) {
-            count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
-            if (count[s] > max) max = count[s];
-    }   }
-
-    while (!count[maxSymbolValue]) maxSymbolValue--;
-    *maxSymbolValuePtr = maxSymbolValue;
-    return (size_t)max;
-}
-
-/* HIST_countFast_wksp() :
- * Same as HIST_countFast(), but using an externally provided scratch buffer.
- * `workSpace` is a writable buffer which must be 4-bytes aligned,
- * `workSpaceSize` must be >= HIST_WKSP_SIZE
- */
-size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
-                          const void* source, size_t sourceSize,
-                          void* workSpace, size_t workSpaceSize)
-{
-    if (sourceSize < 1500) /* heuristic threshold */
-        return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
-    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-byte boundaries */
-    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
-    return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
-}
-
-/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
-size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
-                     const void* source, size_t sourceSize)
-{
-    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
-    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));
-}
-
-/* HIST_count_wksp() :
- * Same as HIST_count(), but using an externally provided scratch buffer.
- * `workSpace` must be a table of at least HIST_WKSP_SIZE_U32 unsigned values */
-size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
-                       const void* source, size_t sourceSize,
-                       void* workSpace, size_t workSpaceSize)
-{
-    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-byte boundaries */
-    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
-    if (*maxSymbolValuePtr < 255)
-        return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
-    *maxSymbolValuePtr = 255;
-    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
-}
-
-size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
-                 const void* src, size_t srcSize)
-{
-    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
-    return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters));
-}
diff --git a/vendor/github.com/DataDog/zstd/hist.h b/vendor/github.com/DataDog/zstd/hist.h
deleted file mode 100644
index 8b38935..0000000
--- a/vendor/github.com/DataDog/zstd/hist.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* ******************************************************************
-   hist : Histogram functions
-   part of Finite State Entropy project
-   Copyright (C) 2013-present, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-/* --- dependencies --- */
-#include <stddef.h>   /* size_t */
-
-
-/* --- simple histogram functions --- */
-
-/*! HIST_count():
- *  Provides the precise count of each byte within a table 'count'.
- * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
- *  Updates *maxSymbolValuePtr with actual largest symbol value detected.
- * @return : count of the most frequent symbol (which isn't identified).
- *           or an error code, which can be tested using HIST_isError().
- *           note : if return == srcSize, there is only one symbol.
- */
-size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
-                  const void* src, size_t srcSize);
-
-unsigned HIST_isError(size_t code);  /**< tells if a return value is an error code */
-
-
-/* --- advanced histogram functions --- */
-
-#define HIST_WKSP_SIZE_U32 1024
-#define HIST_WKSP_SIZE    (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
-/** HIST_count_wksp() :
- *  Same as HIST_count(), but using an externally provided scratch buffer.
- *  Benefit is this function will use very little stack space.
- * `workSpace` is a writable buffer which must be 4-bytes aligned,
- * `workSpaceSize` must be >= HIST_WKSP_SIZE
- */
-size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
-                       const void* src, size_t srcSize,
-                       void* workSpace, size_t workSpaceSize);
-
-/** HIST_countFast() :
- *  same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
- *  This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`
- */
-size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
-                      const void* src, size_t srcSize);
-
-/** HIST_countFast_wksp() :
- *  Same as HIST_countFast(), but using an externally provided scratch buffer.
- * `workSpace` is a writable buffer which must be 4-bytes aligned,
- * `workSpaceSize` must be >= HIST_WKSP_SIZE
- */
-size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
-                           const void* src, size_t srcSize,
-                           void* workSpace, size_t workSpaceSize);
-
-/*! HIST_count_simple() :
- *  Like HIST_countFast(), this function is unsafe,
- *  and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
- *  It is also a bit slower for large inputs.
- *  However, it does not need any additional memory (not even on stack).
- * @return : count of the most frequent symbol.
- *  Note this function doesn't produce any error (i.e. it must succeed).
- */
-unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
-                           const void* src, size_t srcSize);
diff --git a/vendor/github.com/DataDog/zstd/huf.h b/vendor/github.com/DataDog/zstd/huf.h
deleted file mode 100644
index 6b572c4..0000000
--- a/vendor/github.com/DataDog/zstd/huf.h
+++ /dev/null
@@ -1,358 +0,0 @@
-/* ******************************************************************
-   huff0 huffman codec,
-   part of Finite State Entropy library
-   Copyright (C) 2013-present, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#ifndef HUF_H_298734234
-#define HUF_H_298734234
-
-/* *** Dependencies *** */
-#include <stddef.h>    /* size_t */
-
-
-/* *** library symbols visibility *** */
-/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
- *        HUF symbols remain "private" (internal symbols for library only).
- *        Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
-#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
-#  define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
-#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
-#  define HUF_PUBLIC_API __declspec(dllexport)
-#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
-#  define HUF_PUBLIC_API __declspec(dllimport)  /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
-#else
-#  define HUF_PUBLIC_API
-#endif
-
-
-/* ========================== */
-/* ***  simple functions  *** */
-/* ========================== */
-
-/** HUF_compress() :
- *  Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
- * 'dst' buffer must be already allocated.
- *  Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
- * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
- * @return : size of compressed data (<= `dstCapacity`).
- *  Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
- *                   if HUF_isError(return), compression failed (more details using HUF_getErrorName())
- */
-HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
-                             const void* src, size_t srcSize);
-
-/** HUF_decompress() :
- *  Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
- *  into already allocated buffer 'dst', of minimum size 'dstSize'.
- * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
- *  Note : in contrast with FSE, HUF_decompress can regenerate
- *         RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
- *         because it knows size to regenerate (originalSize).
- * @return : size of regenerated data (== originalSize),
- *           or an error code, which can be tested using HUF_isError()
- */
-HUF_PUBLIC_API size_t HUF_decompress(void* dst,  size_t originalSize,
-                               const void* cSrc, size_t cSrcSize);
-
-
-/* ***   Tool functions *** */
-#define HUF_BLOCKSIZE_MAX (128 * 1024)                  /**< maximum input size for a single block compressed with HUF_compress */
-HUF_PUBLIC_API size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */
-
-/* Error Management */
-HUF_PUBLIC_API unsigned    HUF_isError(size_t code);       /**< tells if a return value is an error code */
-HUF_PUBLIC_API const char* HUF_getErrorName(size_t code);  /**< provides error code string (useful for debugging) */
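
As a usage sketch for the simple API above (buffer management and error handling trimmed; the `huf_roundtrip` wrapper is hypothetical):

```c
#include <stdlib.h>
#include <string.h>
#include "huf.h"   /* HUF_compress, HUF_decompress, HUF_compressBound, HUF_isError */

/* Illustrative one-shot compress/decompress of a single block (srcSize <= HUF_BLOCKSIZE_MAX). */
static int huf_roundtrip(const void* src, size_t srcSize)
{
    size_t const dstCapacity = HUF_compressBound(srcSize);
    void* const cBuf = malloc(dstCapacity);
    void* const rBuf = malloc(srcSize);
    int ok = 0;

    if (cBuf && rBuf) {
        size_t const cSize = HUF_compress(cBuf, dstCapacity, src, srcSize);
        if (!HUF_isError(cSize) && cSize > 0) {   /* cSize == 0 : not compressible, caller stores raw */
            size_t const rSize = HUF_decompress(rBuf, srcSize, cBuf, cSize);
            ok = !HUF_isError(rSize) && rSize == srcSize && memcmp(src, rBuf, srcSize) == 0;
        }
    }
    free(cBuf); free(rBuf);
    return ok;
}
```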
-
-
-/* ***   Advanced function   *** */
-
-/** HUF_compress2() :
- *  Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
- * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
- * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
-HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
-                               const void* src, size_t srcSize,
-                               unsigned maxSymbolValue, unsigned tableLog);
-
-/** HUF_compress4X_wksp() :
- *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
- * `workSpace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
-#define HUF_WORKSPACE_SIZE (6 << 10)
-#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
-HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
-                                     const void* src, size_t srcSize,
-                                     unsigned maxSymbolValue, unsigned tableLog,
-                                     void* workSpace, size_t wkspSize);
-
-#endif   /* HUF_H_298734234 */
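
A hedged sketch of the workspace variant declared above, with the scratch tables placed on the heap instead of the stack; passing 0 for maxSymbolValue/tableLog falls back to the library defaults (see HUF_compress_internal later in this diff). The `compress_with_workspace` wrapper is illustrative:

```c
#include <stdlib.h>
#include "huf.h"   /* HUF_compress4X_wksp, HUF_WORKSPACE_SIZE, HUF_isError */

/* Illustrative only: keep HUF's ~6 KB of scratch tables off the stack. */
static size_t compress_with_workspace(void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
{
    void* const wksp = malloc(HUF_WORKSPACE_SIZE);   /* >= HUF_WORKSPACE_SIZE; malloc is 4-byte aligned */
    size_t cSize;

    if (wksp == NULL) return 0;
    cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
                                0 /*maxSymbolValue: default*/, 0 /*tableLog: default*/,
                                wksp, HUF_WORKSPACE_SIZE);
    free(wksp);
    return cSize;   /* 0 means "not compressible"; use HUF_isError() to detect real failures */
}
```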
-
-/* ******************************************************************
- *  WARNING !!
- *  The following section contains advanced and experimental definitions
- *  which shall never be used in the context of a dynamic library,
- *  because they are not guaranteed to remain stable in the future.
- *  Only consider them in association with static linking.
- * *****************************************************************/
-#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
-#define HUF_H_HUF_STATIC_LINKING_ONLY
-
-/* *** Dependencies *** */
-#include "mem.h"   /* U32 */
-
-
-/* *** Constants *** */
-#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
-#define HUF_TABLELOG_DEFAULT  11      /* default tableLog value when none specified */
-#define HUF_SYMBOLVALUE_MAX  255
-
-#define HUF_TABLELOG_ABSOLUTEMAX  15  /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */
-#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
-#  error "HUF_TABLELOG_MAX is too large !"
-#endif
-
-
-/* ****************************************
-*  Static allocation
-******************************************/
-/* HUF buffer bounds */
-#define HUF_CTABLEBOUND 129
-#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible is pre-filtered with fast heuristic */
-#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
-
-/* static allocation of HUF's Compression Table */
-#define HUF_CTABLE_SIZE_U32(maxSymbolValue)   ((maxSymbolValue)+1)   /* Use tables of U32, for proper alignment */
-#define HUF_CTABLE_SIZE(maxSymbolValue)       (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
-#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
-    U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \
-    void* name##hv = &(name##hb); \
-    HUF_CElt* name = (HUF_CElt*)(name##hv)   /* no final ; */
-
-/* static allocation of HUF's DTable */
-typedef U32 HUF_DTable;
-#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
-#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
-        HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
-#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
-        HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
-
-
-/* ****************************************
-*  Advanced decompression functions
-******************************************/
-size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
-#endif
-
-size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
-size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
-size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
-size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
-size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
-size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
-#endif
-
-
-/* ****************************************
- *  HUF detailed API
- * ****************************************/
-
-/*! HUF_compress() does the following:
- *  1. count symbol occurrences from source[] into table count[] using HIST_count() (exposed within "hist.h")
- *  2. (optional) refine tableLog using HUF_optimalTableLog()
- *  3. build Huffman table from count using HUF_buildCTable()
- *  4. save Huffman table to memory buffer using HUF_writeCTable()
- *  5. encode the data stream using HUF_compress4X_usingCTable()
- *
- *  The following API allows targeting specific sub-functions for advanced tasks.
- *  For example, it's possible to compress several blocks using the same 'CTable',
- *  or to save and regenerate 'CTable' using external methods.
- */
-unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
-typedef struct HUF_CElt_s HUF_CElt;   /* incomplete type */
-size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
-size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
-size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
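
As a sketch of the five-step decomposition described above, a hypothetical `compress_block()` wiring the sub-functions together in order; the real HUF_compress path (HUF_compress_internal, later in this diff) adds repeat-table and compressibility heuristics that are omitted here:

```c
#define HUF_STATIC_LINKING_ONLY   /* exposes the detailed API above */
#include "huf.h"
#include "hist.h"                 /* HIST_count, HIST_isError */

/* Illustrative only: histogram, table log, CTable, serialized header, 4-stream encode. */
static size_t compress_block(void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize)
{
    unsigned count[HUF_SYMBOLVALUE_MAX + 1];
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;

    /* 1. symbol statistics (largest == srcSize would mean a single-symbol/RLE block, not handled here) */
    size_t const largest = HIST_count(count, &maxSymbolValue, src, srcSize);
    if (HIST_isError(largest)) return largest;

    /* 2-3. choose a table log and build the Huffman table */
    unsigned const tableLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue);
    size_t const maxBits = HUF_buildCTable(ctable, count, maxSymbolValue, tableLog);
    if (HUF_isError(maxBits)) return maxBits;

    /* 4. serialize the table in front of the payload */
    size_t const hSize = HUF_writeCTable(op, dstCapacity, ctable, maxSymbolValue, (unsigned)maxBits);
    if (HUF_isError(hSize)) return hSize;
    op += hSize;

    /* 5. encode the data itself with the 4-stream variant */
    size_t const cSize = HUF_compress4X_usingCTable(op, dstCapacity - hSize, src, srcSize, ctable);
    if (HUF_isError(cSize) || cSize == 0) return cSize;   /* 0 : not compressible / dst too small */
    return (size_t)(op - ostart) + cSize;
}
```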
-
-typedef enum {
-   HUF_repeat_none,  /**< Cannot use the previous table */
-   HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
-   HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
- } HUF_repeat;
-/** HUF_compress4X_repeat() :
- *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
- *  If it uses hufTable it does not modify hufTable or repeat.
- *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
- *  If preferRepeat then the old table will always be used if valid. */
-size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
-                       const void* src, size_t srcSize,
-                       unsigned maxSymbolValue, unsigned tableLog,
-                       void* workSpace, size_t wkspSize,    /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
-                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
-
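
One plausible way a caller could carry the table across blocks with the repeat API above (the block loop, the raw-block fallback and the repeat-state policy are illustrative assumptions, not the library's own block logic):

```c
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"   /* HUF_compress4X_repeat, HUF_repeat, HUF_CREATE_STATIC_CTABLE */

/* Illustrative only: reuse the previous block's Huffman table when it still fits,
 * asking the library to re-check it (HUF_repeat_check) on the following blocks. */
static size_t compress_blocks_with_repeat(void* dst, size_t dstCapacity,
                                          const void* const* blocks, const size_t* blockSizes,
                                          size_t nbBlocks, void* workSpace, size_t wkspSize)
{
    HUF_CREATE_STATIC_CTABLE(hufTable, HUF_SYMBOLVALUE_MAX);
    HUF_repeat repeat = HUF_repeat_none;      /* nothing to reuse yet */
    BYTE* op = (BYTE*)dst;
    size_t n;

    for (n = 0; n < nbBlocks; n++) {
        size_t const cSize = HUF_compress4X_repeat(op, dstCapacity - (size_t)(op - (BYTE*)dst),
                                                   blocks[n], blockSizes[n],
                                                   HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
                                                   workSpace, wkspSize,   /* >= HUF_WORKSPACE_SIZE */
                                                   hufTable, &repeat,
                                                   0 /*preferRepeat*/, 0 /*bmi2*/);
        if (HUF_isError(cSize)) return cSize;
        op += cSize;    /* cSize == 0 : the caller would store this block raw (not shown) */
        if (repeat == HUF_repeat_none) repeat = HUF_repeat_check;  /* table was (re)built; validate it next time */
    }
    return (size_t)(op - (BYTE*)dst);
}
```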
-/** HUF_buildCTable_wksp() :
- *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
- * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
- */
-#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
-#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
-size_t HUF_buildCTable_wksp (HUF_CElt* tree,
-                       const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
-                             void* workSpace, size_t wkspSize);
-
-/*! HUF_readStats() :
- *  Read compact Huffman tree, saved by HUF_writeCTable().
- * `huffWeight` is destination buffer.
- * @return : size read from `src`, or an error code.
- *  Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
-size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
-                     U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
-                     const void* src, size_t srcSize);
-
-/** HUF_readCTable() :
- *  Loading a CTable saved with HUF_writeCTable() */
-size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
-
-/** HUF_getNbBits() :
- *  Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
- *  Note 1 : it is not inlined, as the HUF_CElt definition is private
- *  Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */
-U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);
-
-/*
- * HUF_decompress() does the following:
- * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
- * 2. build Huffman table from the saved description, using HUF_readDTableX?()
- * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
- */
-
-/** HUF_selectDecoder() :
- *  Tells which decoder is likely to decode faster,
- *  based on a set of pre-computed metrics.
- * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
- *  Assumption : 0 < dstSize <= 128 KB */
-U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
-
-/**
- *  The minimum workspace size for the `workSpace` used in
- *  HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
- *
- *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
- *  HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
- *  Buffer overflow errors may potentially occur if code modifications result in
- *  a required workspace size greater than that specified in the following
- *  macro.
- */
-#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
-#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
-
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
-size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
-#endif
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
-size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
-#endif
-
-size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#endif
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#endif
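
A sketch of that three-step decode pipeline using the single-symbol (X1) path: read the serialized table into a static DTable, then decode the four streams from the remaining bytes. Treating the input as "table header followed immediately by payload", and the sentinel error value, are assumptions of this sketch:

```c
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"   /* HUF_CREATE_STATIC_DTABLEX1, HUF_readDTableX1, HUF_decompress4X1_usingDTable */

/* Illustrative only: decode one block whose Huffman description precedes its payload. */
static size_t decompress_single_symbol(void* dst, size_t dstSize,
                                       const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX1(dtable, HUF_TABLELOG_MAX);   /* roughly 8 KB on the stack */

    size_t const hSize = HUF_readDTableX1(dtable, cSrc, cSrcSize);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return (size_t)-1;               /* sentinel for this sketch: no payload left */

    return HUF_decompress4X1_usingDTable(dst, dstSize,
                                         (const BYTE*)cSrc + hSize, cSrcSize - hSize,
                                         dtable);
}
```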
-
-
-/* ====================== */
-/* single stream variants */
-/* ====================== */
-
-size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
-size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
-size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
-/** HUF_compress1X_repeat() :
- *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
- *  If it uses hufTable it does not modify hufTable or repeat.
- *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
- *  If preferRepeat then the old table will always be used if valid. */
-size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
-                       const void* src, size_t srcSize,
-                       unsigned maxSymbolValue, unsigned tableLog,
-                       void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
-                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
-
-size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
-#endif
-
-size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
-size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
-#endif
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
-#endif
-
-size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of single or double symbol decoder, based on DTable */
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#endif
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#endif
-
-/* BMI2 variants.
- * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
- */
-size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
-#endif
-size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
-size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
-
-#endif /* HUF_STATIC_LINKING_ONLY */
-
-#if defined (__cplusplus)
-}
-#endif
diff --git a/vendor/github.com/DataDog/zstd/huf_compress.c b/vendor/github.com/DataDog/zstd/huf_compress.c
deleted file mode 100644
index f074f1e..0000000
--- a/vendor/github.com/DataDog/zstd/huf_compress.c
+++ /dev/null
@@ -1,798 +0,0 @@
-/* ******************************************************************
-   Huffman encoder, part of New Generation Entropy library
-   Copyright (C) 2013-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-/* **************************************************************
-*  Compiler specifics
-****************************************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#endif
-
-
-/* **************************************************************
-*  Includes
-****************************************************************/
-#include <string.h>     /* memcpy, memset */
-#include <stdio.h>      /* printf (debug) */
-#include "compiler.h"
-#include "bitstream.h"
-#include "hist.h"
-#define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
-#include "fse.h"        /* header compression */
-#define HUF_STATIC_LINKING_ONLY
-#include "huf.h"
-#include "error_private.h"
-
-
-/* **************************************************************
-*  Error Management
-****************************************************************/
-#define HUF_isError ERR_isError
-#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
-#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
-#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
-
-
-/* **************************************************************
-*  Utils
-****************************************************************/
-unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
-{
-    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
-}
-
-
-/* *******************************************************
-*  HUF : Huffman block compression
-*********************************************************/
-/* HUF_compressWeights() :
- * Same as FSE_compress(), but dedicated to huff0's weights compression.
- * The use case needs much less stack memory.
- * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
- */
-#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
-static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
-{
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* op = ostart;
-    BYTE* const oend = ostart + dstSize;
-
-    unsigned maxSymbolValue = HUF_TABLELOG_MAX;
-    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
-
-    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
-    BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];
-
-    unsigned count[HUF_TABLELOG_MAX+1];
-    S16 norm[HUF_TABLELOG_MAX+1];
-
-    /* init conditions */
-    if (wtSize <= 1) return 0;  /* Not compressible */
-
-    /* Scan input and build symbol stats */
-    {   unsigned const maxCount = HIST_count_simple(count, &maxSymbolValue, weightTable, wtSize);   /* never fails */
-        if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
-        if (maxCount == 1) return 0;        /* each symbol present maximum once => not compressible */
-    }
-
-    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
-    CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );
-
-    /* Write table description header */
-    {   CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
-        op += hSize;
-    }
-
-    /* Compress */
-    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
-    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) );
-        if (cSize == 0) return 0;   /* not enough space for compressed data */
-        op += cSize;
-    }
-
-    return op-ostart;
-}
-
-
-struct HUF_CElt_s {
-  U16  val;
-  BYTE nbBits;
-};   /* typedef'd to HUF_CElt within "huf.h" */
-
-/*! HUF_writeCTable() :
-    `CTable` : Huffman tree to save, using huf representation.
-    @return : size of saved CTable */
-size_t HUF_writeCTable (void* dst, size_t maxDstSize,
-                        const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
-{
-    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */
-    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
-    BYTE* op = (BYTE*)dst;
-    U32 n;
-
-     /* check conditions */
-    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
-
-    /* convert to weight */
-    bitsToWeight[0] = 0;
-    for (n=1; n<huffLog+1; n++)
-        bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
-    for (n=0; n<maxSymbolValue; n++)
-        huffWeight[n] = bitsToWeight[CTable[n].nbBits];
-
-    /* attempt weights compression by FSE */
-    {   CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
-        if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */
-            op[0] = (BYTE)hSize;
-            return hSize+1;
-    }   }
-
-    /* write raw values as 4-bits (max : 15) */
-    if (maxSymbolValue > (256-128)) return ERROR(GENERIC);   /* should not happen : likely means source cannot be compressed */
-    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall);   /* not enough space within dst buffer */
-    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
-    huffWeight[maxSymbolValue] = 0;   /* to be sure it doesn't cause msan issue in final combination */
-    for (n=0; n<maxSymbolValue; n+=2)
-        op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]);
-    return ((maxSymbolValue+1)/2) + 1;
-}
-
-
-size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize)
-{
-    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some static analyzer may complain */
-    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
-    U32 tableLog = 0;
-    U32 nbSymbols = 0;
-
-    /* get symbol weights */
-    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
-
-    /* check result */
-    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
-    if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
-
-    /* Prepare base value per rank */
-    {   U32 n, nextRankStart = 0;
-        for (n=1; n<=tableLog; n++) {
-            U32 current = nextRankStart;
-            nextRankStart += (rankVal[n] << (n-1));
-            rankVal[n] = current;
-    }   }
-
-    /* fill nbBits */
-    {   U32 n; for (n=0; n<nbSymbols; n++) {
-            const U32 w = huffWeight[n];
-            CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
-    }   }
-
-    /* fill val */
-    {   U16 nbPerRank[HUF_TABLELOG_MAX+2]  = {0};  /* support w=0=>n=tableLog+1 */
-        U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
-        { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
-        /* determine starting value per rank */
-        valPerRank[tableLog+1] = 0;   /* for w==0 */
-        {   U16 min = 0;
-            U32 n; for (n=tableLog; n>0; n--) {  /* start at n=tablelog <-> w=1 */
-                valPerRank[n] = min;     /* get starting value within each rank */
-                min += nbPerRank[n];
-                min >>= 1;
-        }   }
-        /* assign value within rank, symbol order */
-        { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
-    }
-
-    *maxSymbolValuePtr = nbSymbols - 1;
-    return readSize;
-}
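
The writer/reader pair can be exercised together; a hedged sketch (the `ctable_roundtrip` helper and buffer sizing are illustrative, and `built` is assumed to come from HUF_buildCTable() on real statistics):

```c
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"   /* HUF_writeCTable, HUF_readCTable, HUF_getNbBits, HUF_CTABLEBOUND */

/* Illustrative only: serialize a CTable, reload it, then query one code length. */
static size_t ctable_roundtrip(const HUF_CElt* built, unsigned maxSymbolValue, unsigned huffLog)
{
    BYTE header[HUF_CTABLEBOUND];                      /* large enough for any written table */
    HUF_CREATE_STATIC_CTABLE(reloaded, HUF_SYMBOLVALUE_MAX);
    unsigned maxSV = HUF_SYMBOLVALUE_MAX;              /* upper bound accepted on reload */

    size_t const hSize = HUF_writeCTable(header, sizeof(header), built, maxSymbolValue, huffLog);
    if (HUF_isError(hSize)) return hSize;

    size_t const rSize = HUF_readCTable(reloaded, &maxSV, header, hSize);
    if (HUF_isError(rSize)) return rSize;

    /* maxSV now holds the largest symbol actually present in the reloaded table */
    return HUF_getNbBits(reloaded, 0);                 /* nbBits assigned to symbol 0 */
}
```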
-
-U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue)
-{
-    const HUF_CElt* table = (const HUF_CElt*)symbolTable;
-    assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
-    return table[symbolValue].nbBits;
-}
-
-
-typedef struct nodeElt_s {
-    U32 count;
-    U16 parent;
-    BYTE byte;
-    BYTE nbBits;
-} nodeElt;
-
-static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
-{
-    const U32 largestBits = huffNode[lastNonNull].nbBits;
-    if (largestBits <= maxNbBits) return largestBits;   /* early exit : no elt > maxNbBits */
-
-    /* there are several too large elements (at least >= 2) */
-    {   int totalCost = 0;
-        const U32 baseCost = 1 << (largestBits - maxNbBits);
-        U32 n = lastNonNull;
-
-        while (huffNode[n].nbBits > maxNbBits) {
-            totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
-            huffNode[n].nbBits = (BYTE)maxNbBits;
-            n --;
-        }  /* n stops at huffNode[n].nbBits <= maxNbBits */
-        while (huffNode[n].nbBits == maxNbBits) n--;   /* n ends at index of smallest symbol using < maxNbBits */
-
-        /* renorm totalCost */
-        totalCost >>= (largestBits - maxNbBits);  /* note : totalCost is necessarily a multiple of baseCost */
-
-        /* repay normalized cost */
-        {   U32 const noSymbol = 0xF0F0F0F0;
-            U32 rankLast[HUF_TABLELOG_MAX+2];
-            int pos;
-
-            /* Get pos of last (smallest) symbol per rank */
-            memset(rankLast, 0xF0, sizeof(rankLast));
-            {   U32 currentNbBits = maxNbBits;
-                for (pos=n ; pos >= 0; pos--) {
-                    if (huffNode[pos].nbBits >= currentNbBits) continue;
-                    currentNbBits = huffNode[pos].nbBits;   /* < maxNbBits */
-                    rankLast[maxNbBits-currentNbBits] = pos;
-            }   }
-
-            while (totalCost > 0) {
-                U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
-                for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
-                    U32 highPos = rankLast[nBitsToDecrease];
-                    U32 lowPos = rankLast[nBitsToDecrease-1];
-                    if (highPos == noSymbol) continue;
-                    if (lowPos == noSymbol) break;
-                    {   U32 const highTotal = huffNode[highPos].count;
-                        U32 const lowTotal = 2 * huffNode[lowPos].count;
-                        if (highTotal <= lowTotal) break;
-                }   }
-                /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
-                /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
-                while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
-                    nBitsToDecrease ++;
-                totalCost -= 1 << (nBitsToDecrease-1);
-                if (rankLast[nBitsToDecrease-1] == noSymbol)
-                    rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];   /* this rank is no longer empty */
-                huffNode[rankLast[nBitsToDecrease]].nbBits ++;
-                if (rankLast[nBitsToDecrease] == 0)    /* special case, reached largest symbol */
-                    rankLast[nBitsToDecrease] = noSymbol;
-                else {
-                    rankLast[nBitsToDecrease]--;
-                    if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
-                        rankLast[nBitsToDecrease] = noSymbol;   /* this rank is now empty */
-            }   }   /* while (totalCost > 0) */
-
-            while (totalCost < 0) {  /* Sometimes, cost correction overshoots */
-                if (rankLast[1] == noSymbol) {  /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
-                    while (huffNode[n].nbBits == maxNbBits) n--;
-                    huffNode[n+1].nbBits--;
-                    rankLast[1] = n+1;
-                    totalCost++;
-                    continue;
-                }
-                huffNode[ rankLast[1] + 1 ].nbBits--;
-                rankLast[1]++;
-                totalCost ++;
-    }   }   }   /* there are several too large elements (at least >= 2) */
-
-    return maxNbBits;
-}
-
-
-typedef struct {
-    U32 base;
-    U32 current;
-} rankPos;
-
-static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue)
-{
-    rankPos rank[32];
-    U32 n;
-
-    memset(rank, 0, sizeof(rank));
-    for (n=0; n<=maxSymbolValue; n++) {
-        U32 r = BIT_highbit32(count[n] + 1);
-        rank[r].base ++;
-    }
-    for (n=30; n>0; n--) rank[n-1].base += rank[n].base;
-    for (n=0; n<32; n++) rank[n].current = rank[n].base;
-    for (n=0; n<=maxSymbolValue; n++) {
-        U32 const c = count[n];
-        U32 const r = BIT_highbit32(c+1) + 1;
-        U32 pos = rank[r].current++;
-        while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) {
-            huffNode[pos] = huffNode[pos-1];
-            pos--;
-        }
-        huffNode[pos].count = c;
-        huffNode[pos].byte  = (BYTE)n;
-    }
-}
-
-
-/** HUF_buildCTable_wksp() :
- *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
- *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of HUF_CTABLE_WORKSPACE_SIZE_U32 unsigned.
- */
-#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
-typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
-size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
-{
-    nodeElt* const huffNode0 = (nodeElt*)workSpace;
-    nodeElt* const huffNode = huffNode0+1;
-    U32 n, nonNullRank;
-    int lowS, lowN;
-    U16 nodeNb = STARTNODE;
-    U32 nodeRoot;
-
-    /* safety checks */
-    if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
-    if (wkspSize < sizeof(huffNodeTable)) return ERROR(workSpace_tooSmall);
-    if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
-    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
-    memset(huffNode0, 0, sizeof(huffNodeTable));
-
-    /* sort, decreasing order */
-    HUF_sort(huffNode, count, maxSymbolValue);
-
-    /* init for parents */
-    nonNullRank = maxSymbolValue;
-    while(huffNode[nonNullRank].count == 0) nonNullRank--;
-    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
-    huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
-    huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb;
-    nodeNb++; lowS-=2;
-    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
-    huffNode0[0].count = (U32)(1U<<31);  /* fake entry, strong barrier */
-
-    /* create parents */
-    while (nodeNb <= nodeRoot) {
-        U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
-        U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
-        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
-        huffNode[n1].parent = huffNode[n2].parent = nodeNb;
-        nodeNb++;
-    }
-
-    /* distribute weights (unlimited tree height) */
-    huffNode[nodeRoot].nbBits = 0;
-    for (n=nodeRoot-1; n>=STARTNODE; n--)
-        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
-    for (n=0; n<=nonNullRank; n++)
-        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
-
-    /* enforce maxTableLog */
-    maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
-
-    /* fill result into tree (val, nbBits) */
-    {   U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
-        U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
-        if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC);   /* check fit into table */
-        for (n=0; n<=nonNullRank; n++)
-            nbPerRank[huffNode[n].nbBits]++;
-        /* determine starting value per rank */
-        {   U16 min = 0;
-            for (n=maxNbBits; n>0; n--) {
-                valPerRank[n] = min;      /* get starting value within each rank */
-                min += nbPerRank[n];
-                min >>= 1;
-        }   }
-        for (n=0; n<=maxSymbolValue; n++)
-            tree[huffNode[n].byte].nbBits = huffNode[n].nbBits;   /* push nbBits per symbol, symbol order */
-        for (n=0; n<=maxSymbolValue; n++)
-            tree[n].val = valPerRank[tree[n].nbBits]++;   /* assign value within rank, symbol order */
-    }
-
-    return maxNbBits;
-}
-
-/** HUF_buildCTable() :
- * @return : maxNbBits
- *  Note : count is used before tree is written, so they can safely overlap
- */
-size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
-{
-    huffNodeTable nodeTable;
-    return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
-}
-
-static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
-{
-    size_t nbBits = 0;
-    int s;
-    for (s = 0; s <= (int)maxSymbolValue; ++s) {
-        nbBits += CTable[s].nbBits * count[s];
-    }
-    return nbBits >> 3;
-}
-
-static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
-  int bad = 0;
-  int s;
-  for (s = 0; s <= (int)maxSymbolValue; ++s) {
-    bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
-  }
-  return !bad;
-}
-
-size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
-
-FORCE_INLINE_TEMPLATE void
-HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
-{
-    BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
-}
-
-#define HUF_FLUSHBITS(s)  BIT_flushBits(s)
-
-#define HUF_FLUSHBITS_1(stream) \
-    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
-
-#define HUF_FLUSHBITS_2(stream) \
-    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
-
-FORCE_INLINE_TEMPLATE size_t
-HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
-                                   const void* src, size_t srcSize,
-                                   const HUF_CElt* CTable)
-{
-    const BYTE* ip = (const BYTE*) src;
-    BYTE* const ostart = (BYTE*)dst;
-    BYTE* const oend = ostart + dstSize;
-    BYTE* op = ostart;
-    size_t n;
-    BIT_CStream_t bitC;
-
-    /* init */
-    if (dstSize < 8) return 0;   /* not enough space to compress */
-    { size_t const initErr = BIT_initCStream(&bitC, op, oend-op);
-      if (HUF_isError(initErr)) return 0; }
-
-    n = srcSize & ~3;  /* join to mod 4 */
-    switch (srcSize & 3)
-    {
-        case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
-                 HUF_FLUSHBITS_2(&bitC);
-		 /* fall-through */
-        case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
-                 HUF_FLUSHBITS_1(&bitC);
-		 /* fall-through */
-        case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
-                 HUF_FLUSHBITS(&bitC);
-		 /* fall-through */
-        case 0 : /* fall-through */
-        default: break;
-    }
-
-    for (; n>0; n-=4) {  /* note : n&3==0 at this stage */
-        HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
-        HUF_FLUSHBITS_1(&bitC);
-        HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
-        HUF_FLUSHBITS_2(&bitC);
-        HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
-        HUF_FLUSHBITS_1(&bitC);
-        HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
-        HUF_FLUSHBITS(&bitC);
-    }
-
-    return BIT_closeCStream(&bitC);
-}
-
-#if DYNAMIC_BMI2
-
-static TARGET_ATTRIBUTE("bmi2") size_t
-HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
-                                   const void* src, size_t srcSize,
-                                   const HUF_CElt* CTable)
-{
-    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
-}
-
-static size_t
-HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
-                                      const void* src, size_t srcSize,
-                                      const HUF_CElt* CTable)
-{
-    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
-}
-
-static size_t
-HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
-                              const void* src, size_t srcSize,
-                              const HUF_CElt* CTable, const int bmi2)
-{
-    if (bmi2) {
-        return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
-    }
-    return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
-}
-
-#else
-
-static size_t
-HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
-                              const void* src, size_t srcSize,
-                              const HUF_CElt* CTable, const int bmi2)
-{
-    (void)bmi2;
-    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
-}
-
-#endif
-
-size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
-{
-    return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
-}
-
-
-static size_t
-HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
-                              const void* src, size_t srcSize,
-                              const HUF_CElt* CTable, int bmi2)
-{
-    size_t const segmentSize = (srcSize+3)/4;   /* first 3 segments */
-    const BYTE* ip = (const BYTE*) src;
-    const BYTE* const iend = ip + srcSize;
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* const oend = ostart + dstSize;
-    BYTE* op = ostart;
-
-    if (dstSize < 6 + 1 + 1 + 1 + 8) return 0;   /* minimum space to compress successfully */
-    if (srcSize < 12) return 0;   /* no saving possible : too small input */
-    op += 6;   /* jumpTable */
-
-    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
-        if (cSize==0) return 0;
-        assert(cSize <= 65535);
-        MEM_writeLE16(ostart, (U16)cSize);
-        op += cSize;
-    }
-
-    ip += segmentSize;
-    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
-        if (cSize==0) return 0;
-        assert(cSize <= 65535);
-        MEM_writeLE16(ostart+2, (U16)cSize);
-        op += cSize;
-    }
-
-    ip += segmentSize;
-    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
-        if (cSize==0) return 0;
-        assert(cSize <= 65535);
-        MEM_writeLE16(ostart+4, (U16)cSize);
-        op += cSize;
-    }
-
-    ip += segmentSize;
-    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, iend-ip, CTable, bmi2) );
-        if (cSize==0) return 0;
-        op += cSize;
-    }
-
-    return op-ostart;
-}
-
-size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
-{
-    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
-}
-
-typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
-
-static size_t HUF_compressCTable_internal(
-                BYTE* const ostart, BYTE* op, BYTE* const oend,
-                const void* src, size_t srcSize,
-                HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
-{
-    size_t const cSize = (nbStreams==HUF_singleStream) ?
-                         HUF_compress1X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2) :
-                         HUF_compress4X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2);
-    if (HUF_isError(cSize)) { return cSize; }
-    if (cSize==0) { return 0; }   /* uncompressible */
-    op += cSize;
-    /* check compressibility */
-    if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
-    return op-ostart;
-}
-
-typedef struct {
-    unsigned count[HUF_SYMBOLVALUE_MAX + 1];
-    HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
-    huffNodeTable nodeTable;
-} HUF_compress_tables_t;
-
-/* HUF_compress_internal() :
- * `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
-static size_t
-HUF_compress_internal (void* dst, size_t dstSize,
-                 const void* src, size_t srcSize,
-                       unsigned maxSymbolValue, unsigned huffLog,
-                       HUF_nbStreams_e nbStreams,
-                       void* workSpace, size_t wkspSize,
-                       HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
-                 const int bmi2)
-{
-    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace;
-    BYTE* const ostart = (BYTE*)dst;
-    BYTE* const oend = ostart + dstSize;
-    BYTE* op = ostart;
-
-    /* checks & inits */
-    if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
-    if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
-    if (!srcSize) return 0;  /* Uncompressed */
-    if (!dstSize) return 0;  /* cannot fit anything within dst budget */
-    if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);   /* current block size limit */
-    if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
-    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
-    if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
-    if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
-
-    /* Heuristic : If old table is valid, use it for small inputs */
-    if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
-        return HUF_compressCTable_internal(ostart, op, oend,
-                                           src, srcSize,
-                                           nbStreams, oldHufTable, bmi2);
-    }
-
-    /* Scan input and build symbol stats */
-    {   CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace, wkspSize) );
-        if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; }   /* single symbol, rle */
-        if (largest <= (srcSize >> 7)+4) return 0;   /* heuristic : probably not compressible enough */
-    }
-
-    /* Check validity of previous table */
-    if ( repeat
-      && *repeat == HUF_repeat_check
-      && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
-        *repeat = HUF_repeat_none;
-    }
-    /* Heuristic : use existing table for small inputs */
-    if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
-        return HUF_compressCTable_internal(ostart, op, oend,
-                                           src, srcSize,
-                                           nbStreams, oldHufTable, bmi2);
-    }
-
-    /* Build Huffman Tree */
-    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
-    {   size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
-                                            maxSymbolValue, huffLog,
-                                            table->nodeTable, sizeof(table->nodeTable));
-        CHECK_F(maxBits);
-        huffLog = (U32)maxBits;
-        /* Zero unused symbols in CTable, so we can check it for validity */
-        memset(table->CTable + (maxSymbolValue + 1), 0,
-               sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
-    }
-
-    /* Write table description header */
-    {   CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table->CTable, maxSymbolValue, huffLog) );
-        /* Check if using previous huffman table is beneficial */
-        if (repeat && *repeat != HUF_repeat_none) {
-            size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
-            size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
-            if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
-                return HUF_compressCTable_internal(ostart, op, oend,
-                                                   src, srcSize,
-                                                   nbStreams, oldHufTable, bmi2);
-        }   }
-
-        /* Use the new huffman table */
-        if (hSize + 12ul >= srcSize) { return 0; }
-        op += hSize;
-        if (repeat) { *repeat = HUF_repeat_none; }
-        if (oldHufTable)
-            memcpy(oldHufTable, table->CTable, sizeof(table->CTable));  /* Save new table */
-    }
-    return HUF_compressCTable_internal(ostart, op, oend,
-                                       src, srcSize,
-                                       nbStreams, table->CTable, bmi2);
-}
-
-
-size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
-                      const void* src, size_t srcSize,
-                      unsigned maxSymbolValue, unsigned huffLog,
-                      void* workSpace, size_t wkspSize)
-{
-    return HUF_compress_internal(dst, dstSize, src, srcSize,
-                                 maxSymbolValue, huffLog, HUF_singleStream,
-                                 workSpace, wkspSize,
-                                 NULL, NULL, 0, 0 /*bmi2*/);
-}
-
-size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
-                      const void* src, size_t srcSize,
-                      unsigned maxSymbolValue, unsigned huffLog,
-                      void* workSpace, size_t wkspSize,
-                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
-{
-    return HUF_compress_internal(dst, dstSize, src, srcSize,
-                                 maxSymbolValue, huffLog, HUF_singleStream,
-                                 workSpace, wkspSize, hufTable,
-                                 repeat, preferRepeat, bmi2);
-}
-
-size_t HUF_compress1X (void* dst, size_t dstSize,
-                 const void* src, size_t srcSize,
-                 unsigned maxSymbolValue, unsigned huffLog)
-{
-    unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
-    return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
-}
-
-/* HUF_compress4X_wksp():
- * compress input using 4 streams.
- * provide workspace to generate compression tables */
-size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
-                      const void* src, size_t srcSize,
-                      unsigned maxSymbolValue, unsigned huffLog,
-                      void* workSpace, size_t wkspSize)
-{
-    return HUF_compress_internal(dst, dstSize, src, srcSize,
-                                 maxSymbolValue, huffLog, HUF_fourStreams,
-                                 workSpace, wkspSize,
-                                 NULL, NULL, 0, 0 /*bmi2*/);
-}
-
-/* HUF_compress4X_repeat():
- * compress input using 4 streams.
- * re-use an existing huffman compression table */
-size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
-                      const void* src, size_t srcSize,
-                      unsigned maxSymbolValue, unsigned huffLog,
-                      void* workSpace, size_t wkspSize,
-                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
-{
-    return HUF_compress_internal(dst, dstSize, src, srcSize,
-                                 maxSymbolValue, huffLog, HUF_fourStreams,
-                                 workSpace, wkspSize,
-                                 hufTable, repeat, preferRepeat, bmi2);
-}
-
-size_t HUF_compress2 (void* dst, size_t dstSize,
-                const void* src, size_t srcSize,
-                unsigned maxSymbolValue, unsigned huffLog)
-{
-    unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
-    return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
-}
-
-size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT);
-}
diff --git a/vendor/github.com/DataDog/zstd/huf_decompress.c b/vendor/github.com/DataDog/zstd/huf_decompress.c
deleted file mode 100644
index 3f8bd29..0000000
--- a/vendor/github.com/DataDog/zstd/huf_decompress.c
+++ /dev/null
@@ -1,1232 +0,0 @@
-/* ******************************************************************
-   huff0 huffman decoder,
-   part of Finite State Entropy library
-   Copyright (C) 2013-present, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-
-/* **************************************************************
-*  Dependencies
-****************************************************************/
-#include <string.h>     /* memcpy, memset */
-#include "compiler.h"
-#include "bitstream.h"  /* BIT_* */
-#include "fse.h"        /* to compress headers */
-#define HUF_STATIC_LINKING_ONLY
-#include "huf.h"
-#include "error_private.h"
-
-/* **************************************************************
-*  Macros
-****************************************************************/
-
-/* These two optional macros force the use one way or another of the two
- * Huffman decompression implementations. You can't force in both directions
- * at the same time.
- */
-#if defined(HUF_FORCE_DECOMPRESS_X1) && \
-    defined(HUF_FORCE_DECOMPRESS_X2)
-#error "Cannot force the use of the X1 and X2 decoders at the same time!"
-#endif
-
-
-/* **************************************************************
-*  Error Management
-****************************************************************/
-#define HUF_isError ERR_isError
-#define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; }
-
-
-/* **************************************************************
-*  Byte alignment for workSpace management
-****************************************************************/
-#define HUF_ALIGN(x, a)         HUF_ALIGN_MASK((x), (a) - 1)
-#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
-
-
-/* **************************************************************
-*  BMI2 Variant Wrappers
-****************************************************************/
-#if DYNAMIC_BMI2
-
-#define HUF_DGEN(fn)                                                        \
-                                                                            \
-    static size_t fn##_default(                                             \
-                  void* dst,  size_t dstSize,                               \
-            const void* cSrc, size_t cSrcSize,                              \
-            const HUF_DTable* DTable)                                       \
-    {                                                                       \
-        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
-    }                                                                       \
-                                                                            \
-    static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2(                       \
-                  void* dst,  size_t dstSize,                               \
-            const void* cSrc, size_t cSrcSize,                              \
-            const HUF_DTable* DTable)                                       \
-    {                                                                       \
-        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
-    }                                                                       \
-                                                                            \
-    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
-                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
-    {                                                                       \
-        if (bmi2) {                                                         \
-            return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);         \
-        }                                                                   \
-        return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable);          \
-    }
-
-#else
-
-#define HUF_DGEN(fn)                                                        \
-    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
-                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
-    {                                                                       \
-        (void)bmi2;                                                         \
-        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
-    }
-
-#endif
-
-
-/*-***************************/
-/*  generic DTableDesc       */
-/*-***************************/
-typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
-
-static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
-{
-    DTableDesc dtd;
-    memcpy(&dtd, table, sizeof(dtd));
-    return dtd;
-}
-
-
-#ifndef HUF_FORCE_DECOMPRESS_X2
-
-/*-***************************/
-/*  single-symbol decoding   */
-/*-***************************/
-typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1;   /* single-symbol decoding */
-
-size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
-{
-    U32 tableLog = 0;
-    U32 nbSymbols = 0;
-    size_t iSize;
-    void* const dtPtr = DTable + 1;
-    HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
-
-    U32* rankVal;
-    BYTE* huffWeight;
-    size_t spaceUsed32 = 0;
-
-    rankVal = (U32 *)workSpace + spaceUsed32;
-    spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
-    huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32);
-    spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
-
-    if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);
-
-    DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
-    /* memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzers complain ... */
-
-    iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
-    if (HUF_isError(iSize)) return iSize;
-
-    /* Table header */
-    {   DTableDesc dtd = HUF_getDTableDesc(DTable);
-        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, Huffman tree cannot fit in */
-        dtd.tableType = 0;
-        dtd.tableLog = (BYTE)tableLog;
-        memcpy(DTable, &dtd, sizeof(dtd));
-    }
-
-    /* Calculate starting value for each rank */
-    {   U32 n, nextRankStart = 0;
-        for (n=1; n<tableLog+1; n++) {
-            U32 const current = nextRankStart;
-            nextRankStart += (rankVal[n] << (n-1));
-            rankVal[n] = current;
-    }   }
-
-    /* fill DTable */
-    {   U32 n;
-        for (n=0; n<nbSymbols; n++) {
-            U32 const w = huffWeight[n];
-            U32 const length = (1 << w) >> 1;
-            U32 u;
-            HUF_DEltX1 D;
-            D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
-            for (u = rankVal[w]; u < rankVal[w] + length; u++)
-                dt[u] = D;
-            rankVal[w] += length;
-    }   }
-
-    return iSize;
-}
-
-size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize)
-{
-    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_readDTableX1_wksp(DTable, src, srcSize,
-                                 workSpace, sizeof(workSpace));
-}
-
-FORCE_INLINE_TEMPLATE BYTE
-HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
-{
-    size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
-    BYTE const c = dt[val].byte;
-    BIT_skipBits(Dstream, dt[val].nbBits);
-    return c;
-}
-
-#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
-    *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr)  \
-    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
-        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
-
-#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
-
-HINT_INLINE size_t
-HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 4 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
-        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
-        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
-    }
-
-    /* [0-3] symbols remaining */
-    if (MEM_32bits())
-        while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
-            HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
-
-    /* no more data to retrieve from bitstream, no need to reload */
-    while (p < pEnd)
-        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
-
-    return pEnd-pStart;
-}
-
-FORCE_INLINE_TEMPLATE size_t
-HUF_decompress1X1_usingDTable_internal_body(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUF_DTable* DTable)
-{
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + dstSize;
-    const void* dtPtr = DTable + 1;
-    const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
-    BIT_DStream_t bitD;
-    DTableDesc const dtd = HUF_getDTableDesc(DTable);
-    U32 const dtLog = dtd.tableLog;
-
-    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
-
-    HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
-
-    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
-
-    return dstSize;
-}
-
-FORCE_INLINE_TEMPLATE size_t
-HUF_decompress4X1_usingDTable_internal_body(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUF_DTable* DTable)
-{
-    /* Check */
-    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */
-
-    {   const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable + 1;
-        const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
-
-        /* Init */
-        BIT_DStream_t bitD1;
-        BIT_DStream_t bitD2;
-        BIT_DStream_t bitD3;
-        BIT_DStream_t bitD4;
-        size_t const length1 = MEM_readLE16(istart);
-        size_t const length2 = MEM_readLE16(istart+2);
-        size_t const length3 = MEM_readLE16(istart+4);
-        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        const size_t segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal = BIT_DStream_unfinished;
-        DTableDesc const dtd = HUF_getDTableDesc(DTable);
-        U32 const dtLog = dtd.tableLog;
-
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
-        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
-        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
-        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
-
-        /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
-        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        while ( (endSignal==BIT_DStream_unfinished) && (op4<(oend-3)) ) {
-            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
-            HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
-            HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
-            HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
-            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
-            HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
-            HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
-            HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
-            BIT_reloadDStream(&bitD1);
-            BIT_reloadDStream(&bitD2);
-            BIT_reloadDStream(&bitD3);
-            BIT_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        /* note : should not be necessary : op# advance in lock step, and we control op4.
-         *        but curiously, binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when >=1 test is present */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 supposed already verified within main loop */
-
-        /* finish bitStreams one by one */
-        HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
-        HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
-        HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
-        HUF_decodeStreamX1(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
-          if (!endCheck) return ERROR(corruption_detected); }
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
-                                               const void *cSrc,
-                                               size_t cSrcSize,
-                                               const HUF_DTable *DTable);
-
-HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
-HUF_DGEN(HUF_decompress4X1_usingDTable_internal)
-
-
-
-size_t HUF_decompress1X1_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUF_DTable* DTable)
-{
-    DTableDesc dtd = HUF_getDTableDesc(DTable);
-    if (dtd.tableType != 0) return ERROR(GENERIC);
-    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-}
-
-size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
-                                   const void* cSrc, size_t cSrcSize,
-                                   void* workSpace, size_t wkspSize)
-{
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
-    if (HUF_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize; cSrcSize -= hSize;
-
-    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
-}
-
-
-size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
-                              const void* cSrc, size_t cSrcSize)
-{
-    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
-                                       workSpace, sizeof(workSpace));
-}
-
-size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
-    return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
-}
-
-size_t HUF_decompress4X1_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUF_DTable* DTable)
-{
-    DTableDesc dtd = HUF_getDTableDesc(DTable);
-    if (dtd.tableType != 0) return ERROR(GENERIC);
-    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-}
-
-static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
-                                   const void* cSrc, size_t cSrcSize,
-                                   void* workSpace, size_t wkspSize, int bmi2)
-{
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t const hSize = HUF_readDTableX1_wksp (dctx, cSrc, cSrcSize,
-                                                workSpace, wkspSize);
-    if (HUF_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize; cSrcSize -= hSize;
-
-    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
-}
-
-size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
-                                   const void* cSrc, size_t cSrcSize,
-                                   void* workSpace, size_t wkspSize)
-{
-    return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
-}
-
-
-size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
-                                       workSpace, sizeof(workSpace));
-}
-size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
-    return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
-}
-
-#endif /* HUF_FORCE_DECOMPRESS_X2 */
-
-
-#ifndef HUF_FORCE_DECOMPRESS_X1
-
-/* *************************/
-/* double-symbols decoding */
-/* *************************/
-
-typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2;  /* double-symbols decoding */
-typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
-typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
-typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
-
-
-/* HUF_fillDTableX2Level2() :
- * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
-static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed,
-                           const U32* rankValOrigin, const int minWeight,
-                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
-                           U32 nbBitsBaseline, U16 baseSeq)
-{
-    HUF_DEltX2 DElt;
-    U32 rankVal[HUF_TABLELOG_MAX + 1];
-
-    /* get pre-calculated rankVal */
-    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
-    /* fill skipped values */
-    if (minWeight>1) {
-        U32 i, skipSize = rankVal[minWeight];
-        MEM_writeLE16(&(DElt.sequence), baseSeq);
-        DElt.nbBits   = (BYTE)(consumed);
-        DElt.length   = 1;
-        for (i = 0; i < skipSize; i++)
-            DTable[i] = DElt;
-    }
-
-    /* fill DTable */
-    {   U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
-            const U32 symbol = sortedSymbols[s].symbol;
-            const U32 weight = sortedSymbols[s].weight;
-            const U32 nbBits = nbBitsBaseline - weight;
-            const U32 length = 1 << (sizeLog-nbBits);
-            const U32 start = rankVal[weight];
-            U32 i = start;
-            const U32 end = start + length;
-
-            MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
-            DElt.nbBits = (BYTE)(nbBits + consumed);
-            DElt.length = 2;
-            do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
-
-            rankVal[weight] += length;
-    }   }
-}
-
-
-static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
-                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
-                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
-                           const U32 nbBitsBaseline)
-{
-    U32 rankVal[HUF_TABLELOG_MAX + 1];
-    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
-    const U32 minBits  = nbBitsBaseline - maxWeight;
-    U32 s;
-
-    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
-    /* fill DTable */
-    for (s=0; s<sortedListSize; s++) {
-        const U16 symbol = sortedList[s].symbol;
-        const U32 weight = sortedList[s].weight;
-        const U32 nbBits = nbBitsBaseline - weight;
-        const U32 start = rankVal[weight];
-        const U32 length = 1 << (targetLog-nbBits);
-
-        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
-            U32 sortedRank;
-            int minWeight = nbBits + scaleLog;
-            if (minWeight < 1) minWeight = 1;
-            sortedRank = rankStart[minWeight];
-            HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,
-                           rankValOrigin[nbBits], minWeight,
-                           sortedList+sortedRank, sortedListSize-sortedRank,
-                           nbBitsBaseline, symbol);
-        } else {
-            HUF_DEltX2 DElt;
-            MEM_writeLE16(&(DElt.sequence), symbol);
-            DElt.nbBits = (BYTE)(nbBits);
-            DElt.length = 1;
-            {   U32 const end = start + length;
-                U32 u;
-                for (u = start; u < end; u++) DTable[u] = DElt;
-        }   }
-        rankVal[weight] += length;
-    }
-}
-
-size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
-                       const void* src, size_t srcSize,
-                             void* workSpace, size_t wkspSize)
-{
-    U32 tableLog, maxW, sizeOfSort, nbSymbols;
-    DTableDesc dtd = HUF_getDTableDesc(DTable);
-    U32 const maxTableLog = dtd.maxTableLog;
-    size_t iSize;
-    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
-    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
-    U32 *rankStart;
-
-    rankValCol_t* rankVal;
-    U32* rankStats;
-    U32* rankStart0;
-    sortedSymbol_t* sortedSymbol;
-    BYTE* weightList;
-    size_t spaceUsed32 = 0;
-
-    rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32);
-    spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
-    rankStats = (U32 *)workSpace + spaceUsed32;
-    spaceUsed32 += HUF_TABLELOG_MAX + 1;
-    rankStart0 = (U32 *)workSpace + spaceUsed32;
-    spaceUsed32 += HUF_TABLELOG_MAX + 2;
-    sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t);
-    spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
-    weightList = (BYTE *)((U32 *)workSpace + spaceUsed32);
-    spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
-
-    if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);
-
-    rankStart = rankStart0 + 1;
-    memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
-
-    DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
-    if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
-    /* memset(weightList, 0, sizeof(weightList)); */  /* is not necessary, even though some analyzers complain ... */
-
-    iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
-    if (HUF_isError(iSize)) return iSize;
-
-    /* check result */
-    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
-
-    /* find maxWeight */
-    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */
-
-    /* Get start index of each weight */
-    {   U32 w, nextRankStart = 0;
-        for (w=1; w<maxW+1; w++) {
-            U32 current = nextRankStart;
-            nextRankStart += rankStats[w];
-            rankStart[w] = current;
-        }
-        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/
-        sizeOfSort = nextRankStart;
-    }
-
-    /* sort symbols by weight */
-    {   U32 s;
-        for (s=0; s<nbSymbols; s++) {
-            U32 const w = weightList[s];
-            U32 const r = rankStart[w]++;
-            sortedSymbol[r].symbol = (BYTE)s;
-            sortedSymbol[r].weight = (BYTE)w;
-        }
-        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
-    }
-
-    /* Build rankVal */
-    {   U32* const rankVal0 = rankVal[0];
-        {   int const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */
-            U32 nextRankVal = 0;
-            U32 w;
-            for (w=1; w<maxW+1; w++) {
-                U32 current = nextRankVal;
-                nextRankVal += rankStats[w] << (w+rescale);
-                rankVal0[w] = current;
-        }   }
-        {   U32 const minBits = tableLog+1 - maxW;
-            U32 consumed;
-            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
-                U32* const rankValPtr = rankVal[consumed];
-                U32 w;
-                for (w = 1; w < maxW+1; w++) {
-                    rankValPtr[w] = rankVal0[w] >> consumed;
-    }   }   }   }
-
-    HUF_fillDTableX2(dt, maxTableLog,
-                   sortedSymbol, sizeOfSort,
-                   rankStart0, rankVal, maxW,
-                   tableLog+1);
-
-    dtd.tableLog = (BYTE)maxTableLog;
-    dtd.tableType = 1;
-    memcpy(DTable, &dtd, sizeof(dtd));
-    return iSize;
-}
-
-size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
-{
-  U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-  return HUF_readDTableX2_wksp(DTable, src, srcSize,
-                               workSpace, sizeof(workSpace));
-}
-
-
-FORCE_INLINE_TEMPLATE U32
-HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
-{
-    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 2);
-    BIT_skipBits(DStream, dt[val].nbBits);
-    return dt[val].length;
-}
-
-FORCE_INLINE_TEMPLATE U32
-HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
-{
-    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 1);
-    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
-    else {
-        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
-            BIT_skipBits(DStream, dt[val].nbBits);
-            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
-                /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
-                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
-    }   }
-    return 1;
-}
-
-#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
-    ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
-        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
-
-HINT_INLINE size_t
-HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
-                const HUF_DEltX2* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 8 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
-        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-    }
-
-    /* closer to end : up to 2 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-
-    while (p <= pEnd-2)
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
-
-    if (p < pEnd)
-        p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
-
-    return p-pStart;
-}
-
-FORCE_INLINE_TEMPLATE size_t
-HUF_decompress1X2_usingDTable_internal_body(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUF_DTable* DTable)
-{
-    BIT_DStream_t bitD;
-
-    /* Init */
-    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
-
-    /* decode */
-    {   BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
-        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
-        DTableDesc const dtd = HUF_getDTableDesc(DTable);
-        HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
-    }
-
-    /* check */
-    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
-
-    /* decoded size */
-    return dstSize;
-}
-
-
-FORCE_INLINE_TEMPLATE size_t
-HUF_decompress4X2_usingDTable_internal_body(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUF_DTable* DTable)
-{
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-
-    {   const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable+1;
-        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
-
-        /* Init */
-        BIT_DStream_t bitD1;
-        BIT_DStream_t bitD2;
-        BIT_DStream_t bitD3;
-        BIT_DStream_t bitD4;
-        size_t const length1 = MEM_readLE16(istart);
-        size_t const length2 = MEM_readLE16(istart+2);
-        size_t const length3 = MEM_readLE16(istart+4);
-        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        size_t const segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-        DTableDesc const dtd = HUF_getDTableDesc(DTable);
-        U32 const dtLog = dtd.tableLog;
-
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
-        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
-        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
-        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
-            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
-
-            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 already verified within main loop */
-
-        /* finish bitStreams one by one */
-        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
-        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
-        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
-        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
-          if (!endCheck) return ERROR(corruption_detected); }
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
-HUF_DGEN(HUF_decompress4X2_usingDTable_internal)
-
-size_t HUF_decompress1X2_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUF_DTable* DTable)
-{
-    DTableDesc dtd = HUF_getDTableDesc(DTable);
-    if (dtd.tableType != 1) return ERROR(GENERIC);
-    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-}
-
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
-                                   const void* cSrc, size_t cSrcSize,
-                                   void* workSpace, size_t wkspSize)
-{
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
-                                               workSpace, wkspSize);
-    if (HUF_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize; cSrcSize -= hSize;
-
-    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
-}
-
-
-size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
-                              const void* cSrc, size_t cSrcSize)
-{
-    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
-                                       workSpace, sizeof(workSpace));
-}
-
-size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
-    return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
-}
-
-size_t HUF_decompress4X2_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUF_DTable* DTable)
-{
-    DTableDesc dtd = HUF_getDTableDesc(DTable);
-    if (dtd.tableType != 1) return ERROR(GENERIC);
-    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-}
-
-static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
-                                   const void* cSrc, size_t cSrcSize,
-                                   void* workSpace, size_t wkspSize, int bmi2)
-{
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
-                                         workSpace, wkspSize);
-    if (HUF_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize; cSrcSize -= hSize;
-
-    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
-}
-
-size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
-                                   const void* cSrc, size_t cSrcSize,
-                                   void* workSpace, size_t wkspSize)
-{
-    return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
-}
-
-
-size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
-                              const void* cSrc, size_t cSrcSize)
-{
-    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
-                                       workSpace, sizeof(workSpace));
-}
-
-size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
-    return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
-}
-
-#endif /* HUF_FORCE_DECOMPRESS_X1 */
-
-
-/* ***********************************/
-/* Universal decompression selectors */
-/* ***********************************/
-
-size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
-                                    const void* cSrc, size_t cSrcSize,
-                                    const HUF_DTable* DTable)
-{
-    DTableDesc const dtd = HUF_getDTableDesc(DTable);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-    (void)dtd;
-    assert(dtd.tableType == 0);
-    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-    (void)dtd;
-    assert(dtd.tableType == 1);
-    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#else
-    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
-                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#endif
-}
-
-size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
-                                    const void* cSrc, size_t cSrcSize,
-                                    const HUF_DTable* DTable)
-{
-    DTableDesc const dtd = HUF_getDTableDesc(DTable);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-    (void)dtd;
-    assert(dtd.tableType == 0);
-    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-    (void)dtd;
-    assert(dtd.tableType == 1);
-    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#else
-    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
-                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#endif
-}
-
-
-#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
-typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
-static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
-{
-    /* single, double, quad */
-    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
-    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
-    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
-    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
-    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
-    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
-    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
-    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
-    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
-    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
-    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
-    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
-    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
-    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
-    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
-    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
-};
-#endif
-
-/** HUF_selectDecoder() :
- *  Tells which decoder is likely to decode faster,
- *  based on a set of pre-computed metrics.
- * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
- *  Assumption : 0 < dstSize <= 128 KB */
-U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
-{
-    assert(dstSize > 0);
-    assert(dstSize <= 128*1024);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-    (void)dstSize;
-    (void)cSrcSize;
-    return 0;
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-    (void)dstSize;
-    (void)cSrcSize;
-    return 1;
-#else
-    /* decoder timing evaluation */
-    {   U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 */
-        U32 const D256 = (U32)(dstSize >> 8);
-        U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
-        U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
-        DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, to reduce cache eviction */
-        return DTime1 < DTime0;
-    }
-#endif
-}
-
-
-typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-
-size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
-    static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 };
-#endif
-
-    /* validation checks */
-    if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
-    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
-    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
-
-    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-        (void)algoNb;
-        assert(algoNb == 0);
-        return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-        (void)algoNb;
-        assert(algoNb == 1);
-        return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);
-#else
-        return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
-#endif
-    }
-}
-
-size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    /* validation checks */
-    if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
-    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
-    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
-
-    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-        (void)algoNb;
-        assert(algoNb == 0);
-        return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-        (void)algoNb;
-        assert(algoNb == 1);
-        return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
-#else
-        return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
-                        HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
-#endif
-    }
-}
-
-size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
-                                         workSpace, sizeof(workSpace));
-}
-
-
-size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
-                                     size_t dstSize, const void* cSrc,
-                                     size_t cSrcSize, void* workSpace,
-                                     size_t wkspSize)
-{
-    /* validation checks */
-    if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if (cSrcSize == 0) return ERROR(corruption_detected);
-
-    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-        (void)algoNb;
-        assert(algoNb == 0);
-        return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-        (void)algoNb;
-        assert(algoNb == 1);
-        return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
-#else
-        return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
-                            cSrcSize, workSpace, wkspSize):
-                        HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
-#endif
-    }
-}
-
-size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
-                                  const void* cSrc, size_t cSrcSize,
-                                  void* workSpace, size_t wkspSize)
-{
-    /* validation checks */
-    if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
-    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
-    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
-
-    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-        (void)algoNb;
-        assert(algoNb == 0);
-        return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
-                                cSrcSize, workSpace, wkspSize);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-        (void)algoNb;
-        assert(algoNb == 1);
-        return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
-                                cSrcSize, workSpace, wkspSize);
-#else
-        return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
-                                cSrcSize, workSpace, wkspSize):
-                        HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
-                                cSrcSize, workSpace, wkspSize);
-#endif
-    }
-}
-
-size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
-                             const void* cSrc, size_t cSrcSize)
-{
-    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
-                                      workSpace, sizeof(workSpace));
-}
-
-
-size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
-{
-    DTableDesc const dtd = HUF_getDTableDesc(DTable);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-    (void)dtd;
-    assert(dtd.tableType == 0);
-    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-    (void)dtd;
-    assert(dtd.tableType == 1);
-    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
-#else
-    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
-                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
-#endif
-}
-
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
-{
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize);
-    if (HUF_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize; cSrcSize -= hSize;
-
-    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
-}
-#endif
-
-size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
-{
-    DTableDesc const dtd = HUF_getDTableDesc(DTable);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-    (void)dtd;
-    assert(dtd.tableType == 0);
-    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-    (void)dtd;
-    assert(dtd.tableType == 1);
-    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
-#else
-    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
-                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
-#endif
-}
-
-size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
-{
-    /* validation checks */
-    if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if (cSrcSize == 0) return ERROR(corruption_detected);
-
-    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-        (void)algoNb;
-        assert(algoNb == 0);
-        return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-        (void)algoNb;
-        assert(algoNb == 1);
-        return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
-#else
-        return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
-                        HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
-#endif
-    }
-}
diff --git a/vendor/github.com/DataDog/zstd/mem.h b/vendor/github.com/DataDog/zstd/mem.h
deleted file mode 100644
index 5da2487..0000000
--- a/vendor/github.com/DataDog/zstd/mem.h
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef MEM_H_MODULE
-#define MEM_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/*-****************************************
-*  Dependencies
-******************************************/
-#include <stddef.h>     /* size_t, ptrdiff_t */
-#include <string.h>     /* memcpy */
-
-
-/*-****************************************
-*  Compiler specifics
-******************************************/
-#if defined(_MSC_VER)   /* Visual Studio */
-#   include <stdlib.h>  /* _byteswap_ulong */
-#   include <intrin.h>  /* _byteswap_* */
-#endif
-#if defined(__GNUC__)
-#  define MEM_STATIC static __inline __attribute__((unused))
-#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#  define MEM_STATIC static inline
-#elif defined(_MSC_VER)
-#  define MEM_STATIC static __inline
-#else
-#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
-#endif
-
-#ifndef __has_builtin
-#  define __has_builtin(x) 0  /* compat. with non-clang compilers */
-#endif
-
-/* code only tested on 32 and 64 bits systems */
-#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
-MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
-
-
-/*-**************************************************************
-*  Basic Types
-*****************************************************************/
-#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-# include <stdint.h>
-  typedef   uint8_t BYTE;
-  typedef  uint16_t U16;
-  typedef   int16_t S16;
-  typedef  uint32_t U32;
-  typedef   int32_t S32;
-  typedef  uint64_t U64;
-  typedef   int64_t S64;
-#else
-# include <limits.h>
-#if CHAR_BIT != 8
-#  error "this implementation requires char to be exactly 8-bit type"
-#endif
-  typedef unsigned char      BYTE;
-#if USHRT_MAX != 65535
-#  error "this implementation requires short to be exactly 16-bit type"
-#endif
-  typedef unsigned short      U16;
-  typedef   signed short      S16;
-#if UINT_MAX != 4294967295
-#  error "this implementation requires int to be exactly 32-bit type"
-#endif
-  typedef unsigned int        U32;
-  typedef   signed int        S32;
-/* note : there are no limits defined for long long type in C90.
- * limits exist in C99, however, in such case, <stdint.h> is preferred */
-  typedef unsigned long long  U64;
-  typedef   signed long long  S64;
-#endif
-
-
-/*-**************************************************************
-*  Memory I/O
-*****************************************************************/
-/* MEM_FORCE_MEMORY_ACCESS :
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The switch below allows selecting a different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable).
- *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method does not rely on compiler extensions but violates the C standard.
- *            It can generate buggy code on targets where correctness depends on alignment.
- *            In some circumstances, it's the only known way to get the most performance (e.g. GCC + ARMv6).
- * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-#    define MEM_FORCE_MEMORY_ACCESS 2
-#  elif defined(__INTEL_COMPILER) || defined(__GNUC__)
-#    define MEM_FORCE_MEMORY_ACCESS 1
-#  endif
-#endif
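A standalone illustration of the trade-off described in the comment above; the names `read32_copy`/`read32_direct` are only for this sketch and do not appear in mem.h:

```c
#include <stdint.h>
#include <string.h>

/* Method 0: memcpy-based unaligned read -- safe and portable; modern
 * compilers usually lower it to a single load where that is legal. */
static uint32_t read32_copy(const void *p)
{
    uint32_t v;
    memcpy(&v, p, sizeof(v));
    return v;
}

/* Method 2: direct cast -- fastest on hardware that tolerates unaligned
 * loads, but undefined behaviour per the C standard for misaligned p. */
static uint32_t read32_direct(const void *p)
{
    return *(const uint32_t *)p;
}
```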
-
-MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
-MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
-
-MEM_STATIC unsigned MEM_isLittleEndian(void)
-{
-    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
-    return one.c[0];
-}
-
-#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
-
-/* violates the C standard by lying about structure alignment.
-Only use if there is no other way to achieve the best performance on the target platform */
-MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
-MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
-MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
-MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
-MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
-MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
-
-#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
-    __pragma( pack(push, 1) )
-    typedef struct { U16 v; } unalign16;
-    typedef struct { U32 v; } unalign32;
-    typedef struct { U64 v; } unalign64;
-    typedef struct { size_t v; } unalignArch;
-    __pragma( pack(pop) )
-#else
-    typedef struct { U16 v; } __attribute__((packed)) unalign16;
-    typedef struct { U32 v; } __attribute__((packed)) unalign32;
-    typedef struct { U64 v; } __attribute__((packed)) unalign64;
-    typedef struct { size_t v; } __attribute__((packed)) unalignArch;
-#endif
-
-MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
-MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
-MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
-MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
-MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
-MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }
-
-#else
-
-/* default method, safe and standard.
-   can sometimes prove slower */
-
-MEM_STATIC U16 MEM_read16(const void* memPtr)
-{
-    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC U32 MEM_read32(const void* memPtr)
-{
-    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC U64 MEM_read64(const void* memPtr)
-{
-    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC size_t MEM_readST(const void* memPtr)
-{
-    size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value)
-{
-    memcpy(memPtr, &value, sizeof(value));
-}
-
-MEM_STATIC void MEM_write32(void* memPtr, U32 value)
-{
-    memcpy(memPtr, &value, sizeof(value));
-}
-
-MEM_STATIC void MEM_write64(void* memPtr, U64 value)
-{
-    memcpy(memPtr, &value, sizeof(value));
-}
-
-#endif /* MEM_FORCE_MEMORY_ACCESS */
-
-MEM_STATIC U32 MEM_swap32(U32 in)
-{
-#if defined(_MSC_VER)     /* Visual Studio */
-    return _byteswap_ulong(in);
-#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
-  || (defined(__clang__) && __has_builtin(__builtin_bswap32))
-    return __builtin_bswap32(in);
-#else
-    return  ((in << 24) & 0xff000000 ) |
-            ((in <<  8) & 0x00ff0000 ) |
-            ((in >>  8) & 0x0000ff00 ) |
-            ((in >> 24) & 0x000000ff );
-#endif
-}
-
-MEM_STATIC U64 MEM_swap64(U64 in)
-{
-#if defined(_MSC_VER)     /* Visual Studio */
-    return _byteswap_uint64(in);
-#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
-  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
-    return __builtin_bswap64(in);
-#else
-    return  ((in << 56) & 0xff00000000000000ULL) |
-            ((in << 40) & 0x00ff000000000000ULL) |
-            ((in << 24) & 0x0000ff0000000000ULL) |
-            ((in << 8)  & 0x000000ff00000000ULL) |
-            ((in >> 8)  & 0x00000000ff000000ULL) |
-            ((in >> 24) & 0x0000000000ff0000ULL) |
-            ((in >> 40) & 0x000000000000ff00ULL) |
-            ((in >> 56) & 0x00000000000000ffULL);
-#endif
-}
-
-MEM_STATIC size_t MEM_swapST(size_t in)
-{
-    if (MEM_32bits())
-        return (size_t)MEM_swap32((U32)in);
-    else
-        return (size_t)MEM_swap64((U64)in);
-}
-
-/*=== Little endian r/w ===*/
-
-MEM_STATIC U16 MEM_readLE16(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read16(memPtr);
-    else {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U16)(p[0] + (p[1]<<8));
-    }
-}
-
-MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
-{
-    if (MEM_isLittleEndian()) {
-        MEM_write16(memPtr, val);
-    } else {
-        BYTE* p = (BYTE*)memPtr;
-        p[0] = (BYTE)val;
-        p[1] = (BYTE)(val>>8);
-    }
-}
-
-MEM_STATIC U32 MEM_readLE24(const void* memPtr)
-{
-    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
-}
-
-MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
-{
-    MEM_writeLE16(memPtr, (U16)val);
-    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
-}
-
-MEM_STATIC U32 MEM_readLE32(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read32(memPtr);
-    else
-        return MEM_swap32(MEM_read32(memPtr));
-}
-
-MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
-{
-    if (MEM_isLittleEndian())
-        MEM_write32(memPtr, val32);
-    else
-        MEM_write32(memPtr, MEM_swap32(val32));
-}
-
-MEM_STATIC U64 MEM_readLE64(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read64(memPtr);
-    else
-        return MEM_swap64(MEM_read64(memPtr));
-}
-
-MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
-{
-    if (MEM_isLittleEndian())
-        MEM_write64(memPtr, val64);
-    else
-        MEM_write64(memPtr, MEM_swap64(val64));
-}
-
-MEM_STATIC size_t MEM_readLEST(const void* memPtr)
-{
-    if (MEM_32bits())
-        return (size_t)MEM_readLE32(memPtr);
-    else
-        return (size_t)MEM_readLE64(memPtr);
-}
-
-MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
-{
-    if (MEM_32bits())
-        MEM_writeLE32(memPtr, (U32)val);
-    else
-        MEM_writeLE64(memPtr, (U64)val);
-}
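The MEM_readLE*/MEM_writeLE* pairs above fix the on-disk byte order to little-endian regardless of host endianness. A round-trip sketch of the same idea, written with plain byte arithmetic rather than this header (names are illustrative only):

```c
#include <stdint.h>

/* Write v into buf in little-endian byte order, then read it back.
 * Writer and reader agree on the layout whatever the host endianness,
 * which is exactly what the MEM_*LE* helpers above guarantee. */
static void write_le32(uint8_t *buf, uint32_t v)
{
    buf[0] = (uint8_t)(v      );
    buf[1] = (uint8_t)(v >>  8);
    buf[2] = (uint8_t)(v >> 16);
    buf[3] = (uint8_t)(v >> 24);
}

static uint32_t read_le32(const uint8_t *buf)
{
    return (uint32_t)buf[0]
         | ((uint32_t)buf[1] <<  8)
         | ((uint32_t)buf[2] << 16)
         | ((uint32_t)buf[3] << 24);
}
```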
-
-/*=== Big endian r/w ===*/
-
-MEM_STATIC U32 MEM_readBE32(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_swap32(MEM_read32(memPtr));
-    else
-        return MEM_read32(memPtr);
-}
-
-MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
-{
-    if (MEM_isLittleEndian())
-        MEM_write32(memPtr, MEM_swap32(val32));
-    else
-        MEM_write32(memPtr, val32);
-}
-
-MEM_STATIC U64 MEM_readBE64(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_swap64(MEM_read64(memPtr));
-    else
-        return MEM_read64(memPtr);
-}
-
-MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
-{
-    if (MEM_isLittleEndian())
-        MEM_write64(memPtr, MEM_swap64(val64));
-    else
-        MEM_write64(memPtr, val64);
-}
-
-MEM_STATIC size_t MEM_readBEST(const void* memPtr)
-{
-    if (MEM_32bits())
-        return (size_t)MEM_readBE32(memPtr);
-    else
-        return (size_t)MEM_readBE64(memPtr);
-}
-
-MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
-{
-    if (MEM_32bits())
-        MEM_writeBE32(memPtr, (U32)val);
-    else
-        MEM_writeBE64(memPtr, (U64)val);
-}
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* MEM_H_MODULE */
diff --git a/vendor/github.com/DataDog/zstd/pool.c b/vendor/github.com/DataDog/zstd/pool.c
deleted file mode 100644
index 7a82945..0000000
--- a/vendor/github.com/DataDog/zstd/pool.c
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-/* ======   Dependencies   ======= */
-#include <stddef.h>    /* size_t */
-#include "debug.h"     /* assert */
-#include "zstd_internal.h"  /* ZSTD_malloc, ZSTD_free */
-#include "pool.h"
-
-/* ======   Compiler specifics   ====== */
-#if defined(_MSC_VER)
-#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
-#endif
-
-
-#ifdef ZSTD_MULTITHREAD
-
-#include "threading.h"   /* pthread adaptation */
-
-/* A job is a function and an opaque argument */
-typedef struct POOL_job_s {
-    POOL_function function;
-    void *opaque;
-} POOL_job;
-
-struct POOL_ctx_s {
-    ZSTD_customMem customMem;
-    /* Keep track of the threads */
-    ZSTD_pthread_t* threads;
-    size_t threadCapacity;
-    size_t threadLimit;
-
-    /* The queue is a circular buffer */
-    POOL_job *queue;
-    size_t queueHead;
-    size_t queueTail;
-    size_t queueSize;
-
-    /* The number of threads working on jobs */
-    size_t numThreadsBusy;
-    /* Indicates if the queue is empty */
-    int queueEmpty;
-
-    /* The mutex protects the queue */
-    ZSTD_pthread_mutex_t queueMutex;
-    /* Condition variable for pushers to wait on when the queue is full */
-    ZSTD_pthread_cond_t queuePushCond;
-    /* Condition variables for poppers to wait on when the queue is empty */
-    ZSTD_pthread_cond_t queuePopCond;
-    /* Indicates if the queue is shutting down */
-    int shutdown;
-};
-
-/* POOL_thread() :
- * Worker thread for the thread pool.
- * Waits for jobs and executes them.
- * @returns : NULL on failure else non-null.
- */
-static void* POOL_thread(void* opaque) {
-    POOL_ctx* const ctx = (POOL_ctx*)opaque;
-    if (!ctx) { return NULL; }
-    for (;;) {
-        /* Lock the mutex and wait for a non-empty queue or until shutdown */
-        ZSTD_pthread_mutex_lock(&ctx->queueMutex);
-
-        while ( ctx->queueEmpty
-            || (ctx->numThreadsBusy >= ctx->threadLimit) ) {
-            if (ctx->shutdown) {
-                /* even if !queueEmpty (possible when numThreadsBusy >= threadLimit),
-                 * a few threads will be shut down while !queueEmpty,
-                 * but enough threads will remain active to finish the queue */
-                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-                return opaque;
-            }
-            ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
-        }
-        /* Pop a job off the queue */
-        {   POOL_job const job = ctx->queue[ctx->queueHead];
-            ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
-            ctx->numThreadsBusy++;
-            ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
-            /* Unlock the mutex, signal a pusher, and run the job */
-            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
-            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-
-            job.function(job.opaque);
-
-            /* If the intended queue size was 0, signal after finishing job */
-            ZSTD_pthread_mutex_lock(&ctx->queueMutex);
-            ctx->numThreadsBusy--;
-            if (ctx->queueSize == 1) {
-                ZSTD_pthread_cond_signal(&ctx->queuePushCond);
-            }
-            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-        }
-    }  /* for (;;) */
-    assert(0);  /* Unreachable */
-}
-
-POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
-    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
-}
-
-POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
-                               ZSTD_customMem customMem) {
-    POOL_ctx* ctx;
-    /* Check parameters */
-    if (!numThreads) { return NULL; }
-    /* Allocate the context and zero initialize */
-    ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem);
-    if (!ctx) { return NULL; }
-    /* Initialize the job queue.
-     * It needs one extra space since one space is wasted to differentiate
-     * empty and full queues.
-     */
-    ctx->queueSize = queueSize + 1;
-    ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem);
-    ctx->queueHead = 0;
-    ctx->queueTail = 0;
-    ctx->numThreadsBusy = 0;
-    ctx->queueEmpty = 1;
-    (void)ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
-    (void)ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
-    (void)ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
-    ctx->shutdown = 0;
-    /* Allocate space for the thread handles */
-    ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
-    ctx->threadCapacity = 0;
-    ctx->customMem = customMem;
-    /* Check for errors */
-    if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
-    /* Initialize the threads */
-    {   size_t i;
-        for (i = 0; i < numThreads; ++i) {
-            if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
-                ctx->threadCapacity = i;
-                POOL_free(ctx);
-                return NULL;
-        }   }
-        ctx->threadCapacity = numThreads;
-        ctx->threadLimit = numThreads;
-    }
-    return ctx;
-}
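The `queueSize + 1` allocation above is the classic one-slot-wasted circular buffer: `head == tail` means empty, and "full" is detected one slot early so the two states never collide. A minimal sketch of the same index arithmetic (illustrative only, not taken from pool.c):

```c
#include <stddef.h>

/* One extra slot lets us distinguish "empty" (head == tail) from
 * "full" (advancing tail would collide with head). */
typedef struct {
    size_t head;   /* next slot to pop  */
    size_t tail;   /* next slot to push */
    size_t size;   /* intended capacity + 1 */
} ring_t;

static int  ring_empty(const ring_t *r) { return r->head == r->tail; }
static int  ring_full (const ring_t *r) { return r->head == (r->tail + 1) % r->size; }
static void ring_push (ring_t *r)       { r->tail = (r->tail + 1) % r->size; }
static void ring_pop  (ring_t *r)       { r->head = (r->head + 1) % r->size; }
```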
-
-/*! POOL_join() :
-    Shutdown the queue, wake any sleeping threads, and join all of the threads.
-*/
-static void POOL_join(POOL_ctx* ctx) {
-    /* Shut down the queue */
-    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
-    ctx->shutdown = 1;
-    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-    /* Wake up sleeping threads */
-    ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
-    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
-    /* Join all of the threads */
-    {   size_t i;
-        for (i = 0; i < ctx->threadCapacity; ++i) {
-            ZSTD_pthread_join(ctx->threads[i], NULL);  /* note : could fail */
-    }   }
-}
-
-void POOL_free(POOL_ctx *ctx) {
-    if (!ctx) { return; }
-    POOL_join(ctx);
-    ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
-    ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
-    ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
-    ZSTD_free(ctx->queue, ctx->customMem);
-    ZSTD_free(ctx->threads, ctx->customMem);
-    ZSTD_free(ctx, ctx->customMem);
-}
-
-
-
-size_t POOL_sizeof(POOL_ctx *ctx) {
-    if (ctx==NULL) return 0;  /* supports sizeof NULL */
-    return sizeof(*ctx)
-        + ctx->queueSize * sizeof(POOL_job)
-        + ctx->threadCapacity * sizeof(ZSTD_pthread_t);
-}
-
-
-/* @return : 0 on success, 1 on error */
-static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
-{
-    if (numThreads <= ctx->threadCapacity) {
-        if (!numThreads) return 1;
-        ctx->threadLimit = numThreads;
-        return 0;
-    }
-    /* numThreads > threadCapacity */
-    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
-        if (!threadPool) return 1;
-        /* replace existing thread pool */
-        memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
-        ZSTD_free(ctx->threads, ctx->customMem);
-        ctx->threads = threadPool;
-        /* Initialize additional threads */
-        {   size_t threadId;
-            for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) {
-                if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) {
-                    ctx->threadCapacity = threadId;
-                    return 1;
-            }   }
-    }   }
-    /* successfully expanded */
-    ctx->threadCapacity = numThreads;
-    ctx->threadLimit = numThreads;
-    return 0;
-}
-
-/* @return : 0 on success, 1 on error */
-int POOL_resize(POOL_ctx* ctx, size_t numThreads)
-{
-    int result;
-    if (ctx==NULL) return 1;
-    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
-    result = POOL_resize_internal(ctx, numThreads);
-    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
-    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-    return result;
-}
-
-/**
- * Returns 1 if the queue is full and 0 otherwise.
- *
- * When queueSize is 1 (pool was created with an intended queueSize of 0),
- * then a queue is empty if there is a thread free _and_ no job is waiting.
- */
-static int isQueueFull(POOL_ctx const* ctx) {
-    if (ctx->queueSize > 1) {
-        return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
-    } else {
-        return (ctx->numThreadsBusy == ctx->threadLimit) ||
-               !ctx->queueEmpty;
-    }
-}
-
-
-static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
-{
-    POOL_job const job = {function, opaque};
-    assert(ctx != NULL);
-    if (ctx->shutdown) return;
-
-    ctx->queueEmpty = 0;
-    ctx->queue[ctx->queueTail] = job;
-    ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
-    ZSTD_pthread_cond_signal(&ctx->queuePopCond);
-}
-
-void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
-{
-    assert(ctx != NULL);
-    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
-    /* Wait until there is space in the queue for the new job */
-    while (isQueueFull(ctx) && (!ctx->shutdown)) {
-        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
-    }
-    POOL_add_internal(ctx, function, opaque);
-    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-}
-
-
-int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
-{
-    assert(ctx != NULL);
-    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
-    if (isQueueFull(ctx)) {
-        ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-        return 0;
-    }
-    POOL_add_internal(ctx, function, opaque);
-    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-    return 1;
-}
-
-
-#else  /* ZSTD_MULTITHREAD  not defined */
-
-/* ========================== */
-/* No multi-threading support */
-/* ========================== */
-
-
-/* We don't need any data, but if it is empty, malloc() might return NULL. */
-struct POOL_ctx_s {
-    int dummy;
-};
-static POOL_ctx g_ctx;
-
-POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
-    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
-}
-
-POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
-    (void)numThreads;
-    (void)queueSize;
-    (void)customMem;
-    return &g_ctx;
-}
-
-void POOL_free(POOL_ctx* ctx) {
-    assert(!ctx || ctx == &g_ctx);
-    (void)ctx;
-}
-
-int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
-    (void)ctx; (void)numThreads;
-    return 0;
-}
-
-void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
-    (void)ctx;
-    function(opaque);
-}
-
-int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
-    (void)ctx;
-    function(opaque);
-    return 1;
-}
-
-size_t POOL_sizeof(POOL_ctx* ctx) {
-    if (ctx==NULL) return 0;  /* supports sizeof NULL */
-    assert(ctx == &g_ctx);
-    return sizeof(*ctx);
-}
-
-#endif  /* ZSTD_MULTITHREAD */
diff --git a/vendor/github.com/DataDog/zstd/pool.h b/vendor/github.com/DataDog/zstd/pool.h
deleted file mode 100644
index 458d37f..0000000
--- a/vendor/github.com/DataDog/zstd/pool.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef POOL_H
-#define POOL_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-#include <stddef.h>   /* size_t */
-#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem */
-#include "zstd.h"
-
-typedef struct POOL_ctx_s POOL_ctx;
-
-/*! POOL_create() :
- *  Create a thread pool with at most `numThreads` threads.
- * `numThreads` must be at least 1.
- *  The maximum number of queued jobs before blocking is `queueSize`.
- * @return : POOL_ctx pointer on success, else NULL.
-*/
-POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);
-
-POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
-                               ZSTD_customMem customMem);
-
-/*! POOL_free() :
- *  Free a thread pool returned by POOL_create().
- */
-void POOL_free(POOL_ctx* ctx);
-
-/*! POOL_resize() :
- *  Expands or shrinks pool's number of threads.
- *  This is more efficient than releasing + creating a new context,
- *  since it tries to preserve and re-use existing threads.
- * `numThreads` must be at least 1.
- * @return : 0 when resize was successful,
- *           !0 (typically 1) if there is an error.
- *    note : only numThreads can be resized, queueSize remains unchanged.
- */
-int POOL_resize(POOL_ctx* ctx, size_t numThreads);
-
-/*! POOL_sizeof() :
- * @return threadpool memory usage
- *  note : compatible with NULL (returns 0 in this case)
- */
-size_t POOL_sizeof(POOL_ctx* ctx);
-
-/*! POOL_function :
- *  The function type that can be added to a thread pool.
- */
-typedef void (*POOL_function)(void*);
-
-/*! POOL_add() :
- *  Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
- *  Possibly blocks until there is room in the queue.
- *  Note : The function may be executed asynchronously,
- *         therefore `opaque` must remain valid until the function has completed.
- */
-void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
-
-
-/*! POOL_tryAdd() :
- *  Add the job `function(opaque)` to thread pool _if_ a worker is available.
- *  Returns immediately even if not (does not block).
- * @return : 1 if successful, 0 if not.
- */
-int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);
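Taken together, the declarations above give the usual create / add / free lifecycle. A minimal usage sketch, assuming this header is on the include path (the `printJob` helper and the parameter values are only for illustration):

```c
#include <stdio.h>
#include "pool.h"   /* the header above */

/* A POOL_function takes a single opaque pointer. */
static void printJob(void *opaque)
{
    printf("job %d\n", *(int *)opaque);
}

int main(void)
{
    int ids[4] = {0, 1, 2, 3};
    POOL_ctx *pool = POOL_create(2 /*numThreads*/, 8 /*queueSize*/);
    if (pool == NULL) return 1;

    for (int i = 0; i < 4; ++i) {
        /* May block while the queue is full; `opaque` must stay valid
         * until the job has run (ids[] outlives POOL_free below). */
        POOL_add(pool, printJob, &ids[i]);
    }

    POOL_free(pool);   /* shuts the queue down and joins the workers */
    return 0;
}
```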
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif
diff --git a/vendor/github.com/DataDog/zstd/threading.c b/vendor/github.com/DataDog/zstd/threading.c
deleted file mode 100644
index f3d4fa8..0000000
--- a/vendor/github.com/DataDog/zstd/threading.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Copyright (c) 2016 Tino Reichardt
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- *
- * You can contact the author at:
- * - zstdmt source repository: https://github.com/mcmilk/zstdmt
- */
-
-/**
- * This file holds wrappers for systems that do not support pthreads
- */
-
-/* create fake symbol to avoid empty translation unit warning */
-int g_ZSTD_threading_useless_symbol;
-
-#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
-
-/**
- * Windows minimalist Pthread Wrapper, based on :
- * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
- */
-
-
-/* ===  Dependencies  === */
-#include <process.h>
-#include <errno.h>
-#include "threading.h"
-
-
-/* ===  Implementation  === */
-
-static unsigned __stdcall worker(void *arg)
-{
-    ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg;
-    thread->arg = thread->start_routine(thread->arg);
-    return 0;
-}
-
-int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
-            void* (*start_routine) (void*), void* arg)
-{
-    (void)unused;
-    thread->arg = arg;
-    thread->start_routine = start_routine;
-    thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL);
-
-    if (!thread->handle)
-        return errno;
-    else
-        return 0;
-}
-
-int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)
-{
-    DWORD result;
-
-    if (!thread.handle) return 0;
-
-    result = WaitForSingleObject(thread.handle, INFINITE);
-    switch (result) {
-    case WAIT_OBJECT_0:
-        if (value_ptr) *value_ptr = thread.arg;
-        return 0;
-    case WAIT_ABANDONED:
-        return EINVAL;
-    default:
-        return GetLastError();
-    }
-}
-
-#endif   /* ZSTD_MULTITHREAD */
diff --git a/vendor/github.com/DataDog/zstd/threading.h b/vendor/github.com/DataDog/zstd/threading.h
deleted file mode 100644
index d806c89..0000000
--- a/vendor/github.com/DataDog/zstd/threading.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Copyright (c) 2016 Tino Reichardt
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- *
- * You can contact the author at:
- * - zstdmt source repository: https://github.com/mcmilk/zstdmt
- */
-
-#ifndef THREADING_H_938743
-#define THREADING_H_938743
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
-
-/**
- * Windows minimalist Pthread Wrapper, based on :
- * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
- */
-#ifdef WINVER
-#  undef WINVER
-#endif
-#define WINVER       0x0600
-
-#ifdef _WIN32_WINNT
-#  undef _WIN32_WINNT
-#endif
-#define _WIN32_WINNT 0x0600
-
-#ifndef WIN32_LEAN_AND_MEAN
-#  define WIN32_LEAN_AND_MEAN
-#endif
-
-#undef ERROR   /* reported already defined on VS 2015 (Rich Geldreich) */
-#include <windows.h>
-#undef ERROR
-#define ERROR(name) ZSTD_ERROR(name)
-
-
-/* mutex */
-#define ZSTD_pthread_mutex_t           CRITICAL_SECTION
-#define ZSTD_pthread_mutex_init(a, b)  ((void)(b), InitializeCriticalSection((a)), 0)
-#define ZSTD_pthread_mutex_destroy(a)  DeleteCriticalSection((a))
-#define ZSTD_pthread_mutex_lock(a)     EnterCriticalSection((a))
-#define ZSTD_pthread_mutex_unlock(a)   LeaveCriticalSection((a))
-
-/* condition variable */
-#define ZSTD_pthread_cond_t             CONDITION_VARIABLE
-#define ZSTD_pthread_cond_init(a, b)    ((void)(b), InitializeConditionVariable((a)), 0)
-#define ZSTD_pthread_cond_destroy(a)    ((void)(a))
-#define ZSTD_pthread_cond_wait(a, b)    SleepConditionVariableCS((a), (b), INFINITE)
-#define ZSTD_pthread_cond_signal(a)     WakeConditionVariable((a))
-#define ZSTD_pthread_cond_broadcast(a)  WakeAllConditionVariable((a))
-
-/* ZSTD_pthread_create() and ZSTD_pthread_join() */
-typedef struct {
-    HANDLE handle;
-    void* (*start_routine)(void*);
-    void* arg;
-} ZSTD_pthread_t;
-
-int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
-                   void* (*start_routine) (void*), void* arg);
-
-int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
-
-/**
- * add here more wrappers as required
- */
-
-
-#elif defined(ZSTD_MULTITHREAD)   /* posix assumed ; need a better detection method */
-/* ===   POSIX Systems   === */
-#  include <pthread.h>
-
-#define ZSTD_pthread_mutex_t            pthread_mutex_t
-#define ZSTD_pthread_mutex_init(a, b)   pthread_mutex_init((a), (b))
-#define ZSTD_pthread_mutex_destroy(a)   pthread_mutex_destroy((a))
-#define ZSTD_pthread_mutex_lock(a)      pthread_mutex_lock((a))
-#define ZSTD_pthread_mutex_unlock(a)    pthread_mutex_unlock((a))
-
-#define ZSTD_pthread_cond_t             pthread_cond_t
-#define ZSTD_pthread_cond_init(a, b)    pthread_cond_init((a), (b))
-#define ZSTD_pthread_cond_destroy(a)    pthread_cond_destroy((a))
-#define ZSTD_pthread_cond_wait(a, b)    pthread_cond_wait((a), (b))
-#define ZSTD_pthread_cond_signal(a)     pthread_cond_signal((a))
-#define ZSTD_pthread_cond_broadcast(a)  pthread_cond_broadcast((a))
-
-#define ZSTD_pthread_t                  pthread_t
-#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
-#define ZSTD_pthread_join(a, b)         pthread_join((a),(b))
-
-#else  /* ZSTD_MULTITHREAD not defined */
-/* No multithreading support */
-
-typedef int ZSTD_pthread_mutex_t;
-#define ZSTD_pthread_mutex_init(a, b)   ((void)(a), (void)(b), 0)
-#define ZSTD_pthread_mutex_destroy(a)   ((void)(a))
-#define ZSTD_pthread_mutex_lock(a)      ((void)(a))
-#define ZSTD_pthread_mutex_unlock(a)    ((void)(a))
-
-typedef int ZSTD_pthread_cond_t;
-#define ZSTD_pthread_cond_init(a, b)    ((void)(a), (void)(b), 0)
-#define ZSTD_pthread_cond_destroy(a)    ((void)(a))
-#define ZSTD_pthread_cond_wait(a, b)    ((void)(a), (void)(b))
-#define ZSTD_pthread_cond_signal(a)     ((void)(a))
-#define ZSTD_pthread_cond_broadcast(a)  ((void)(a))
-
-/* do not use ZSTD_pthread_t */
-
-#endif /* ZSTD_MULTITHREAD */
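The point of the three branches above is that callers use a single spelling everywhere. A usage sketch under that assumption (the counter helpers are illustrative and not part of this header):

```c
#include "threading.h"   /* the header above */

/* The same calls compile against the Windows wrapper, the POSIX
 * mapping, or the single-threaded no-op macros. */
static ZSTD_pthread_mutex_t g_lock;
static int g_counter = 0;

static void counter_init(void)    { (void)ZSTD_pthread_mutex_init(&g_lock, NULL); }
static void counter_destroy(void) { ZSTD_pthread_mutex_destroy(&g_lock); }

static void counter_bump(void)
{
    ZSTD_pthread_mutex_lock(&g_lock);
    g_counter++;
    ZSTD_pthread_mutex_unlock(&g_lock);
}
```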
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* THREADING_H_938743 */
diff --git a/vendor/github.com/DataDog/zstd/travis_test_32.sh b/vendor/github.com/DataDog/zstd/travis_test_32.sh
deleted file mode 100644
index d29c86c..0000000
--- a/vendor/github.com/DataDog/zstd/travis_test_32.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-# Get utilities
-yum -y -q -e 0 install wget tar unzip gcc
-
-# Get Go
-wget -q https://dl.google.com/go/go1.11.1.linux-386.tar.gz
-tar -C /usr/local -xzf go1.11.1.linux-386.tar.gz
-export PATH=$PATH:/usr/local/go/bin
-
-# Get payload
-wget -q https://github.com/DataDog/zstd/files/2246767/mr.zip
-unzip mr.zip
-
-# Build and run tests
-cd zstd
-go build
-PAYLOAD=$(pwd)/mr go test -v
-PAYLOAD=$(pwd)/mr go test -bench .
diff --git a/vendor/github.com/DataDog/zstd/update.txt b/vendor/github.com/DataDog/zstd/update.txt
deleted file mode 100644
index 1de939f..0000000
--- a/vendor/github.com/DataDog/zstd/update.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-./lib/common/bitstream.h
-./lib/common/compiler.h
-./lib/compress/zstd_compress_internal.h
-./lib/compress/zstd_fast.h
-./lib/compress/zstd_double_fast.h
-./lib/compress/zstd_lazy.h
-./lib/compress/zstd_ldm.h
-./lib/dictBuilder/cover.c
-./lib/dictBuilder/divsufsort.c
-./lib/dictBuilder/divsufsort.h
-./lib/common/entropy_common.c
-./lib/common/error_private.c
-./lib/common/error_private.h
-./lib/compress/fse_compress.c
-./lib/common/fse_decompress.c
-./lib/common/fse.h
-./lib/compress/huf_compress.c
-./lib/decompress/huf_decompress.c
-./lib/common/huf.h
-./lib/common/mem.h
-./lib/common/pool.c
-./lib/common/pool.h
-./lib/common/threading.c
-./lib/common/threading.h
-./lib/common/xxhash.c
-./lib/common/xxhash.h
-./lib/deprecated/zbuff_common.c
-./lib/deprecated/zbuff_compress.c
-./lib/deprecated/zbuff_decompress.c
-./lib/deprecated/zbuff.h
-./lib/dictBuilder/zdict.c
-./lib/dictBuilder/zdict.h
-./lib/common/zstd_common.c
-./lib/compress/zstd_compress.c
-./lib/decompress/zstd_decompress.c
-./lib/common/zstd_errors.h
-./lib/zstd.h
-./lib/common/zstd_internal.h
-./lib/legacy/zstd_legacy.h
-./lib/compress/zstd_opt.c
-./lib/compress/zstd_opt.h
-./lib/legacy/zstd_v01.c
-./lib/legacy/zstd_v01.h
-./lib/legacy/zstd_v02.c
-./lib/legacy/zstd_v02.h
-./lib/legacy/zstd_v03.c
-./lib/legacy/zstd_v03.h
-./lib/legacy/zstd_v04.c
-./lib/legacy/zstd_v04.h
-./lib/legacy/zstd_v05.c
-./lib/legacy/zstd_v05.h
-./lib/legacy/zstd_v06.c
-./lib/legacy/zstd_v06.h
-./lib/legacy/zstd_v07.c
-./lib/legacy/zstd_v07.h
-
diff --git a/vendor/github.com/DataDog/zstd/xxhash.c b/vendor/github.com/DataDog/zstd/xxhash.c
deleted file mode 100644
index 30599aa..0000000
--- a/vendor/github.com/DataDog/zstd/xxhash.c
+++ /dev/null
@@ -1,876 +0,0 @@
-/*
-*  xxHash - Fast Hash algorithm
-*  Copyright (C) 2012-2016, Yann Collet
-*
-*  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-*
-*  Redistribution and use in source and binary forms, with or without
-*  modification, are permitted provided that the following conditions are
-*  met:
-*
-*  * Redistributions of source code must retain the above copyright
-*  notice, this list of conditions and the following disclaimer.
-*  * Redistributions in binary form must reproduce the above
-*  copyright notice, this list of conditions and the following disclaimer
-*  in the documentation and/or other materials provided with the
-*  distribution.
-*
-*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-*  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-*  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-*  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-*  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-*  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-*  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-*  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-*  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*
-*  You can contact the author at :
-*  - xxHash homepage: http://www.xxhash.com
-*  - xxHash source repository : https://github.com/Cyan4973/xxHash
-*/
-
-
-/* *************************************
-*  Tuning parameters
-***************************************/
-/*!XXH_FORCE_MEMORY_ACCESS :
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The switch below allows selecting a different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
- *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method doesn't depend on compiler extensions but violates the C standard.
- *            It can generate buggy code on targets which do not support unaligned memory accesses.
- *            In some circumstances, it's the only known way to get the most performance (e.g. GCC + ARMv6).
- * See http://stackoverflow.com/a/32095106/646947 for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-#    define XXH_FORCE_MEMORY_ACCESS 2
-#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
-  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
-#    define XXH_FORCE_MEMORY_ACCESS 1
-#  endif
-#endif
-
-/*!XXH_ACCEPT_NULL_INPUT_POINTER :
- * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
- * When this option is enabled, xxHash output for null input pointers will be the same as for a zero-length input.
- * By default, this option is disabled. To enable it, uncomment the define below:
- */
-/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
-
-/*!XXH_FORCE_NATIVE_FORMAT :
- * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
- * Results are therefore identical on little-endian and big-endian CPUs.
- * This comes at a performance cost on big-endian CPUs, since some swapping is required to emulate the little-endian format.
- * Should endian-independence be of no importance for your application, you may set the #define below to 1
- * to improve speed on big-endian CPUs.
- * This option has no impact on little-endian CPUs.
- */
-#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
-#  define XXH_FORCE_NATIVE_FORMAT 0
-#endif
-
-/*!XXH_FORCE_ALIGN_CHECK :
- * This is a minor performance trick, only useful with lots of very small keys.
- * It means : check for aligned/unaligned input.
- * The check costs one initial branch per hash; set to 0 when the input data
- * is guaranteed to be aligned.
- */
-#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
-#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
-#    define XXH_FORCE_ALIGN_CHECK 0
-#  else
-#    define XXH_FORCE_ALIGN_CHECK 1
-#  endif
-#endif
-
-
-/* *************************************
-*  Includes & Memory related functions
-***************************************/
-/* Modify the local functions below should you wish to use some other memory routines */
-/* for malloc(), free() */
-#include <stdlib.h>
-#include <stddef.h>     /* size_t */
-static void* XXH_malloc(size_t s) { return malloc(s); }
-static void  XXH_free  (void* p)  { free(p); }
-/* for memcpy() */
-#include <string.h>
-static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
-
-#ifndef XXH_STATIC_LINKING_ONLY
-#  define XXH_STATIC_LINKING_ONLY
-#endif
-#include "xxhash.h"
-
-
-/* *************************************
-*  Compiler Specific Options
-***************************************/
-#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#  define INLINE_KEYWORD inline
-#else
-#  define INLINE_KEYWORD
-#endif
-
-#if defined(__GNUC__)
-#  define FORCE_INLINE_ATTR __attribute__((always_inline))
-#elif defined(_MSC_VER)
-#  define FORCE_INLINE_ATTR __forceinline
-#else
-#  define FORCE_INLINE_ATTR
-#endif
-
-#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
-
-
-#ifdef _MSC_VER
-#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
-#endif
-
-
-/* *************************************
-*  Basic Types
-***************************************/
-#ifndef MEM_MODULE
-# define MEM_MODULE
-# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-#   include <stdint.h>
-    typedef uint8_t  BYTE;
-    typedef uint16_t U16;
-    typedef uint32_t U32;
-    typedef  int32_t S32;
-    typedef uint64_t U64;
-#  else
-    typedef unsigned char      BYTE;
-    typedef unsigned short     U16;
-    typedef unsigned int       U32;
-    typedef   signed int       S32;
-    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
-#  endif
-#endif
-
-
-#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
-
-/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
-static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
-static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
-
-#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;
-
-static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
-
-#else
-
-/* portable and safe solution. Generally efficient.
- * see : http://stackoverflow.com/a/32095106/646947
- */
-
-static U32 XXH_read32(const void* memPtr)
-{
-    U32 val;
-    memcpy(&val, memPtr, sizeof(val));
-    return val;
-}
-
-static U64 XXH_read64(const void* memPtr)
-{
-    U64 val;
-    memcpy(&val, memPtr, sizeof(val));
-    return val;
-}
-
-#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
-
-
-/* ****************************************
-*  Compiler-specific Functions and Macros
-******************************************/
-#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-
-/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
-#if defined(_MSC_VER)
-#  define XXH_rotl32(x,r) _rotl(x,r)
-#  define XXH_rotl64(x,r) _rotl64(x,r)
-#else
-#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
-#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
-#endif
-
-#if defined(_MSC_VER)     /* Visual Studio */
-#  define XXH_swap32 _byteswap_ulong
-#  define XXH_swap64 _byteswap_uint64
-#elif GCC_VERSION >= 403
-#  define XXH_swap32 __builtin_bswap32
-#  define XXH_swap64 __builtin_bswap64
-#else
-static U32 XXH_swap32 (U32 x)
-{
-    return  ((x << 24) & 0xff000000 ) |
-            ((x <<  8) & 0x00ff0000 ) |
-            ((x >>  8) & 0x0000ff00 ) |
-            ((x >> 24) & 0x000000ff );
-}
-static U64 XXH_swap64 (U64 x)
-{
-    return  ((x << 56) & 0xff00000000000000ULL) |
-            ((x << 40) & 0x00ff000000000000ULL) |
-            ((x << 24) & 0x0000ff0000000000ULL) |
-            ((x << 8)  & 0x000000ff00000000ULL) |
-            ((x >> 8)  & 0x00000000ff000000ULL) |
-            ((x >> 24) & 0x0000000000ff0000ULL) |
-            ((x >> 40) & 0x000000000000ff00ULL) |
-            ((x >> 56) & 0x00000000000000ffULL);
-}
-#endif
-
-
-/* *************************************
-*  Architecture Macros
-***************************************/
-typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
-
-/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
-#ifndef XXH_CPU_LITTLE_ENDIAN
-    static const int g_one = 1;
-#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))
-#endif
-
-
-/* ***************************
-*  Memory reads
-*****************************/
-typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
-
-FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
-{
-    if (align==XXH_unaligned)
-        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
-    else
-        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
-}
-
-FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
-{
-    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
-}
-
-static U32 XXH_readBE32(const void* ptr)
-{
-    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
-}
-
-FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
-{
-    if (align==XXH_unaligned)
-        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
-    else
-        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
-}
-
-FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
-{
-    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
-}
-
-static U64 XXH_readBE64(const void* ptr)
-{
-    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
-}
-
-
-/* *************************************
-*  Macros
-***************************************/
-#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }    /* use only *after* variable declarations */
-
-
-/* *************************************
-*  Constants
-***************************************/
-static const U32 PRIME32_1 = 2654435761U;
-static const U32 PRIME32_2 = 2246822519U;
-static const U32 PRIME32_3 = 3266489917U;
-static const U32 PRIME32_4 =  668265263U;
-static const U32 PRIME32_5 =  374761393U;
-
-static const U64 PRIME64_1 = 11400714785074694791ULL;
-static const U64 PRIME64_2 = 14029467366897019727ULL;
-static const U64 PRIME64_3 =  1609587929392839161ULL;
-static const U64 PRIME64_4 =  9650029242287828579ULL;
-static const U64 PRIME64_5 =  2870177450012600261ULL;
-
-XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
-
-
-/* **************************
-*  Utils
-****************************/
-XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
-{
-    memcpy(dstState, srcState, sizeof(*dstState));
-}
-
-XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
-{
-    memcpy(dstState, srcState, sizeof(*dstState));
-}
-
-
-/* ***************************
-*  Simple Hash Functions
-*****************************/
-
-static U32 XXH32_round(U32 seed, U32 input)
-{
-    seed += input * PRIME32_2;
-    seed  = XXH_rotl32(seed, 13);
-    seed *= PRIME32_1;
-    return seed;
-}
-
-FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
-{
-    const BYTE* p = (const BYTE*)input;
-    const BYTE* bEnd = p + len;
-    U32 h32;
-#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
-
-#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
-    if (p==NULL) {
-        len=0;
-        bEnd=p=(const BYTE*)(size_t)16;
-    }
-#endif
-
-    if (len>=16) {
-        const BYTE* const limit = bEnd - 16;
-        U32 v1 = seed + PRIME32_1 + PRIME32_2;
-        U32 v2 = seed + PRIME32_2;
-        U32 v3 = seed + 0;
-        U32 v4 = seed - PRIME32_1;
-
-        do {
-            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
-            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
-            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
-            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
-        } while (p<=limit);
-
-        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
-    } else {
-        h32  = seed + PRIME32_5;
-    }
-
-    h32 += (U32) len;
-
-    while (p+4<=bEnd) {
-        h32 += XXH_get32bits(p) * PRIME32_3;
-        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h32 += (*p) * PRIME32_5;
-        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
-        p++;
-    }
-
-    h32 ^= h32 >> 15;
-    h32 *= PRIME32_2;
-    h32 ^= h32 >> 13;
-    h32 *= PRIME32_3;
-    h32 ^= h32 >> 16;
-
-    return h32;
-}
-
-
-XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
-{
-#if 0
-    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
-    XXH32_CREATESTATE_STATIC(state);
-    XXH32_reset(state, seed);
-    XXH32_update(state, input, len);
-    return XXH32_digest(state);
-#else
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if (XXH_FORCE_ALIGN_CHECK) {
-        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
-            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
-            else
-                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
-    }   }
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
-    else
-        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
-#endif
-}
-
-
-static U64 XXH64_round(U64 acc, U64 input)
-{
-    acc += input * PRIME64_2;
-    acc  = XXH_rotl64(acc, 31);
-    acc *= PRIME64_1;
-    return acc;
-}
-
-static U64 XXH64_mergeRound(U64 acc, U64 val)
-{
-    val  = XXH64_round(0, val);
-    acc ^= val;
-    acc  = acc * PRIME64_1 + PRIME64_4;
-    return acc;
-}
-
-FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
-{
-    const BYTE* p = (const BYTE*)input;
-    const BYTE* const bEnd = p + len;
-    U64 h64;
-#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
-
-#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
-    if (p==NULL) {
-        len=0;
-        bEnd=p=(const BYTE*)(size_t)32;
-    }
-#endif
-
-    if (len>=32) {
-        const BYTE* const limit = bEnd - 32;
-        U64 v1 = seed + PRIME64_1 + PRIME64_2;
-        U64 v2 = seed + PRIME64_2;
-        U64 v3 = seed + 0;
-        U64 v4 = seed - PRIME64_1;
-
-        do {
-            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
-            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
-            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
-            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
-        } while (p<=limit);
-
-        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
-        h64 = XXH64_mergeRound(h64, v1);
-        h64 = XXH64_mergeRound(h64, v2);
-        h64 = XXH64_mergeRound(h64, v3);
-        h64 = XXH64_mergeRound(h64, v4);
-
-    } else {
-        h64  = seed + PRIME64_5;
-    }
-
-    h64 += (U64) len;
-
-    while (p+8<=bEnd) {
-        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
-        h64 ^= k1;
-        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
-        p+=8;
-    }
-
-    if (p+4<=bEnd) {
-        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
-        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h64 ^= (*p) * PRIME64_5;
-        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
-        p++;
-    }
-
-    h64 ^= h64 >> 33;
-    h64 *= PRIME64_2;
-    h64 ^= h64 >> 29;
-    h64 *= PRIME64_3;
-    h64 ^= h64 >> 32;
-
-    return h64;
-}
-
-
-XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
-{
-#if 0
-    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
-    XXH64_CREATESTATE_STATIC(state);
-    XXH64_reset(state, seed);
-    XXH64_update(state, input, len);
-    return XXH64_digest(state);
-#else
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if (XXH_FORCE_ALIGN_CHECK) {
-        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
-            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
-            else
-                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
-    }   }
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
-    else
-        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
-#endif
-}
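For reference, the one-shot entry points XXH32() and XXH64() defined above take a buffer, its length, and a seed. A usage sketch, assuming xxhash.h from this package is available:

```c
#include <stdio.h>
#include <string.h>
#include "xxhash.h"   /* from this package */

int main(void)
{
    const char *msg = "hello world";
    unsigned int       h32 = XXH32(msg, strlen(msg), 0 /*seed*/);
    unsigned long long h64 = XXH64(msg, strlen(msg), 0 /*seed*/);
    printf("XXH32=%08x  XXH64=%016llx\n", h32, h64);
    return 0;
}
```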
-
-
-/* **************************************************
-*  Advanced Hash Functions
-****************************************************/
-
-XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
-{
-    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
-}
-XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
-{
-    XXH_free(statePtr);
-    return XXH_OK;
-}
-
-XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
-{
-    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
-}
-XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
-{
-    XXH_free(statePtr);
-    return XXH_OK;
-}
-
-
-/*** Hash feed ***/
-
-XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
-{
-    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
-    memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
-    state.v1 = seed + PRIME32_1 + PRIME32_2;
-    state.v2 = seed + PRIME32_2;
-    state.v3 = seed + 0;
-    state.v4 = seed - PRIME32_1;
-    memcpy(statePtr, &state, sizeof(state));
-    return XXH_OK;
-}
-
-
-XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
-{
-    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
-    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
-    state.v1 = seed + PRIME64_1 + PRIME64_2;
-    state.v2 = seed + PRIME64_2;
-    state.v3 = seed + 0;
-    state.v4 = seed - PRIME64_1;
-    memcpy(statePtr, &state, sizeof(state));
-    return XXH_OK;
-}
-
-
-FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
-{
-    const BYTE* p = (const BYTE*)input;
-    const BYTE* const bEnd = p + len;
-
-#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
-    if (input==NULL) return XXH_ERROR;
-#endif
-
-    state->total_len_32 += (unsigned)len;
-    state->large_len |= (len>=16) | (state->total_len_32>=16);
-
-    if (state->memsize + len < 16)  {   /* fill in tmp buffer */
-        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
-        state->memsize += (unsigned)len;
-        return XXH_OK;
-    }
-
-    if (state->memsize) {   /* some data left from previous update */
-        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
-        {   const U32* p32 = state->mem32;
-            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
-            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
-            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
-            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
-        }
-        p += 16-state->memsize;
-        state->memsize = 0;
-    }
-
-    if (p <= bEnd-16) {
-        const BYTE* const limit = bEnd - 16;
-        U32 v1 = state->v1;
-        U32 v2 = state->v2;
-        U32 v3 = state->v3;
-        U32 v4 = state->v4;
-
-        do {
-            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
-            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
-            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
-            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
-        } while (p<=limit);
-
-        state->v1 = v1;
-        state->v2 = v2;
-        state->v3 = v3;
-        state->v4 = v4;
-    }
-
-    if (p < bEnd) {
-        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
-        state->memsize = (unsigned)(bEnd-p);
-    }
-
-    return XXH_OK;
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
-{
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
-    else
-        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
-}
-
-
-
-FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
-{
-    const BYTE * p = (const BYTE*)state->mem32;
-    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
-    U32 h32;
-
-    if (state->large_len) {
-        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
-    } else {
-        h32 = state->v3 /* == seed */ + PRIME32_5;
-    }
-
-    h32 += state->total_len_32;
-
-    while (p+4<=bEnd) {
-        h32 += XXH_readLE32(p, endian) * PRIME32_3;
-        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h32 += (*p) * PRIME32_5;
-        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
-        p++;
-    }
-
-    h32 ^= h32 >> 15;
-    h32 *= PRIME32_2;
-    h32 ^= h32 >> 13;
-    h32 *= PRIME32_3;
-    h32 ^= h32 >> 16;
-
-    return h32;
-}
-
-
-XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
-{
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH32_digest_endian(state_in, XXH_littleEndian);
-    else
-        return XXH32_digest_endian(state_in, XXH_bigEndian);
-}
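The streaming variant (createState / reset / update / digest / freeState) produces the same value as the one-shot call over the concatenated input. A sketch under the same xxhash.h assumption (the `hash_two_chunks` wrapper is illustrative only):

```c
#include <stddef.h>
#include "xxhash.h"   /* from this package */

/* Hash data that arrives in two chunks; the result equals the one-shot
 * XXH32() over the concatenated buffers. */
unsigned int hash_two_chunks(const void *a, size_t na,
                             const void *b, size_t nb)
{
    XXH32_state_t *st = XXH32_createState();
    unsigned int h;
    if (st == NULL) return 0;

    XXH32_reset(st, 0 /*seed*/);
    XXH32_update(st, a, na);
    XXH32_update(st, b, nb);
    h = XXH32_digest(st);

    XXH32_freeState(st);
    return h;
}
```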
-
-
-
-/* **** XXH64 **** */
-
-FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
-{
-    const BYTE* p = (const BYTE*)input;
-    const BYTE* const bEnd = p + len;
-
-#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
-    if (input==NULL) return XXH_ERROR;
-#endif
-
-    state->total_len += len;
-
-    if (state->memsize + len < 32) {  /* fill in tmp buffer */
-        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
-        state->memsize += (U32)len;
-        return XXH_OK;
-    }
-
-    if (state->memsize) {   /* tmp buffer is full */
-        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
-        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
-        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
-        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
-        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
-        p += 32-state->memsize;
-        state->memsize = 0;
-    }
-
-    if (p+32 <= bEnd) {
-        const BYTE* const limit = bEnd - 32;
-        U64 v1 = state->v1;
-        U64 v2 = state->v2;
-        U64 v3 = state->v3;
-        U64 v4 = state->v4;
-
-        do {
-            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
-            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
-            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
-            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
-        } while (p<=limit);
-
-        state->v1 = v1;
-        state->v2 = v2;
-        state->v3 = v3;
-        state->v4 = v4;
-    }
-
-    if (p < bEnd) {
-        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
-        state->memsize = (unsigned)(bEnd-p);
-    }
-
-    return XXH_OK;
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
-{
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
-    else
-        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
-}
-
-
-
-FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
-{
-    const BYTE * p = (const BYTE*)state->mem64;
-    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
-    U64 h64;
-
-    if (state->total_len >= 32) {
-        U64 const v1 = state->v1;
-        U64 const v2 = state->v2;
-        U64 const v3 = state->v3;
-        U64 const v4 = state->v4;
-
-        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
-        h64 = XXH64_mergeRound(h64, v1);
-        h64 = XXH64_mergeRound(h64, v2);
-        h64 = XXH64_mergeRound(h64, v3);
-        h64 = XXH64_mergeRound(h64, v4);
-    } else {
-        h64  = state->v3 + PRIME64_5;
-    }
-
-    h64 += (U64) state->total_len;
-
-    while (p+8<=bEnd) {
-        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
-        h64 ^= k1;
-        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
-        p+=8;
-    }
-
-    if (p+4<=bEnd) {
-        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
-        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
-        p+=4;
-    }
-
-    while (p<bEnd) {
-        h64 ^= (*p) * PRIME64_5;
-        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
-        p++;
-    }
-
-    h64 ^= h64 >> 33;
-    h64 *= PRIME64_2;
-    h64 ^= h64 >> 29;
-    h64 *= PRIME64_3;
-    h64 ^= h64 >> 32;
-
-    return h64;
-}
-
-
-XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
-{
-    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
-    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
-        return XXH64_digest_endian(state_in, XXH_littleEndian);
-    else
-        return XXH64_digest_endian(state_in, XXH_bigEndian);
-}
-
-
-/* **************************
-*  Canonical representation
-****************************/
-
-/*! Default XXH result types are basic unsigned 32 and 64 bits.
-*   The canonical representation follows human-readable write convention, aka big-endian (large digits first).
-*   These functions allow transformation of hash result into and from its canonical format.
-*   This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
-*/
-
-XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
-{
-    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
-    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
-    memcpy(dst, &hash, sizeof(*dst));
-}
-
-XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
-{
-    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
-    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
-    memcpy(dst, &hash, sizeof(*dst));
-}
-
-XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
-{
-    return XXH_readBE32(src);
-}
-
-XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
-{
-    return XXH_readBE64(src);
-}
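Editor's note on the canonical-representation helpers that close the removed xxhash.c above: they only byte-swap on little-endian hosts before storing, so the serialized bytes are portable. A minimal sketch of the intended round-trip (hash -> canonical bytes -> hash), assuming only the public declarations from xxhash.h; the helper name and message are illustrative, not part of the library:

#include <stdio.h>
#include "xxhash.h"

int main(void)
{
    const char msg[] = "canonical example";
    XXH64_hash_t const h = XXH64(msg, sizeof(msg) - 1, 0);

    /* Serialize in canonical (big-endian) form, e.g. for a file header. */
    XXH64_canonical_t canon;
    XXH64_canonicalFromHash(&canon, h);

    /* Read it back on any platform; the value is identical regardless of endianness. */
    XXH64_hash_t const back = XXH64_hashFromCanonical(&canon);
    printf("round-trip ok: %d\n", back == h);
    return 0;
}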
diff --git a/vendor/github.com/DataDog/zstd/xxhash.h b/vendor/github.com/DataDog/zstd/xxhash.h
deleted file mode 100644
index 9bad1f5..0000000
--- a/vendor/github.com/DataDog/zstd/xxhash.h
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
-   xxHash - Extremely Fast Hash algorithm
-   Header File
-   Copyright (C) 2012-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - xxHash source repository : https://github.com/Cyan4973/xxHash
-*/
-
-/* Notice extracted from xxHash homepage :
-
-xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
-It also successfully passes all tests from the SMHasher suite.
-
-Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
-
-Name            Speed       Q.Score   Author
-xxHash          5.4 GB/s     10
-CrapWow         3.2 GB/s      2       Andrew
-MurmurHash 3a   2.7 GB/s     10       Austin Appleby
-SpookyHash      2.0 GB/s     10       Bob Jenkins
-SBox            1.4 GB/s      9       Bret Mulvey
-Lookup3         1.2 GB/s      9       Bob Jenkins
-SuperFastHash   1.2 GB/s      1       Paul Hsieh
-CityHash64      1.05 GB/s    10       Pike & Alakuijala
-FNV             0.55 GB/s     5       Fowler, Noll, Vo
-CRC32           0.43 GB/s     9
-MD5-32          0.33 GB/s    10       Ronald L. Rivest
-SHA1-32         0.28 GB/s    10
-
-Q.Score is a measure of quality of the hash function.
-It depends on successfully passing SMHasher test set.
-10 is a perfect score.
-
-A 64-bit version, named XXH64, is available since r35.
-It offers much better speed, but only for 64-bit applications.
-Name     Speed on 64 bits    Speed on 32 bits
-XXH64       13.8 GB/s            1.9 GB/s
-XXH32        6.8 GB/s            6.0 GB/s
-*/
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#ifndef XXHASH_H_5627135585666179
-#define XXHASH_H_5627135585666179 1
-
-
-/* ****************************
-*  Definitions
-******************************/
-#include <stddef.h>   /* size_t */
-typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
-
-
-/* ****************************
-*  API modifier
-******************************/
-/** XXH_PRIVATE_API
-*   This is useful if you want to include xxhash functions in `static` mode
-*   in order to inline them, and remove their symbol from the public list.
-*   Methodology :
-*     #define XXH_PRIVATE_API
-*     #include "xxhash.h"
-*   `xxhash.c` is automatically included.
-*   It's not useful to compile and link it as a separate module anymore.
-*/
-#ifdef XXH_PRIVATE_API
-#  ifndef XXH_STATIC_LINKING_ONLY
-#    define XXH_STATIC_LINKING_ONLY
-#  endif
-#  if defined(__GNUC__)
-#    define XXH_PUBLIC_API static __inline __attribute__((unused))
-#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#    define XXH_PUBLIC_API static inline
-#  elif defined(_MSC_VER)
-#    define XXH_PUBLIC_API static __inline
-#  else
-#    define XXH_PUBLIC_API static   /* this version may generate warnings for unused static functions; disable the relevant warning */
-#  endif
-#else
-#  define XXH_PUBLIC_API   /* do nothing */
-#endif /* XXH_PRIVATE_API */
-
-/*!XXH_NAMESPACE, aka Namespace Emulation :
-
-If you want to include _and expose_ xxHash functions from within your own library,
-but also want to avoid symbol collisions with another library which also includes xxHash,
-
-you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library
-with the value of XXH_NAMESPACE (so avoid keeping it NULL, and avoid numeric values).
-
-Note that no change is required within the calling program as long as it includes `xxhash.h` :
-regular symbol names will be automatically translated by this header.
-*/
-#ifdef XXH_NAMESPACE
-#  define XXH_CAT(A,B) A##B
-#  define XXH_NAME2(A,B) XXH_CAT(A,B)
-#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
-#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
-#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
-#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
-#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
-#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
-#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
-#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
-#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
-#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
-#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
-#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
-#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
-#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
-#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
-#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
-#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
-#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
-#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
-#endif
-
-
-/* *************************************
-*  Version
-***************************************/
-#define XXH_VERSION_MAJOR    0
-#define XXH_VERSION_MINOR    6
-#define XXH_VERSION_RELEASE  2
-#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
-XXH_PUBLIC_API unsigned XXH_versionNumber (void);
-
-
-/* ****************************
-*  Simple Hash Functions
-******************************/
-typedef unsigned int       XXH32_hash_t;
-typedef unsigned long long XXH64_hash_t;
-
-XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
-XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
-
-/*!
-XXH32() :
-    Calculate the 32-bit hash of the sequence of "length" bytes stored at memory address "input".
-    The memory between input & input+length must be valid (allocated and read-accessible).
-    "seed" can be used to alter the result predictably.
-    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
-XXH64() :
-    Calculate the 64-bit hash of the sequence of "length" bytes stored at memory address "input".
-    "seed" can be used to alter the result predictably.
-    This function runs 2x faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
-*/
-
-
-/* ****************************
-*  Streaming Hash Functions
-******************************/
-typedef struct XXH32_state_s XXH32_state_t;   /* incomplete type */
-typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
-
-/*! State allocation, compatible with dynamic libraries */
-
-XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
-XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
-
-XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
-XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
-
-
-/* hash streaming */
-
-XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, unsigned int seed);
-XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
-XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);
-
-XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, unsigned long long seed);
-XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
-XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);
-
-/*
-These functions generate the xxHash of an input provided in multiple segments.
-Note that, for small input, they are slower than single-call functions, due to state management.
-For small input, prefer `XXH32()` and `XXH64()` .
-
-XXH state must first be allocated, using XXH*_createState() .
-
-Start a new hash by initializing state with a seed, using XXH*_reset().
-
-Then, feed the hash state by calling XXH*_update() as many times as necessary.
-Obviously, input must be allocated and read accessible.
-The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
-
-Finally, a hash value can be produced anytime, by using XXH*_digest().
-This function returns the nn-bit hash value as an unsigned int or unsigned long long.
-
-It's still possible to continue inserting input into the hash state after a digest,
-and generate new hashes later on, by calling XXH*_digest() again.
-
-When done, free XXH state space if it was allocated dynamically.
-*/
-
-
-/* **************************
-*  Utils
-****************************/
-#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* ! C99 */
-#  define restrict   /* disable restrict */
-#endif
-
-XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state);
-XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state);
-
-
-/* **************************
-*  Canonical representation
-****************************/
-/* Default result type for XXH functions are primitive unsigned 32 and 64 bits.
-*  The canonical representation uses human-readable write convention, aka big-endian (large digits first).
-*  These functions allow transformation of hash result into and from its canonical format.
-*  This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
-*/
-typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
-typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
-
-XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
-XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
-
-XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
-XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
-
-#endif /* XXHASH_H_5627135585666179 */
-
-
-
-/* ================================================================================================
-   This section contains definitions which are not guaranteed to remain stable.
-   They may change in future versions, becoming incompatible with a different version of the library.
-   They shall only be used with static linking.
-   Never use these definitions in association with dynamic linking !
-=================================================================================================== */
-#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345)
-#define XXH_STATIC_H_3543687687345
-
-/* These definitions are only meant to allow allocation of XXH state
-   statically, on stack, or in a struct for example.
-   Do not use members directly. */
-
-   struct XXH32_state_s {
-       unsigned total_len_32;
-       unsigned large_len;
-       unsigned v1;
-       unsigned v2;
-       unsigned v3;
-       unsigned v4;
-       unsigned mem32[4];   /* buffer defined as U32 for alignment */
-       unsigned memsize;
-       unsigned reserved;   /* never read nor write, will be removed in a future version */
-   };   /* typedef'd to XXH32_state_t */
-
-   struct XXH64_state_s {
-       unsigned long long total_len;
-       unsigned long long v1;
-       unsigned long long v2;
-       unsigned long long v3;
-       unsigned long long v4;
-       unsigned long long mem64[4];   /* buffer defined as U64 for alignment */
-       unsigned memsize;
-       unsigned reserved[2];          /* never read nor write, will be removed in a future version */
-   };   /* typedef'd to XXH64_state_t */
-
-
-#  ifdef XXH_PRIVATE_API
-#    include "xxhash.c"   /* include xxhash functions as `static`, for inlining */
-#  endif
-
-#endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */
-
-
-#if defined (__cplusplus)
-}
-#endif
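Editor's note on the streaming contract documented in the removed xxhash.h above (allocate state, reset with a seed, update in chunks, digest at any time): a compact sketch of that flow, assuming only the declarations from this header; the stream_demo helper is hypothetical:

#include <stddef.h>
#include "xxhash.h"

/* Hash a buffer in two chunks and check it matches the one-shot result.
 * Returns 0 on match, 1 on mismatch, -1 on allocation/update error. */
static int stream_demo(const void* data, size_t len, unsigned long long seed)
{
    XXH64_state_t* const st = XXH64_createState();
    if (st == NULL) return -1;

    if (XXH64_reset(st, seed) != XXH_OK) { XXH64_freeState(st); return -1; }
    if (XXH64_update(st, data, len / 2) != XXH_OK) { XXH64_freeState(st); return -1; }
    if (XXH64_update(st, (const char*)data + len / 2, len - len / 2) != XXH_OK) { XXH64_freeState(st); return -1; }

    {   XXH64_hash_t const streamed = XXH64_digest(st);   /* non-destructive: more updates may follow */
        XXH64_hash_t const oneShot  = XXH64(data, len, seed);
        XXH64_freeState(st);
        return (streamed == oneShot) ? 0 : 1;
    }
}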
diff --git a/vendor/github.com/DataDog/zstd/zbuff.h b/vendor/github.com/DataDog/zstd/zbuff.h
deleted file mode 100644
index a93115d..0000000
--- a/vendor/github.com/DataDog/zstd/zbuff.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-/* ***************************************************************
-*  NOTES/WARNINGS
-******************************************************************/
-/* The streaming API defined here is deprecated.
- * Consider migrating towards ZSTD_compressStream() API in `zstd.h`
- * See 'lib/README.md'.
- *****************************************************************/
-
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#ifndef ZSTD_BUFFERED_H_23987
-#define ZSTD_BUFFERED_H_23987
-
-/* *************************************
-*  Dependencies
-***************************************/
-#include <stddef.h>      /* size_t */
-#include "zstd.h"        /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */
-
-
-/* ***************************************************************
-*  Compiler specifics
-*****************************************************************/
-/* Deprecation warnings */
-/* Should these warnings be a problem,
-   it is generally possible to disable them,
-   typically with -Wno-deprecated-declarations for gcc
-   or _CRT_SECURE_NO_WARNINGS in Visual.
-   Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS */
-#ifdef ZBUFF_DISABLE_DEPRECATE_WARNINGS
-#  define ZBUFF_DEPRECATED(message) ZSTDLIB_API  /* disable deprecation warnings */
-#else
-#  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
-#    define ZBUFF_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API
-#  elif (defined(__GNUC__) && (__GNUC__ >= 5)) || defined(__clang__)
-#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message)))
-#  elif defined(__GNUC__) && (__GNUC__ >= 3)
-#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated))
-#  elif defined(_MSC_VER)
-#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __declspec(deprecated(message))
-#  else
-#    pragma message("WARNING: You need to implement ZBUFF_DEPRECATED for this compiler")
-#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API
-#  endif
-#endif /* ZBUFF_DISABLE_DEPRECATE_WARNINGS */
-
-
-/* *************************************
-*  Streaming functions
-***************************************/
-/* This is the easier "buffered" streaming API,
-*  using an internal buffer to lift all restrictions on user-provided buffers
-*  which can be any size, any place, for both input and output.
-*  ZBUFF and ZSTD are 100% interoperable,
-*  frames created by one can be decoded by the other one */
-
-typedef ZSTD_CStream ZBUFF_CCtx;
-ZBUFF_DEPRECATED("use ZSTD_createCStream") ZBUFF_CCtx* ZBUFF_createCCtx(void);
-ZBUFF_DEPRECATED("use ZSTD_freeCStream")   size_t      ZBUFF_freeCCtx(ZBUFF_CCtx* cctx);
-
-ZBUFF_DEPRECATED("use ZSTD_initCStream")           size_t ZBUFF_compressInit(ZBUFF_CCtx* cctx, int compressionLevel);
-ZBUFF_DEPRECATED("use ZSTD_initCStream_usingDict") size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
-
-ZBUFF_DEPRECATED("use ZSTD_compressStream") size_t ZBUFF_compressContinue(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr);
-ZBUFF_DEPRECATED("use ZSTD_flushStream")    size_t ZBUFF_compressFlush(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr);
-ZBUFF_DEPRECATED("use ZSTD_endStream")      size_t ZBUFF_compressEnd(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr);
-
-/*-*************************************************
-*  Streaming compression - howto
-*
-*  A ZBUFF_CCtx object is required to track streaming operation.
-*  Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources.
-*  ZBUFF_CCtx objects can be reused multiple times.
-*
-*  Start by initializing the ZBUFF_CCtx.
-*  Use ZBUFF_compressInit() to start a new compression operation.
-*  Use ZBUFF_compressInitDictionary() for a compression which requires a dictionary.
-*
-*  Use ZBUFF_compressContinue() repetitively to consume input stream.
-*  *srcSizePtr and *dstCapacityPtr can be any size.
-*  The function will report how many bytes were read or written within *srcSizePtr and *dstCapacityPtr.
-*  Note that it may not consume the entire input, in which case it's up to the caller to present the remaining data again.
-*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each call, so save its content if it matters or change @dst .
-*  @return : a hint to preferred nb of bytes to use as input for next function call (it's just a hint, to improve latency)
-*            or an error code, which can be tested using ZBUFF_isError().
-*
-*  At any moment, it's possible to flush whatever data remains within buffer, using ZBUFF_compressFlush().
-*  The nb of bytes written into `dst` will be reported into *dstCapacityPtr.
-*  Note that the function cannot output more than *dstCapacityPtr,
-*  therefore, some content might still be left into internal buffer if *dstCapacityPtr is too small.
-*  @return : nb of bytes still present into internal buffer (0 if it's empty)
-*            or an error code, which can be tested using ZBUFF_isError().
-*
-*  ZBUFF_compressEnd() instructs to finish a frame.
-*  It will perform a flush and write frame epilogue.
-*  The epilogue is required for decoders to consider a frame completed.
-*  Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small.
-*  In that case, call ZBUFF_compressFlush() again to complete the flush.
-*  @return : nb of bytes still present into internal buffer (0 if it's empty)
-*            or an error code, which can be tested using ZBUFF_isError().
-*
-*  Hint : _recommended buffer_ sizes (not compulsory) : ZBUFF_recommendedCInSize() / ZBUFF_recommendedCOutSize()
-*  input : ZBUFF_recommendedCInSize==128 KB block size is the internal unit, use this value to reduce intermediate stages (better latency)
-*  output : ZBUFF_recommendedCOutSize==ZSTD_compressBound(128 KB) + 3 + 3 : ensures it's always possible to write/flush/end a full block. Skip some buffering.
-*  By using both, it ensures that input will be entirely consumed, and output will always contain the result, reducing intermediate buffering.
-* **************************************************/
-
-
-typedef ZSTD_DStream ZBUFF_DCtx;
-ZBUFF_DEPRECATED("use ZSTD_createDStream") ZBUFF_DCtx* ZBUFF_createDCtx(void);
-ZBUFF_DEPRECATED("use ZSTD_freeDStream")   size_t      ZBUFF_freeDCtx(ZBUFF_DCtx* dctx);
-
-ZBUFF_DEPRECATED("use ZSTD_initDStream")           size_t ZBUFF_decompressInit(ZBUFF_DCtx* dctx);
-ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* dctx, const void* dict, size_t dictSize);
-
-ZBUFF_DEPRECATED("use ZSTD_decompressStream") size_t ZBUFF_decompressContinue(ZBUFF_DCtx* dctx,
-                                            void* dst, size_t* dstCapacityPtr,
-                                      const void* src, size_t* srcSizePtr);
-
-/*-***************************************************************************
-*  Streaming decompression howto
-*
-*  A ZBUFF_DCtx object is required to track streaming operations.
-*  Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
-*  Use ZBUFF_decompressInit() to start a new decompression operation,
-*   or ZBUFF_decompressInitDictionary() if decompression requires a dictionary.
-*  Note that ZBUFF_DCtx objects can be re-init multiple times.
-*
-*  Use ZBUFF_decompressContinue() repetitively to consume your input.
-*  *srcSizePtr and *dstCapacityPtr can be any size.
-*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
-*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
-*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
-*  @return : 0 when a frame is completely decoded and fully flushed,
-*            1 when there is still some data left within internal buffer to flush,
-*            >1 when more data is expected, with value being a suggested next input size (it's just a hint, which helps latency),
-*            or an error code, which can be tested using ZBUFF_isError().
-*
-*  Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize() and ZBUFF_recommendedDOutSize()
-*  output : ZBUFF_recommendedDOutSize== 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded.
-*  input  : ZBUFF_recommendedDInSize == 128KB + 3;
-*           just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
-* *******************************************************************************/
-
-
-/* *************************************
-*  Tool functions
-***************************************/
-ZBUFF_DEPRECATED("use ZSTD_isError")      unsigned ZBUFF_isError(size_t errorCode);
-ZBUFF_DEPRECATED("use ZSTD_getErrorName") const char* ZBUFF_getErrorName(size_t errorCode);
-
-/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
-*   These sizes are just hints, they tend to offer better latency */
-ZBUFF_DEPRECATED("use ZSTD_CStreamInSize")  size_t ZBUFF_recommendedCInSize(void);
-ZBUFF_DEPRECATED("use ZSTD_CStreamOutSize") size_t ZBUFF_recommendedCOutSize(void);
-ZBUFF_DEPRECATED("use ZSTD_DStreamInSize")  size_t ZBUFF_recommendedDInSize(void);
-ZBUFF_DEPRECATED("use ZSTD_DStreamOutSize") size_t ZBUFF_recommendedDOutSize(void);
-
-#endif  /* ZSTD_BUFFERED_H_23987 */
-
-
-#ifdef ZBUFF_STATIC_LINKING_ONLY
-#ifndef ZBUFF_STATIC_H_30298098432
-#define ZBUFF_STATIC_H_30298098432
-
-/* ====================================================================================
- * The definitions in this section are considered experimental.
- * They should never be used in association with a dynamic library, as they may change in the future.
- * They are provided for advanced usages.
- * Use them only in association with static linking.
- * ==================================================================================== */
-
-/*--- Dependency ---*/
-#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters, ZSTD_customMem */
-#include "zstd.h"
-
-
-/*--- Custom memory allocator ---*/
-/*! ZBUFF_createCCtx_advanced() :
- *  Create a ZBUFF compression context using external alloc and free functions */
-ZBUFF_DEPRECATED("use ZSTD_createCStream_advanced") ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem);
-
-/*! ZBUFF_createDCtx_advanced() :
- *  Create a ZBUFF decompression context using external alloc and free functions */
-ZBUFF_DEPRECATED("use ZSTD_createDStream_advanced") ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem);
-
-
-/*--- Advanced Streaming Initialization ---*/
-ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc,
-                                               const void* dict, size_t dictSize,
-                                               ZSTD_parameters params, unsigned long long pledgedSrcSize);
-
-
-#endif    /* ZBUFF_STATIC_H_30298098432 */
-#endif    /* ZBUFF_STATIC_LINKING_ONLY */
-
-
-#if defined (__cplusplus)
-}
-#endif
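Editor's note on the "Streaming compression - howto" block in the removed zbuff.h above: the loop it describes is init, repeated compressContinue over the input, then compressEnd to write the frame epilogue. A minimal sketch under those documented semantics, using only declarations from this header; the zbuff_compress_once helper, level 3, and the single-shot simplification are assumptions for illustration:

#include <stddef.h>
#include "zbuff.h"

/* Compress src into dst with the deprecated buffered API.
 * Returns the compressed size, or 0 on error / insufficient dst space. */
static size_t zbuff_compress_once(void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize)
{
    ZBUFF_CCtx* const cctx = ZBUFF_createCCtx();
    size_t written = 0;
    size_t consumed = 0;
    if (cctx == NULL) return 0;
    if (ZBUFF_isError(ZBUFF_compressInit(cctx, 3 /* compression level */))) goto _fail;

    /* Feed the input; each call reports how much it read and wrote via the pointers. */
    while (consumed < srcSize) {
        size_t dstAvail = dstCapacity - written;     /* in: capacity, out: bytes written */
        size_t srcChunk = srcSize - consumed;        /* in: input size, out: bytes read  */
        size_t const hint = ZBUFF_compressContinue(cctx, (char*)dst + written, &dstAvail,
                                                   (const char*)src + consumed, &srcChunk);
        if (ZBUFF_isError(hint)) goto _fail;
        written  += dstAvail;
        consumed += srcChunk;
        if (dstAvail == 0 && srcChunk == 0) goto _fail;   /* dst too small to make progress */
    }

    /* Finish the frame; a non-zero return means data is still buffered (dst too small here). */
    {   size_t dstAvail = dstCapacity - written;
        size_t const remaining = ZBUFF_compressEnd(cctx, (char*)dst + written, &dstAvail);
        if (ZBUFF_isError(remaining) || remaining != 0) goto _fail;
        written += dstAvail;
    }
    ZBUFF_freeCCtx(cctx);
    return written;

_fail:
    ZBUFF_freeCCtx(cctx);
    return 0;
}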
diff --git a/vendor/github.com/DataDog/zstd/zbuff_common.c b/vendor/github.com/DataDog/zstd/zbuff_common.c
deleted file mode 100644
index 661b9b0..0000000
--- a/vendor/github.com/DataDog/zstd/zbuff_common.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-/*-*************************************
-*  Dependencies
-***************************************/
-#include "error_private.h"
-#include "zbuff.h"
-
-/*-****************************************
-*  ZBUFF Error Management  (deprecated)
-******************************************/
-
-/*! ZBUFF_isError() :
-*   tells if a return value is an error code */
-unsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); }
-/*! ZBUFF_getErrorName() :
-*   provides error code string from function result (useful for debugging) */
-const char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
diff --git a/vendor/github.com/DataDog/zstd/zbuff_compress.c b/vendor/github.com/DataDog/zstd/zbuff_compress.c
deleted file mode 100644
index f39c60d..0000000
--- a/vendor/github.com/DataDog/zstd/zbuff_compress.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-
-/* *************************************
-*  Dependencies
-***************************************/
-#define ZBUFF_STATIC_LINKING_ONLY
-#include "zbuff.h"
-
-
-/*-***********************************************************
-*  Streaming compression
-*
-*  A ZBUFF_CCtx object is required to track streaming operation.
-*  Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources.
-*  Use ZBUFF_compressInit() to start a new compression operation.
-*  ZBUFF_CCtx objects can be reused multiple times.
-*
-*  Use ZBUFF_compressContinue() repetitively to consume your input.
-*  *srcSizePtr and *dstCapacityPtr can be any size.
-*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
-*  Note that it may not consume the entire input, in which case it's up to the caller to call the function again with the remaining input.
-*  The content of dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters or change dst .
-*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)
-*            or an error code, which can be tested using ZBUFF_isError().
-*
-*  ZBUFF_compressFlush() can be used to instruct ZBUFF to compress and output whatever remains within its buffer.
-*  Note that it will not output more than *dstCapacityPtr.
-*  Therefore, some content might still be left into its internal buffer if dst buffer is too small.
-*  @return : nb of bytes still present into internal buffer (0 if it's empty)
-*            or an error code, which can be tested using ZBUFF_isError().
-*
-*  ZBUFF_compressEnd() instructs to finish a frame.
-*  It will perform a flush and write frame epilogue.
-*  Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small.
-*  @return : nb of bytes still present into internal buffer (0 if it's empty)
-*            or an error code, which can be tested using ZBUFF_isError().
-*
-*  Hint : recommended buffer sizes (not compulsory)
-*  input : ZSTD_BLOCKSIZE_MAX (128 KB), internal unit size, it improves latency to use this value.
-*  output : ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + ZBUFF_endFrameSize : ensures it's always possible to write/flush/end a full block at best speed.
-* ***********************************************************/
-
-ZBUFF_CCtx* ZBUFF_createCCtx(void)
-{
-    return ZSTD_createCStream();
-}
-
-ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem)
-{
-    return ZSTD_createCStream_advanced(customMem);
-}
-
-size_t ZBUFF_freeCCtx(ZBUFF_CCtx* zbc)
-{
-    return ZSTD_freeCStream(zbc);
-}
-
-
-/* ======   Initialization   ====== */
-
-size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc,
-                                   const void* dict, size_t dictSize,
-                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
-{
-    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* preserve "0 == unknown" behavior */
-    return ZSTD_initCStream_advanced(zbc, dict, dictSize, params, pledgedSrcSize);
-}
-
-
-size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, int compressionLevel)
-{
-    return ZSTD_initCStream_usingDict(zbc, dict, dictSize, compressionLevel);
-}
-
-size_t ZBUFF_compressInit(ZBUFF_CCtx* zbc, int compressionLevel)
-{
-    return ZSTD_initCStream(zbc, compressionLevel);
-}
-
-/* ======   Compression   ====== */
-
-
-size_t ZBUFF_compressContinue(ZBUFF_CCtx* zbc,
-                              void* dst, size_t* dstCapacityPtr,
-                        const void* src, size_t* srcSizePtr)
-{
-    size_t result;
-    ZSTD_outBuffer outBuff;
-    ZSTD_inBuffer inBuff;
-    outBuff.dst = dst;
-    outBuff.pos = 0;
-    outBuff.size = *dstCapacityPtr;
-    inBuff.src = src;
-    inBuff.pos = 0;
-    inBuff.size = *srcSizePtr;
-    result = ZSTD_compressStream(zbc, &outBuff, &inBuff);
-    *dstCapacityPtr = outBuff.pos;
-    *srcSizePtr = inBuff.pos;
-    return result;
-}
-
-
-
-/* ======   Finalize   ====== */
-
-size_t ZBUFF_compressFlush(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr)
-{
-    size_t result;
-    ZSTD_outBuffer outBuff;
-    outBuff.dst = dst;
-    outBuff.pos = 0;
-    outBuff.size = *dstCapacityPtr;
-    result = ZSTD_flushStream(zbc, &outBuff);
-    *dstCapacityPtr = outBuff.pos;
-    return result;
-}
-
-
-size_t ZBUFF_compressEnd(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr)
-{
-    size_t result;
-    ZSTD_outBuffer outBuff;
-    outBuff.dst = dst;
-    outBuff.pos = 0;
-    outBuff.size = *dstCapacityPtr;
-    result = ZSTD_endStream(zbc, &outBuff);
-    *dstCapacityPtr = outBuff.pos;
-    return result;
-}
-
-
-
-/* *************************************
-*  Tool functions
-***************************************/
-size_t ZBUFF_recommendedCInSize(void)  { return ZSTD_CStreamInSize(); }
-size_t ZBUFF_recommendedCOutSize(void) { return ZSTD_CStreamOutSize(); }
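Editor's note on the removed zbuff_compress.c above: each wrapper simply marshals (dst, *dstCapacityPtr) and (src, *srcSizePtr) into a ZSTD_outBuffer / ZSTD_inBuffer pair and forwards to the non-deprecated streaming API, which is why the file recommends migrating to ZSTD_compressStream(). A rough equivalent of one ZBUFF_compressContinue() call against zstd.h directly (the helper name is hypothetical):

#include "zstd.h"

/* Rough ZSTD_compressStream equivalent of one ZBUFF_compressContinue() call. */
static size_t compress_continue_zstd(ZSTD_CStream* zcs,
                                     void* dst, size_t* dstCapacityPtr,
                                     const void* src, size_t* srcSizePtr)
{
    ZSTD_outBuffer out = { dst, *dstCapacityPtr, 0 };
    ZSTD_inBuffer  in  = { src, *srcSizePtr, 0 };
    size_t const hint = ZSTD_compressStream(zcs, &out, &in);
    *dstCapacityPtr = out.pos;   /* bytes written */
    *srcSizePtr     = in.pos;    /* bytes consumed */
    return hint;                 /* error code or next-input-size hint */
}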
diff --git a/vendor/github.com/DataDog/zstd/zbuff_decompress.c b/vendor/github.com/DataDog/zstd/zbuff_decompress.c
deleted file mode 100644
index 923c22b..0000000
--- a/vendor/github.com/DataDog/zstd/zbuff_decompress.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-
-/* *************************************
-*  Dependencies
-***************************************/
-#define ZBUFF_STATIC_LINKING_ONLY
-#include "zbuff.h"
-
-
-ZBUFF_DCtx* ZBUFF_createDCtx(void)
-{
-    return ZSTD_createDStream();
-}
-
-ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem)
-{
-    return ZSTD_createDStream_advanced(customMem);
-}
-
-size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbd)
-{
-    return ZSTD_freeDStream(zbd);
-}
-
-
-/* *** Initialization *** */
-
-size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* zbd, const void* dict, size_t dictSize)
-{
-    return ZSTD_initDStream_usingDict(zbd, dict, dictSize);
-}
-
-size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbd)
-{
-    return ZSTD_initDStream(zbd);
-}
-
-
-/* *** Decompression *** */
-
-size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbd,
-                                void* dst, size_t* dstCapacityPtr,
-                          const void* src, size_t* srcSizePtr)
-{
-    ZSTD_outBuffer outBuff;
-    ZSTD_inBuffer inBuff;
-    size_t result;
-    outBuff.dst  = dst;
-    outBuff.pos  = 0;
-    outBuff.size = *dstCapacityPtr;
-    inBuff.src  = src;
-    inBuff.pos  = 0;
-    inBuff.size = *srcSizePtr;
-    result = ZSTD_decompressStream(zbd, &outBuff, &inBuff);
-    *dstCapacityPtr = outBuff.pos;
-    *srcSizePtr = inBuff.pos;
-    return result;
-}
-
-
-/* *************************************
-*  Tool functions
-***************************************/
-size_t ZBUFF_recommendedDInSize(void)  { return ZSTD_DStreamInSize(); }
-size_t ZBUFF_recommendedDOutSize(void) { return ZSTD_DStreamOutSize(); }
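Editor's note on the removed zbuff_decompress.c above: per the return convention documented in zbuff.h, ZBUFF_decompressContinue() returns 0 once a frame is fully decoded and flushed, so a caller loops until that point. A minimal sketch of such a loop, assuming only the ZBUFF declarations; the zbuff_decompress_once helper is illustrative:

#include <stddef.h>
#include "zbuff.h"

/* Decompress one complete frame from src into dst with the deprecated buffered API.
 * Returns the decompressed size, or 0 on error / truncated input / dst too small. */
static size_t zbuff_decompress_once(void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    ZBUFF_DCtx* const dctx = ZBUFF_createDCtx();
    size_t written = 0, consumed = 0;
    if (dctx == NULL) return 0;
    if (ZBUFF_isError(ZBUFF_decompressInit(dctx))) goto _fail;

    for (;;) {
        size_t dstAvail = dstCapacity - written;     /* in: capacity, out: bytes written */
        size_t srcChunk = srcSize - consumed;        /* in: input size, out: bytes read  */
        size_t const ret = ZBUFF_decompressContinue(dctx, (char*)dst + written, &dstAvail,
                                                    (const char*)src + consumed, &srcChunk);
        if (ZBUFF_isError(ret)) goto _fail;
        written  += dstAvail;
        consumed += srcChunk;
        if (ret == 0) break;                              /* frame fully decoded and flushed */
        if (dstAvail == 0 && srcChunk == 0) goto _fail;   /* no further progress possible */
    }
    ZBUFF_freeDCtx(dctx);
    return written;

_fail:
    ZBUFF_freeDCtx(dctx);
    return 0;
}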
diff --git a/vendor/github.com/DataDog/zstd/zdict.c b/vendor/github.com/DataDog/zstd/zdict.c
deleted file mode 100644
index ee21ee1..0000000
--- a/vendor/github.com/DataDog/zstd/zdict.c
+++ /dev/null
@@ -1,1111 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-/*-**************************************
-*  Tuning parameters
-****************************************/
-#define MINRATIO 4   /* minimum nb of occurrences for a segment to be selected into the dictionary */
-#define ZDICT_MAX_SAMPLES_SIZE (2000U << 20)
-#define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO)
-
-
-/*-**************************************
-*  Compiler Options
-****************************************/
-/* Unix Large Files support (>4GB) */
-#define _FILE_OFFSET_BITS 64
-#if (defined(__sun__) && (!defined(__LP64__)))   /* Sun Solaris 32-bits requires specific definitions */
-#  define _LARGEFILE_SOURCE
-#elif ! defined(__LP64__)                        /* No point defining Large file for 64 bit */
-#  define _LARGEFILE64_SOURCE
-#endif
-
-
-/*-*************************************
-*  Dependencies
-***************************************/
-#include <stdlib.h>        /* malloc, free */
-#include <string.h>        /* memset */
-#include <stdio.h>         /* fprintf, fopen, ftello64 */
-#include <time.h>          /* clock */
-
-#include "mem.h"           /* read */
-#include "fse.h"           /* FSE_normalizeCount, FSE_writeNCount */
-#define HUF_STATIC_LINKING_ONLY
-#include "huf.h"           /* HUF_buildCTable, HUF_writeCTable */
-#include "zstd_internal.h" /* includes zstd.h */
-#include "xxhash.h"        /* XXH64 */
-#include "divsufsort.h"
-#ifndef ZDICT_STATIC_LINKING_ONLY
-#  define ZDICT_STATIC_LINKING_ONLY
-#endif
-#include "zdict.h"
-
-
-/*-*************************************
-*  Constants
-***************************************/
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
-
-#define DICTLISTSIZE_DEFAULT 10000
-
-#define NOISELENGTH 32
-
-static const int g_compressionLevel_default = 3;
-static const U32 g_selectivity_default = 9;
-
-
-/*-*************************************
-*  Console display
-***************************************/
-#define DISPLAY(...)         { fprintf(stderr, __VA_ARGS__); fflush( stderr ); }
-#define DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); }    /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
-
-static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }
-
-static void ZDICT_printHex(const void* ptr, size_t length)
-{
-    const BYTE* const b = (const BYTE*)ptr;
-    size_t u;
-    for (u=0; u<length; u++) {
-        BYTE c = b[u];
-        if (c<32 || c>126) c = '.';   /* non-printable char */
-        DISPLAY("%c", c);
-    }
-}
-
-
-/*-********************************************************
-*  Helper functions
-**********************************************************/
-unsigned ZDICT_isError(size_t errorCode) { return ERR_isError(errorCode); }
-
-const char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
-
-unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize)
-{
-    if (dictSize < 8) return 0;
-    if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0;
-    return MEM_readLE32((const char*)dictBuffer + 4);
-}
-
-
-/*-********************************************************
-*  Dictionary training functions
-**********************************************************/
-static unsigned ZDICT_NbCommonBytes (size_t val)
-{
-    if (MEM_isLittleEndian()) {
-        if (MEM_64bits()) {
-#       if defined(_MSC_VER) && defined(_WIN64)
-            unsigned long r = 0;
-            _BitScanForward64( &r, (U64)val );
-            return (unsigned)(r>>3);
-#       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return (__builtin_ctzll((U64)val) >> 3);
-#       else
-            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
-            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
-#       endif
-        } else { /* 32 bits */
-#       if defined(_MSC_VER)
-            unsigned long r=0;
-            _BitScanForward( &r, (U32)val );
-            return (unsigned)(r>>3);
-#       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return (__builtin_ctz((U32)val) >> 3);
-#       else
-            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
-            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
-#       endif
-        }
-    } else {  /* Big Endian CPU */
-        if (MEM_64bits()) {
-#       if defined(_MSC_VER) && defined(_WIN64)
-            unsigned long r = 0;
-            _BitScanReverse64( &r, val );
-            return (unsigned)(r>>3);
-#       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return (__builtin_clzll(val) >> 3);
-#       else
-            unsigned r;
-            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
-            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
-            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
-            r += (!val);
-            return r;
-#       endif
-        } else { /* 32 bits */
-#       if defined(_MSC_VER)
-            unsigned long r = 0;
-            _BitScanReverse( &r, (unsigned long)val );
-            return (unsigned)(r>>3);
-#       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return (__builtin_clz((U32)val) >> 3);
-#       else
-            unsigned r;
-            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
-            r += (!val);
-            return r;
-#       endif
-    }   }
-}
-
-
-/*! ZDICT_count() :
-    Count the nb of common bytes between 2 pointers.
-    Note : this function presumes the end of the buffer is followed by a noisy guard band.
-*/
-static size_t ZDICT_count(const void* pIn, const void* pMatch)
-{
-    const char* const pStart = (const char*)pIn;
-    for (;;) {
-        size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
-        if (!diff) {
-            pIn = (const char*)pIn+sizeof(size_t);
-            pMatch = (const char*)pMatch+sizeof(size_t);
-            continue;
-        }
-        pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff);
-        return (size_t)((const char*)pIn - pStart);
-    }
-}
-
-
-typedef struct {
-    U32 pos;
-    U32 length;
-    U32 savings;
-} dictItem;
-
-static void ZDICT_initDictItem(dictItem* d)
-{
-    d->pos = 1;
-    d->length = 0;
-    d->savings = (U32)(-1);
-}
-
-
-#define LLIMIT 64          /* heuristic determined experimentally */
-#define MINMATCHLENGTH 7   /* heuristic determined experimentally */
-static dictItem ZDICT_analyzePos(
-                       BYTE* doneMarks,
-                       const int* suffix, U32 start,
-                       const void* buffer, U32 minRatio, U32 notificationLevel)
-{
-    U32 lengthList[LLIMIT] = {0};
-    U32 cumulLength[LLIMIT] = {0};
-    U32 savings[LLIMIT] = {0};
-    const BYTE* b = (const BYTE*)buffer;
-    size_t maxLength = LLIMIT;
-    size_t pos = suffix[start];
-    U32 end = start;
-    dictItem solution;
-
-    /* init */
-    memset(&solution, 0, sizeof(solution));
-    doneMarks[pos] = 1;
-
-    /* trivial repetition cases */
-    if ( (MEM_read16(b+pos+0) == MEM_read16(b+pos+2))
-       ||(MEM_read16(b+pos+1) == MEM_read16(b+pos+3))
-       ||(MEM_read16(b+pos+2) == MEM_read16(b+pos+4)) ) {
-        /* skip and mark segment */
-        U16 const pattern16 = MEM_read16(b+pos+4);
-        U32 u, patternEnd = 6;
-        while (MEM_read16(b+pos+patternEnd) == pattern16) patternEnd+=2 ;
-        if (b[pos+patternEnd] == b[pos+patternEnd-1]) patternEnd++;
-        for (u=1; u<patternEnd; u++)
-            doneMarks[pos+u] = 1;
-        return solution;
-    }
-
-    /* look forward */
-    {   size_t length;
-        do {
-            end++;
-            length = ZDICT_count(b + pos, b + suffix[end]);
-        } while (length >= MINMATCHLENGTH);
-    }
-
-    /* look backward */
-    {   size_t length;
-        do {
-            length = ZDICT_count(b + pos, b + *(suffix+start-1));
-            if (length >=MINMATCHLENGTH) start--;
-        } while(length >= MINMATCHLENGTH);
-    }
-
-    /* exit if the minimum nb of repetitions was not found */
-    if (end-start < minRatio) {
-        U32 idx;
-        for(idx=start; idx<end; idx++)
-            doneMarks[suffix[idx]] = 1;
-        return solution;
-    }
-
-    {   int i;
-        U32 mml;
-        U32 refinedStart = start;
-        U32 refinedEnd = end;
-
-        DISPLAYLEVEL(4, "\n");
-        DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u  ", (unsigned)(end-start), MINMATCHLENGTH, (unsigned)pos);
-        DISPLAYLEVEL(4, "\n");
-
-        for (mml = MINMATCHLENGTH ; ; mml++) {
-            BYTE currentChar = 0;
-            U32 currentCount = 0;
-            U32 currentID = refinedStart;
-            U32 id;
-            U32 selectedCount = 0;
-            U32 selectedID = currentID;
-            for (id =refinedStart; id < refinedEnd; id++) {
-                if (b[suffix[id] + mml] != currentChar) {
-                    if (currentCount > selectedCount) {
-                        selectedCount = currentCount;
-                        selectedID = currentID;
-                    }
-                    currentID = id;
-                    currentChar = b[ suffix[id] + mml];
-                    currentCount = 0;
-                }
-                currentCount ++;
-            }
-            if (currentCount > selectedCount) {  /* for last */
-                selectedCount = currentCount;
-                selectedID = currentID;
-            }
-
-            if (selectedCount < minRatio)
-                break;
-            refinedStart = selectedID;
-            refinedEnd = refinedStart + selectedCount;
-        }
-
-        /* evaluate gain based on new dict */
-        start = refinedStart;
-        pos = suffix[refinedStart];
-        end = start;
-        memset(lengthList, 0, sizeof(lengthList));
-
-        /* look forward */
-        {   size_t length;
-            do {
-                end++;
-                length = ZDICT_count(b + pos, b + suffix[end]);
-                if (length >= LLIMIT) length = LLIMIT-1;
-                lengthList[length]++;
-            } while (length >=MINMATCHLENGTH);
-        }
-
-        /* look backward */
-        {   size_t length = MINMATCHLENGTH;
-            while ((length >= MINMATCHLENGTH) & (start > 0)) {
-                length = ZDICT_count(b + pos, b + suffix[start - 1]);
-                if (length >= LLIMIT) length = LLIMIT - 1;
-                lengthList[length]++;
-                if (length >= MINMATCHLENGTH) start--;
-            }
-        }
-
-        /* largest useful length */
-        memset(cumulLength, 0, sizeof(cumulLength));
-        cumulLength[maxLength-1] = lengthList[maxLength-1];
-        for (i=(int)(maxLength-2); i>=0; i--)
-            cumulLength[i] = cumulLength[i+1] + lengthList[i];
-
-        for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break;
-        maxLength = i;
-
-        /* reduce maxLength if the match ends in repetitive data */
-        {   U32 l = (U32)maxLength;
-            BYTE const c = b[pos + maxLength-1];
-            while (b[pos+l-2]==c) l--;
-            maxLength = l;
-        }
-        if (maxLength < MINMATCHLENGTH) return solution;   /* skip : no long-enough solution */
-
-        /* calculate savings */
-        savings[5] = 0;
-        for (i=MINMATCHLENGTH; i<=(int)maxLength; i++)
-            savings[i] = savings[i-1] + (lengthList[i] * (i-3));
-
-        DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f)  \n",
-                     (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength);
-
-        solution.pos = (U32)pos;
-        solution.length = (U32)maxLength;
-        solution.savings = savings[maxLength];
-
-        /* mark positions done */
-        {   U32 id;
-            for (id=start; id<end; id++) {
-                U32 p, pEnd, length;
-                U32 const testedPos = suffix[id];
-                if (testedPos == pos)
-                    length = solution.length;
-                else {
-                    length = (U32)ZDICT_count(b+pos, b+testedPos);
-                    if (length > solution.length) length = solution.length;
-                }
-                pEnd = (U32)(testedPos + length);
-                for (p=testedPos; p<pEnd; p++)
-                    doneMarks[p] = 1;
-    }   }   }
-
-    return solution;
-}
-
-
-static int isIncluded(const void* in, const void* container, size_t length)
-{
-    const char* const ip = (const char*) in;
-    const char* const into = (const char*) container;
-    size_t u;
-
-    for (u=0; u<length; u++) {  /* works because end of buffer is a noisy guard band */
-        if (ip[u] != into[u]) break;
-    }
-
-    return u==length;
-}
-
-/*! ZDICT_tryMerge() :
-    check if dictItem can be merged, do it if possible
-    @return : id of destination elt, 0 if not merged
-*/
-static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const void* buffer)
-{
-    const U32 tableSize = table->pos;
-    const U32 eltEnd = elt.pos + elt.length;
-    const char* const buf = (const char*) buffer;
-
-    /* tail overlap */
-    U32 u; for (u=1; u<tableSize; u++) {
-        if (u==eltNbToSkip) continue;
-        if ((table[u].pos > elt.pos) && (table[u].pos <= eltEnd)) {  /* overlap, existing > new */
-            /* append */
-            U32 const addedLength = table[u].pos - elt.pos;
-            table[u].length += addedLength;
-            table[u].pos = elt.pos;
-            table[u].savings += elt.savings * addedLength / elt.length;   /* rough approx */
-            table[u].savings += elt.length / 8;    /* rough approx bonus */
-            elt = table[u];
-            /* sort : improve rank */
-            while ((u>1) && (table[u-1].savings < elt.savings))
-            table[u] = table[u-1], u--;
-            table[u] = elt;
-            return u;
-    }   }
-
-    /* front overlap */
-    for (u=1; u<tableSize; u++) {
-        if (u==eltNbToSkip) continue;
-
-        if ((table[u].pos + table[u].length >= elt.pos) && (table[u].pos < elt.pos)) {  /* overlap, existing < new */
-            /* append */
-            int const addedLength = (int)eltEnd - (table[u].pos + table[u].length);
-            table[u].savings += elt.length / 8;    /* rough approx bonus */
-            if (addedLength > 0) {   /* otherwise, elt fully included into existing */
-                table[u].length += addedLength;
-                table[u].savings += elt.savings * addedLength / elt.length;   /* rough approx */
-            }
-            /* sort : improve rank */
-            elt = table[u];
-            while ((u>1) && (table[u-1].savings < elt.savings))
-                table[u] = table[u-1], u--;
-            table[u] = elt;
-            return u;
-        }
-
-        if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) {
-            if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) {
-                size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 );
-                table[u].pos = elt.pos;
-                table[u].savings += (U32)(elt.savings * addedLength / elt.length);
-                table[u].length = MIN(elt.length, table[u].length + 1);
-                return u;
-            }
-        }
-    }
-
-    return 0;
-}
-
-
-static void ZDICT_removeDictItem(dictItem* table, U32 id)
-{
-    /* convention : table[0].pos stores nb of elts */
-    U32 const max = table[0].pos;
-    U32 u;
-    if (!id) return;   /* protection, should never happen */
-    for (u=id; u<max-1; u++)
-        table[u] = table[u+1];
-    table->pos--;
-}
-
-
-static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer)
-{
-    /* merge if possible */
-    U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer);
-    if (mergeId) {
-        U32 newMerge = 1;
-        while (newMerge) {
-            newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer);
-            if (newMerge) ZDICT_removeDictItem(table, mergeId);
-            mergeId = newMerge;
-        }
-        return;
-    }
-
-    /* insert */
-    {   U32 current;
-        U32 nextElt = table->pos;
-        if (nextElt >= maxSize) nextElt = maxSize-1;
-        current = nextElt-1;
-        while (table[current].savings < elt.savings) {
-            table[current+1] = table[current];
-            current--;
-        }
-        table[current+1] = elt;
-        table->pos = nextElt+1;
-    }
-}
-
-
-static U32 ZDICT_dictSize(const dictItem* dictList)
-{
-    U32 u, dictSize = 0;
-    for (u=1; u<dictList[0].pos; u++)
-        dictSize += dictList[u].length;
-    return dictSize;
-}
-
-
-static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
-                            const void* const buffer, size_t bufferSize,   /* buffer must end with noisy guard band */
-                            const size_t* fileSizes, unsigned nbFiles,
-                            unsigned minRatio, U32 notificationLevel)
-{
-    int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0));
-    int* const suffix = suffix0+1;
-    U32* reverseSuffix = (U32*)malloc((bufferSize)*sizeof(*reverseSuffix));
-    BYTE* doneMarks = (BYTE*)malloc((bufferSize+16)*sizeof(*doneMarks));   /* +16 for overflow security */
-    U32* filePos = (U32*)malloc(nbFiles * sizeof(*filePos));
-    size_t result = 0;
-    clock_t displayClock = 0;
-    clock_t const refreshRate = CLOCKS_PER_SEC * 3 / 10;
-
-#   define DISPLAYUPDATE(l, ...) if (notificationLevel>=l) { \
-            if (ZDICT_clockSpan(displayClock) > refreshRate)  \
-            { displayClock = clock(); DISPLAY(__VA_ARGS__); \
-            if (notificationLevel>=4) fflush(stderr); } }
-
-    /* init */
-    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
-    if (!suffix0 || !reverseSuffix || !doneMarks || !filePos) {
-        result = ERROR(memory_allocation);
-        goto _cleanup;
-    }
-    if (minRatio < MINRATIO) minRatio = MINRATIO;
-    memset(doneMarks, 0, bufferSize+16);
-
-    /* limit sample set size (divsufsort limitation)*/
-    if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (unsigned)(ZDICT_MAX_SAMPLES_SIZE>>20));
-    while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles];
-
-    /* sort */
-    DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (unsigned)(bufferSize>>20));
-    {   int const divSufSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0);
-        if (divSufSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; }
-    }
-    suffix[bufferSize] = (int)bufferSize;   /* leads into noise */
-    suffix0[0] = (int)bufferSize;           /* leads into noise */
-    /* build reverse suffix sort */
-    {   size_t pos;
-        for (pos=0; pos < bufferSize; pos++)
-            reverseSuffix[suffix[pos]] = (U32)pos;
-        /* note filePos tracks borders between samples.
-           It's not used at this stage, but planned to become useful in a later update */
-        filePos[0] = 0;
-        for (pos=1; pos<nbFiles; pos++)
-            filePos[pos] = (U32)(filePos[pos-1] + fileSizes[pos-1]);
-    }
-
-    DISPLAYLEVEL(2, "finding patterns ... \n");
-    DISPLAYLEVEL(3, "minimum ratio : %u \n", minRatio);
-
-    {   U32 cursor; for (cursor=0; cursor < bufferSize; ) {
-            dictItem solution;
-            if (doneMarks[cursor]) { cursor++; continue; }
-            solution = ZDICT_analyzePos(doneMarks, suffix, reverseSuffix[cursor], buffer, minRatio, notificationLevel);
-            if (solution.length==0) { cursor++; continue; }
-            ZDICT_insertDictItem(dictList, dictListSize, solution, buffer);
-            cursor += solution.length;
-            DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / bufferSize * 100);
-    }   }
-
-_cleanup:
-    free(suffix0);
-    free(reverseSuffix);
-    free(doneMarks);
-    free(filePos);
-    return result;
-}
-
-
-static void ZDICT_fillNoise(void* buffer, size_t length)
-{
-    unsigned const prime1 = 2654435761U;
-    unsigned const prime2 = 2246822519U;
-    unsigned acc = prime1;
-    size_t p=0;
-    for (p=0; p<length; p++) {
-        acc *= prime2;
-        ((unsigned char*)buffer)[p] = (unsigned char)(acc >> 21);
-    }
-}
-
-
-typedef struct
-{
-    ZSTD_CDict* dict;    /* dictionary */
-    ZSTD_CCtx* zc;     /* working context */
-    void* workPlace;   /* must be ZSTD_BLOCKSIZE_MAX allocated */
-} EStats_ress_t;
-
-#define MAXREPOFFSET 1024
-
-static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params,
-                              unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets,
-                              const void* src, size_t srcSize,
-                              U32 notificationLevel)
-{
-    size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params.cParams.windowLog);
-    size_t cSize;
-
-    if (srcSize > blockSizeMax) srcSize = blockSizeMax;   /* protection vs large samples */
-    {   size_t const errorCode = ZSTD_compressBegin_usingCDict(esr.zc, esr.dict);
-        if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; }
-
-    }
-    cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
-    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; }
-
-    if (cSize) {  /* if cSize == 0, the block is not compressible */
-        const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);
-
-        /* literals stats */
-        {   const BYTE* bytePtr;
-            for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++)
-                countLit[*bytePtr]++;
-        }
-
-        /* seqStats */
-        {   U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
-            ZSTD_seqToCodes(seqStorePtr);
-
-            {   const BYTE* codePtr = seqStorePtr->ofCode;
-                U32 u;
-                for (u=0; u<nbSeq; u++) offsetcodeCount[codePtr[u]]++;
-            }
-
-            {   const BYTE* codePtr = seqStorePtr->mlCode;
-                U32 u;
-                for (u=0; u<nbSeq; u++) matchlengthCount[codePtr[u]]++;
-            }
-
-            {   const BYTE* codePtr = seqStorePtr->llCode;
-                U32 u;
-                for (u=0; u<nbSeq; u++) litlengthCount[codePtr[u]]++;
-            }
-
-            if (nbSeq >= 2) { /* rep offsets */
-                const seqDef* const seq = seqStorePtr->sequencesStart;
-                U32 offset1 = seq[0].offset - 3;
-                U32 offset2 = seq[1].offset - 3;
-                if (offset1 >= MAXREPOFFSET) offset1 = 0;
-                if (offset2 >= MAXREPOFFSET) offset2 = 0;
-                repOffsets[offset1] += 3;
-                repOffsets[offset2] += 1;
-    }   }   }
-}
-
-static size_t ZDICT_totalSampleSize(const size_t* fileSizes, unsigned nbFiles)
-{
-    size_t total=0;
-    unsigned u;
-    for (u=0; u<nbFiles; u++) total += fileSizes[u];
-    return total;
-}
-
-typedef struct { U32 offset; U32 count; } offsetCount_t;
-
-static void ZDICT_insertSortCount(offsetCount_t table[ZSTD_REP_NUM+1], U32 val, U32 count)
-{
-    U32 u;
-    table[ZSTD_REP_NUM].offset = val;
-    table[ZSTD_REP_NUM].count = count;
-    for (u=ZSTD_REP_NUM; u>0; u--) {
-        offsetCount_t tmp;
-        if (table[u-1].count >= table[u].count) break;
-        tmp = table[u-1];
-        table[u-1] = table[u];
-        table[u] = tmp;
-    }
-}
-
-/* ZDICT_flatLit() :
- * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals.
- * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode.
- */
-static void ZDICT_flatLit(unsigned* countLit)
-{
-    int u;
-    for (u=1; u<256; u++) countLit[u] = 2;
-    countLit[0]   = 4;
-    countLit[253] = 1;
-    countLit[254] = 1;
-}
-
-#define OFFCODE_MAX 30  /* only applicable to first block */
-static size_t ZDICT_analyzeEntropy(void*  dstBuffer, size_t maxDstSize,
-                                   unsigned compressionLevel,
-                             const void*  srcBuffer, const size_t* fileSizes, unsigned nbFiles,
-                             const void* dictBuffer, size_t  dictBufferSize,
-                                   unsigned notificationLevel)
-{
-    unsigned countLit[256];
-    HUF_CREATE_STATIC_CTABLE(hufTable, 255);
-    unsigned offcodeCount[OFFCODE_MAX+1];
-    short offcodeNCount[OFFCODE_MAX+1];
-    U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB));
-    unsigned matchLengthCount[MaxML+1];
-    short matchLengthNCount[MaxML+1];
-    unsigned litLengthCount[MaxLL+1];
-    short litLengthNCount[MaxLL+1];
-    U32 repOffset[MAXREPOFFSET];
-    offsetCount_t bestRepOffset[ZSTD_REP_NUM+1];
-    EStats_ress_t esr = { NULL, NULL, NULL };
-    ZSTD_parameters params;
-    U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total;
-    size_t pos = 0, errorCode;
-    size_t eSize = 0;
-    size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles);
-    size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles);
-    BYTE* dstPtr = (BYTE*)dstBuffer;
-
-    /* init */
-    DEBUGLOG(4, "ZDICT_analyzeEntropy");
-    if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; }   /* too large dictionary */
-    for (u=0; u<256; u++) countLit[u] = 1;   /* any character must be described */
-    for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;
-    for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1;
-    for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1;
-    memset(repOffset, 0, sizeof(repOffset));
-    repOffset[1] = repOffset[4] = repOffset[8] = 1;
-    memset(bestRepOffset, 0, sizeof(bestRepOffset));
-    if (compressionLevel==0) compressionLevel = g_compressionLevel_default;
-    params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);
-
-    esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, params.cParams, ZSTD_defaultCMem);
-    esr.zc = ZSTD_createCCtx();
-    esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);
-    if (!esr.dict || !esr.zc || !esr.workPlace) {
-        eSize = ERROR(memory_allocation);
-        DISPLAYLEVEL(1, "Not enough memory \n");
-        goto _cleanup;
-    }
-
-    /* collect stats on all samples */
-    for (u=0; u<nbFiles; u++) {
-        ZDICT_countEStats(esr, params,
-                          countLit, offcodeCount, matchLengthCount, litLengthCount, repOffset,
-                         (const char*)srcBuffer + pos, fileSizes[u],
-                          notificationLevel);
-        pos += fileSizes[u];
-    }
-
-    /* analyze, build stats, starting with literals */
-    {   size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
-        if (HUF_isError(maxNbBits)) {
-            eSize = maxNbBits;
-            DISPLAYLEVEL(1, " HUF_buildCTable error \n");
-            goto _cleanup;
-        }
-        if (maxNbBits==8) {  /* not compressible : will fail on HUF_writeCTable() */
-            DISPLAYLEVEL(2, "warning : pathological dataset : literals are not compressible : samples are noisy or too regular \n");
-            ZDICT_flatLit(countLit);  /* replace distribution by a fake "mostly flat but still compressible" distribution, that HUF_writeCTable() can encode */
-            maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
-            assert(maxNbBits==9);
-        }
-        huffLog = (U32)maxNbBits;
-    }
-
-    /* looking for most common first offsets */
-    {   U32 offset;
-        for (offset=1; offset<MAXREPOFFSET; offset++)
-            ZDICT_insertSortCount(bestRepOffset, offset, repOffset[offset]);
-    }
-    /* note : the result of this phase should be used to better appreciate the impact on statistics */
-
-    total=0; for (u=0; u<=offcodeMax; u++) total+=offcodeCount[u];
-    errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax);
-    if (FSE_isError(errorCode)) {
-        eSize = errorCode;
-        DISPLAYLEVEL(1, "FSE_normalizeCount error with offcodeCount \n");
-        goto _cleanup;
-    }
-    Offlog = (U32)errorCode;
-
-    total=0; for (u=0; u<=MaxML; u++) total+=matchLengthCount[u];
-    errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML);
-    if (FSE_isError(errorCode)) {
-        eSize = errorCode;
-        DISPLAYLEVEL(1, "FSE_normalizeCount error with matchLengthCount \n");
-        goto _cleanup;
-    }
-    mlLog = (U32)errorCode;
-
-    total=0; for (u=0; u<=MaxLL; u++) total+=litLengthCount[u];
-    errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL);
-    if (FSE_isError(errorCode)) {
-        eSize = errorCode;
-        DISPLAYLEVEL(1, "FSE_normalizeCount error with litLengthCount \n");
-        goto _cleanup;
-    }
-    llLog = (U32)errorCode;
-
-    /* write result to buffer */
-    {   size_t const hhSize = HUF_writeCTable(dstPtr, maxDstSize, hufTable, 255, huffLog);
-        if (HUF_isError(hhSize)) {
-            eSize = hhSize;
-            DISPLAYLEVEL(1, "HUF_writeCTable error \n");
-            goto _cleanup;
-        }
-        dstPtr += hhSize;
-        maxDstSize -= hhSize;
-        eSize += hhSize;
-    }
-
-    {   size_t const ohSize = FSE_writeNCount(dstPtr, maxDstSize, offcodeNCount, OFFCODE_MAX, Offlog);
-        if (FSE_isError(ohSize)) {
-            eSize = ohSize;
-            DISPLAYLEVEL(1, "FSE_writeNCount error with offcodeNCount \n");
-            goto _cleanup;
-        }
-        dstPtr += ohSize;
-        maxDstSize -= ohSize;
-        eSize += ohSize;
-    }
-
-    {   size_t const mhSize = FSE_writeNCount(dstPtr, maxDstSize, matchLengthNCount, MaxML, mlLog);
-        if (FSE_isError(mhSize)) {
-            eSize = mhSize;
-            DISPLAYLEVEL(1, "FSE_writeNCount error with matchLengthNCount \n");
-            goto _cleanup;
-        }
-        dstPtr += mhSize;
-        maxDstSize -= mhSize;
-        eSize += mhSize;
-    }
-
-    {   size_t const lhSize = FSE_writeNCount(dstPtr, maxDstSize, litLengthNCount, MaxLL, llLog);
-        if (FSE_isError(lhSize)) {
-            eSize = lhSize;
-            DISPLAYLEVEL(1, "FSE_writeNCount error with litlengthNCount \n");
-            goto _cleanup;
-        }
-        dstPtr += lhSize;
-        maxDstSize -= lhSize;
-        eSize += lhSize;
-    }
-
-    if (maxDstSize<12) {
-        eSize = ERROR(dstSize_tooSmall);
-        DISPLAYLEVEL(1, "not enough space to write RepOffsets \n");
-        goto _cleanup;
-    }
-# if 0
-    MEM_writeLE32(dstPtr+0, bestRepOffset[0].offset);
-    MEM_writeLE32(dstPtr+4, bestRepOffset[1].offset);
-    MEM_writeLE32(dstPtr+8, bestRepOffset[2].offset);
-#else
-    /* at this stage, we don't use the result of "most common first offset",
-       as the impact of statistics is not properly evaluated */
-    MEM_writeLE32(dstPtr+0, repStartValue[0]);
-    MEM_writeLE32(dstPtr+4, repStartValue[1]);
-    MEM_writeLE32(dstPtr+8, repStartValue[2]);
-#endif
-    eSize += 12;
-
-_cleanup:
-    ZSTD_freeCDict(esr.dict);
-    ZSTD_freeCCtx(esr.zc);
-    free(esr.workPlace);
-
-    return eSize;
-}
-
-
-
-size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
-                          const void* customDictContent, size_t dictContentSize,
-                          const void* samplesBuffer, const size_t* samplesSizes,
-                          unsigned nbSamples, ZDICT_params_t params)
-{
-    size_t hSize;
-#define HBUFFSIZE 256   /* should prove large enough for all entropy headers */
-    BYTE header[HBUFFSIZE];
-    int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel;
-    U32 const notificationLevel = params.notificationLevel;
-
-    /* check conditions */
-    DEBUGLOG(4, "ZDICT_finalizeDictionary");
-    if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall);
-    if (dictContentSize < ZDICT_CONTENTSIZE_MIN) return ERROR(srcSize_wrong);
-    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);
-
-    /* dictionary header */
-    MEM_writeLE32(header, ZSTD_MAGIC_DICTIONARY);
-    {   U64 const randomID = XXH64(customDictContent, dictContentSize, 0);
-        U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
-        U32 const dictID = params.dictID ? params.dictID : compliantID;
-        MEM_writeLE32(header+4, dictID);
-    }
-    hSize = 8;
-
-    /* entropy tables */
-    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
-    DISPLAYLEVEL(2, "statistics ... \n");
-    {   size_t const eSize = ZDICT_analyzeEntropy(header+hSize, HBUFFSIZE-hSize,
-                                  compressionLevel,
-                                  samplesBuffer, samplesSizes, nbSamples,
-                                  customDictContent, dictContentSize,
-                                  notificationLevel);
-        if (ZDICT_isError(eSize)) return eSize;
-        hSize += eSize;
-    }
-
-    /* copy elements in final buffer ; note : src and dst buffer can overlap */
-    if (hSize + dictContentSize > dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize;
-    {   size_t const dictSize = hSize + dictContentSize;
-        char* dictEnd = (char*)dictBuffer + dictSize;
-        memmove(dictEnd - dictContentSize, customDictContent, dictContentSize);
-        memcpy(dictBuffer, header, hSize);
-        return dictSize;
-    }
-}
-
-
-static size_t ZDICT_addEntropyTablesFromBuffer_advanced(
-        void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
-        const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
-        ZDICT_params_t params)
-{
-    int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel;
-    U32 const notificationLevel = params.notificationLevel;
-    size_t hSize = 8;
-
-    /* calculate entropy tables */
-    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
-    DISPLAYLEVEL(2, "statistics ... \n");
-    {   size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize,
-                                  compressionLevel,
-                                  samplesBuffer, samplesSizes, nbSamples,
-                                  (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize,
-                                  notificationLevel);
-        if (ZDICT_isError(eSize)) return eSize;
-        hSize += eSize;
-    }
-
-    /* add dictionary header (after entropy tables) */
-    MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY);
-    {   U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0);
-        U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
-        U32 const dictID = params.dictID ? params.dictID : compliantID;
-        MEM_writeLE32((char*)dictBuffer+4, dictID);
-    }
-
-    if (hSize + dictContentSize < dictBufferCapacity)
-        memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize);
-    return MIN(dictBufferCapacity, hSize+dictContentSize);
-}
-
-/* Hidden declaration for dbio.c */
-size_t ZDICT_trainFromBuffer_unsafe_legacy(
-                            void* dictBuffer, size_t maxDictSize,
-                            const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
-                            ZDICT_legacy_params_t params);
-/*! ZDICT_trainFromBuffer_unsafe_legacy() :
-*   Warning : `samplesBuffer` must be followed by a noisy guard band.
-*   @return : size of dictionary, or an error code which can be tested with ZDICT_isError()
-*/
-size_t ZDICT_trainFromBuffer_unsafe_legacy(
-                            void* dictBuffer, size_t maxDictSize,
-                            const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
-                            ZDICT_legacy_params_t params)
-{
-    U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16));
-    dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList));
-    unsigned const selectivity = params.selectivityLevel == 0 ? g_selectivity_default : params.selectivityLevel;
-    unsigned const minRep = (selectivity > 30) ? MINRATIO : nbSamples >> selectivity;
-    size_t const targetDictSize = maxDictSize;
-    size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
-    size_t dictSize = 0;
-    U32 const notificationLevel = params.zParams.notificationLevel;
-
-    /* checks */
-    if (!dictList) return ERROR(memory_allocation);
-    if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); }   /* requested dictionary size is too small */
-    if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); }   /* not enough source to create dictionary */
-
-    /* init */
-    ZDICT_initDictItem(dictList);
-
-    /* build dictionary */
-    ZDICT_trainBuffer_legacy(dictList, dictListSize,
-                       samplesBuffer, samplesBuffSize,
-                       samplesSizes, nbSamples,
-                       minRep, notificationLevel);
-
-    /* display best matches */
-    if (params.zParams.notificationLevel>= 3) {
-        unsigned const nb = MIN(25, dictList[0].pos);
-        unsigned const dictContentSize = ZDICT_dictSize(dictList);
-        unsigned u;
-        DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", (unsigned)dictList[0].pos-1, dictContentSize);
-        DISPLAYLEVEL(3, "list %u best segments \n", nb-1);
-        for (u=1; u<nb; u++) {
-            unsigned const pos = dictList[u].pos;
-            unsigned const length = dictList[u].length;
-            U32 const printedLength = MIN(40, length);
-            if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize)) {
-                free(dictList);
-                return ERROR(GENERIC);   /* should never happen */
-            }
-            DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |",
-                         u, length, pos, (unsigned)dictList[u].savings);
-            ZDICT_printHex((const char*)samplesBuffer+pos, printedLength);
-            DISPLAYLEVEL(3, "| \n");
-    }   }
-
-
-    /* create dictionary */
-    {   unsigned dictContentSize = ZDICT_dictSize(dictList);
-        if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); }   /* dictionary content too small */
-        if (dictContentSize < targetDictSize/4) {
-            DISPLAYLEVEL(2, "!  warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (unsigned)maxDictSize);
-            if (samplesBuffSize < 10 * targetDictSize)
-                DISPLAYLEVEL(2, "!  consider increasing the number of samples (total size : %u MB)\n", (unsigned)(samplesBuffSize>>20));
-            if (minRep > MINRATIO) {
-                DISPLAYLEVEL(2, "!  consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1);
-                DISPLAYLEVEL(2, "!  note : larger dictionaries are not necessarily better, test its efficiency on samples \n");
-            }
-        }
-
-        if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) {
-            unsigned proposedSelectivity = selectivity-1;
-            while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; }
-            DISPLAYLEVEL(2, "!  note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (unsigned)maxDictSize);
-            DISPLAYLEVEL(2, "!  consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity);
-            DISPLAYLEVEL(2, "!  always test dictionary efficiency on real samples \n");
-        }
-
-        /* limit dictionary size */
-        {   U32 const max = dictList->pos;   /* convention : nb of useful elts within dictList */
-            U32 currentSize = 0;
-            U32 n; for (n=1; n<max; n++) {
-                currentSize += dictList[n].length;
-                if (currentSize > targetDictSize) { currentSize -= dictList[n].length; break; }
-            }
-            dictList->pos = n;
-            dictContentSize = currentSize;
-        }
-
-        /* build dict content */
-        {   U32 u;
-            BYTE* ptr = (BYTE*)dictBuffer + maxDictSize;
-            for (u=1; u<dictList->pos; u++) {
-                U32 l = dictList[u].length;
-                ptr -= l;
-                if (ptr<(BYTE*)dictBuffer) { free(dictList); return ERROR(GENERIC); }   /* should not happen */
-                memcpy(ptr, (const char*)samplesBuffer+dictList[u].pos, l);
-        }   }
-
-        dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize,
-                                                             samplesBuffer, samplesSizes, nbSamples,
-                                                             params.zParams);
-    }
-
-    /* clean up */
-    free(dictList);
-    return dictSize;
-}
-
-
-/* ZDICT_trainFromBuffer_legacy() :
- * issue : samplesBuffer needs to be followed by a noisy guard band.
- * workaround : duplicate the buffer, and append the noise */
-size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity,
-                              const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
-                              ZDICT_legacy_params_t params)
-{
-    size_t result;
-    void* newBuff;
-    size_t const sBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
-    if (sBuffSize < ZDICT_MIN_SAMPLES_SIZE) return 0;   /* not enough content => no dictionary */
-
-    newBuff = malloc(sBuffSize + NOISELENGTH);
-    if (!newBuff) return ERROR(memory_allocation);
-
-    memcpy(newBuff, samplesBuffer, sBuffSize);
-    ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH);   /* guard band, for end of buffer condition */
-
-    result =
-        ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff,
-                                            samplesSizes, nbSamples, params);
-    free(newBuff);
-    return result;
-}
-
-
-size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
-                             const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
-{
-    ZDICT_fastCover_params_t params;
-    DEBUGLOG(3, "ZDICT_trainFromBuffer");
-    memset(&params, 0, sizeof(params));
-    params.d = 8;
-    params.steps = 4;
-    /* Default to level 3 since no compression level information is available */
-    params.zParams.compressionLevel = 3;
-#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=1)
-    params.zParams.notificationLevel = DEBUGLEVEL;
-#endif
-    return ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity,
-                                               samplesBuffer, samplesSizes, nbSamples,
-                                               &params);
-}
-
-size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
-                                  const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
-{
-    ZDICT_params_t params;
-    memset(&params, 0, sizeof(params));
-    return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity,
-                                                     samplesBuffer, samplesSizes, nbSamples,
-                                                     params);
-}
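For orientation while reviewing the trainer removed above: ZDICT_finalizeDictionary() and ZDICT_addEntropyTablesFromBuffer_advanced() both emit an 8-byte header (the little-endian ZSTD_MAGIC_DICTIONARY value 0xEC30A437 followed by the 32-bit dictID), then the entropy tables, then the raw dictionary content. A minimal Go sketch of a header check is shown below; `parseDictHeader` is a hypothetical helper written for this note, not something exported by the wrapper.

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// zstdDictMagic is ZSTD_MAGIC_DICTIONARY from zstd.h, written little-endian
// at offset 0 by ZDICT_finalizeDictionary / ZDICT_addEntropyTablesFromBuffer_advanced.
const zstdDictMagic = 0xEC30A437

// parseDictHeader is an illustrative helper (not part of the wrapper): it
// validates the 8-byte dictionary header and returns the embedded dictID.
func parseDictHeader(dict []byte) (uint32, error) {
	if len(dict) < 8 {
		return 0, errors.New("dictionary too small to contain a header")
	}
	if binary.LittleEndian.Uint32(dict[0:4]) != zstdDictMagic {
		return 0, errors.New("missing ZSTD_MAGIC_DICTIONARY")
	}
	return binary.LittleEndian.Uint32(dict[4:8]), nil
}

func main() {
	// Fake 8-byte header with no content, just to exercise the parser.
	hdr := make([]byte, 8)
	binary.LittleEndian.PutUint32(hdr[0:4], zstdDictMagic)
	binary.LittleEndian.PutUint32(hdr[4:8], 0x12345678)
	id, err := parseDictHeader(hdr)
	fmt.Println(id, err)
}
```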
diff --git a/vendor/github.com/DataDog/zstd/zdict.h b/vendor/github.com/DataDog/zstd/zdict.h
deleted file mode 100644
index 37978ec..0000000
--- a/vendor/github.com/DataDog/zstd/zdict.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef DICTBUILDER_H_001
-#define DICTBUILDER_H_001
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/*======  Dependencies  ======*/
-#include <stddef.h>  /* size_t */
-
-
-/* =====   ZDICTLIB_API : control library symbols visibility   ===== */
-#ifndef ZDICTLIB_VISIBILITY
-#  if defined(__GNUC__) && (__GNUC__ >= 4)
-#    define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default")))
-#  else
-#    define ZDICTLIB_VISIBILITY
-#  endif
-#endif
-#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
-#  define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY
-#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
-#  define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows generating better code, saving a function pointer load from the IAT and an indirect jump. */
-#else
-#  define ZDICTLIB_API ZDICTLIB_VISIBILITY
-#endif
-
-
-/*! ZDICT_trainFromBuffer():
- *  Train a dictionary from an array of samples.
- *  Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4,
- *  f=20, and accel=1.
- *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
- *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
- *  The resulting dictionary will be saved into `dictBuffer`.
- * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
- *          or an error code, which can be tested with ZDICT_isError().
- *  Note:  Dictionary training will fail if there are not enough samples to construct a
- *         dictionary, or if most of the samples are too small (< 8 bytes being the lower limit).
- *         If dictionary training fails, you should use zstd without a dictionary, as the dictionary
- *         would've been ineffective anyways. If you believe your samples would benefit from a dictionary
- *         please open an issue with details, and we can look into it.
- *  Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB.
- *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
- *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
- *        In general, it's recommended to provide a few thousands samples, though this can vary a lot.
- *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
- */
-ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
-                                    const void* samplesBuffer,
-                                    const size_t* samplesSizes, unsigned nbSamples);
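The doc comment above describes the flat `samplesBuffer`/`samplesSizes` layout that every trainer in this header expects. Preparing that layout from per-sample byte slices is plain Go; a small sketch follows (`flattenSamples` is an illustrative name, nothing like it is exported by the wrapper):

```go
// flattenSamples concatenates samples into a single buffer and records each
// sample's length in order, matching the samplesBuffer/samplesSizes contract.
package main

import "fmt"

func flattenSamples(samples [][]byte) (buffer []byte, sizes []int) {
	for _, s := range samples {
		buffer = append(buffer, s...)
		sizes = append(sizes, len(s))
	}
	return buffer, sizes
}

func main() {
	buf, sizes := flattenSamples([][]byte{
		[]byte(`{"user":"alice"}`),
		[]byte(`{"user":"bob"}`),
	})
	fmt.Println(len(buf), sizes) // 30 [16 14]
}
```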
-
-
-/*======   Helper functions   ======*/
-ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize);  /**< extracts dictID; @return zero if error (not a valid dictionary) */
-ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode);
-ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode);
-
-
-
-#ifdef ZDICT_STATIC_LINKING_ONLY
-
-/* ====================================================================================
- * The definitions in this section are considered experimental.
- * They should never be used with a dynamic library, as they may change in the future.
- * They are provided for advanced usages.
- * Use them only in association with static linking.
- * ==================================================================================== */
-
-typedef struct {
-    int      compressionLevel;   /* optimize for a specific zstd compression level; 0 means default */
-    unsigned notificationLevel;  /* Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
-    unsigned dictID;             /* force dictID value; 0 means auto mode (32-bits random value) */
-} ZDICT_params_t;
-
-/*! ZDICT_cover_params_t:
- *  k and d are the only required parameters.
- *  For others, value 0 means default.
- */
-typedef struct {
-    unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
-    unsigned d;                  /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
-    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */
-    unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
-    double splitPoint;           /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used for training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */
-    unsigned shrinkDict;         /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking  */
-    unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */
-    ZDICT_params_t zParams;
-} ZDICT_cover_params_t;
-
-typedef struct {
-    unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
-    unsigned d;                  /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
-    unsigned f;                  /* log of size of frequency array : constraint: 0 < f <= 31 : 0 means default (20) */
-    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */
-    unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
-    double splitPoint;           /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used for training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */
-    unsigned accel;              /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */
-    unsigned shrinkDict;         /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking  */
-    unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */
-
-    ZDICT_params_t zParams;
-} ZDICT_fastCover_params_t;
-
-/*! ZDICT_trainFromBuffer_cover():
- *  Train a dictionary from an array of samples using the COVER algorithm.
- *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
- *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
- *  The resulting dictionary will be saved into `dictBuffer`.
- * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
- *          or an error code, which can be tested with ZDICT_isError().
- *          See ZDICT_trainFromBuffer() for details on failure modes.
- *  Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.
- *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
- *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
- *        In general, it's recommended to provide a few thousands samples, though this can vary a lot.
- *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
- */
-ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
-          void *dictBuffer, size_t dictBufferCapacity,
-    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
-          ZDICT_cover_params_t parameters);
-
-/*! ZDICT_optimizeTrainFromBuffer_cover():
- * The same requirements as above hold for all the parameters except `parameters`.
- * This function tries many parameter combinations and picks the best parameters.
- * `*parameters` is filled with the best parameters found,
- * dictionary constructed with those parameters is stored in `dictBuffer`.
- *
- * All of the parameters d, k, steps are optional.
- * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
- * If steps is zero, the default value (40) is used.
- * If k is non-zero then we don't check multiple values of k, otherwise we check k values in [50, 2000].
- *
- * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
- *          or an error code, which can be tested with ZDICT_isError().
- *          On success `*parameters` contains the parameters selected.
- *          See ZDICT_trainFromBuffer() for details on failure modes.
- * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread.
- */
-ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
-          void* dictBuffer, size_t dictBufferCapacity,
-    const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
-          ZDICT_cover_params_t* parameters);
-
-/*! ZDICT_trainFromBuffer_fastCover():
- *  Train a dictionary from an array of samples using a modified version of COVER algorithm.
- *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
- *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
- *  d and k are required.
- *  All other parameters are optional, will use default values if not provided
- *  The resulting dictionary will be saved into `dictBuffer`.
- * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
- *          or an error code, which can be tested with ZDICT_isError().
- *          See ZDICT_trainFromBuffer() for details on failure modes.
- *  Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory.
- *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
- *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
- *        In general, it's recommended to provide a few thousands samples, though this can vary a lot.
- *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
- */
-ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer,
-                    size_t dictBufferCapacity, const void *samplesBuffer,
-                    const size_t *samplesSizes, unsigned nbSamples,
-                    ZDICT_fastCover_params_t parameters);
-
-/*! ZDICT_optimizeTrainFromBuffer_fastCover():
- * The same requirements as above hold for all the parameters except `parameters`.
- * This function tries many parameter combinations (specifically, k and d combinations)
- * and picks the best parameters. `*parameters` is filled with the best parameters found,
- * dictionary constructed with those parameters is stored in `dictBuffer`.
- * All of the parameters d, k, steps, f, and accel are optional.
- * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
- * If steps is zero, the default value (40) is used.
- * If k is non-zero then we don't check multiple values of k, otherwise we check k values in [50, 2000].
- * If f is zero, default value of 20 is used.
- * If accel is zero, default value of 1 is used.
- *
- * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
- *          or an error code, which can be tested with ZDICT_isError().
- *          On success `*parameters` contains the parameters selected.
- *          See ZDICT_trainFromBuffer() for details on failure modes.
- * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.
- */
-ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer,
-                    size_t dictBufferCapacity, const void* samplesBuffer,
-                    const size_t* samplesSizes, unsigned nbSamples,
-                    ZDICT_fastCover_params_t* parameters);
-
-/*! ZDICT_finalizeDictionary():
- * Given a custom content as a basis for dictionary, and a set of samples,
- * finalize dictionary by adding headers and statistics.
- *
- * Samples must be stored concatenated in a flat buffer `samplesBuffer`,
- * supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
- *
- * dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes.
- * maxDictSize must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes.
- *
- * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`),
- *          or an error code, which can be tested by ZDICT_isError().
- * Note: ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0.
- * Note 2: dictBuffer and dictContent can overlap
- */
-#define ZDICT_CONTENTSIZE_MIN 128
-#define ZDICT_DICTSIZE_MIN    256
-ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
-                                const void* dictContent, size_t dictContentSize,
-                                const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
-                                ZDICT_params_t parameters);
-
-typedef struct {
-    unsigned selectivityLevel;   /* 0 means default; larger => select more => larger dictionary */
-    ZDICT_params_t zParams;
-} ZDICT_legacy_params_t;
-
-/*! ZDICT_trainFromBuffer_legacy():
- *  Train a dictionary from an array of samples.
- *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
- *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
- *  The resulting dictionary will be saved into `dictBuffer`.
- * `parameters` is optional and can be provided with values set to 0 to mean "default".
- * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
- *          or an error code, which can be tested with ZDICT_isError().
- *          See ZDICT_trainFromBuffer() for details on failure modes.
- *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
- *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
- *        In general, it's recommended to provide a few thousands samples, though this can vary a lot.
- *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
- *  Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.
- */
-ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy(
-    void *dictBuffer, size_t dictBufferCapacity,
-    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
-    ZDICT_legacy_params_t parameters);
-
-/* Deprecation warnings */
-/* It is generally possible to disable deprecation warnings from compiler,
-   for example with -Wno-deprecated-declarations for gcc
-   or _CRT_SECURE_NO_WARNINGS in Visual.
-   Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */
-#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS
-#  define ZDICT_DEPRECATED(message) ZDICTLIB_API   /* disable deprecation warnings */
-#else
-#  define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-#  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
-#    define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API
-#  elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__)
-#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message)))
-#  elif (ZDICT_GCC_VERSION >= 301)
-#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated))
-#  elif defined(_MSC_VER)
-#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message))
-#  else
-#    pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler")
-#    define ZDICT_DEPRECATED(message) ZDICTLIB_API
-#  endif
-#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */
-
-ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead")
-size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
-                                  const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
-
-
-#endif   /* ZDICT_STATIC_LINKING_ONLY */
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif   /* DICTBUILDER_H_001 */
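Once a dictionary has been trained (by any of the functions declared above, or by the zstd CLI), the Go wrapper consumes it through its streaming helpers rather than through zdict.h directly. The sketch below is hedged: it assumes the `NewWriterLevelDict` and `NewReaderDict` helpers from the wrapper's zstd_stream.go, which are not part of this hunk.

```go
package main

import (
	"bytes"
	"io/ioutil"
	"log"

	"github.com/DataDog/zstd"
)

func main() {
	// A dictionary produced by ZDICT_trainFromBuffer or `zstd --train`.
	dict, err := ioutil.ReadFile("samples.dict")
	if err != nil {
		log.Fatal(err)
	}

	// Compress with the dictionary. NewWriterLevelDict/NewReaderDict are
	// assumed to come from the wrapper's zstd_stream.go (not shown here).
	var buf bytes.Buffer
	w := zstd.NewWriterLevelDict(&buf, zstd.DefaultCompression, dict)
	if _, err := w.Write([]byte("payload that resembles the training samples")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Decompress with the same dictionary.
	r := zstd.NewReaderDict(bytes.NewReader(buf.Bytes()), dict)
	defer r.Close()
	out, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", out)
}
```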
diff --git a/vendor/github.com/DataDog/zstd/zstd.go b/vendor/github.com/DataDog/zstd/zstd.go
deleted file mode 100644
index b6af4eb..0000000
--- a/vendor/github.com/DataDog/zstd/zstd.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package zstd
-
-/*
-#define ZSTD_STATIC_LINKING_ONLY
-#include "zstd.h"
-#include "stdint.h"  // for uintptr_t
-
-// The following *_wrapper functions are used for removing superfluous
-// memory allocations when calling the wrapped functions from Go code.
-// See https://github.com/golang/go/issues/24450 for details.
-
-static size_t ZSTD_compress_wrapper(uintptr_t dst, size_t maxDstSize, const uintptr_t src, size_t srcSize, int compressionLevel) {
-	return ZSTD_compress((void*)dst, maxDstSize, (const void*)src, srcSize, compressionLevel);
-}
-
-static size_t ZSTD_decompress_wrapper(uintptr_t dst, size_t maxDstSize, uintptr_t src, size_t srcSize) {
-	return ZSTD_decompress((void*)dst, maxDstSize, (const void *)src, srcSize);
-}
-
-*/
-import "C"
-import (
-	"bytes"
-	"errors"
-	"io/ioutil"
-	"runtime"
-	"unsafe"
-)
-
-// Defines best and standard values for zstd cli
-const (
-	BestSpeed          = 1
-	BestCompression    = 20
-	DefaultCompression = 5
-)
-
-var (
-	// ErrEmptySlice is returned when there is nothing to compress
-	ErrEmptySlice = errors.New("Bytes slice is empty")
-)
-
-// CompressBound returns the worst case size needed for a destination buffer,
-// which can be used to preallocate a destination buffer or select a previously
-// allocated buffer from a pool.
-// See zstd.h to mirror implementation of ZSTD_COMPRESSBOUND
-func CompressBound(srcSize int) int {
-	lowLimit := 128 << 10 // 128 kB
-	var margin int
-	if srcSize < lowLimit {
-		margin = (lowLimit - srcSize) >> 11
-	}
-	return srcSize + (srcSize >> 8) + margin
-}
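The margin arithmetic above mirrors ZSTD_COMPRESSBOUND from zstd.h. A quick worked check of the formula, using only the function defined in this file:

```go
package main

import (
	"fmt"

	"github.com/DataDog/zstd"
)

func main() {
	// For a 1 KiB input: margin = (128 KiB - 1 KiB) >> 11 = 63,
	// so the bound is 1024 + (1024 >> 8) + 63 = 1091 bytes.
	fmt.Println(zstd.CompressBound(1024)) // 1091

	// At or above 128 KiB the margin term disappears: 131072 + 512.
	fmt.Println(zstd.CompressBound(128 << 10)) // 131584
}
```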
-
-// cCompressBound is a cgo call to check the go implementation above against the c code.
-func cCompressBound(srcSize int) int {
-	return int(C.ZSTD_compressBound(C.size_t(srcSize)))
-}
-
-// Compress src into dst.  If you have a buffer to use, you can pass it to
-// prevent allocation.  If it is too small, or if nil is passed, a new buffer
-// will be allocated and returned.
-func Compress(dst, src []byte) ([]byte, error) {
-	return CompressLevel(dst, src, DefaultCompression)
-}
-
-// CompressLevel is the same as Compress but you can pass a compression level
-func CompressLevel(dst, src []byte, level int) ([]byte, error) {
-	bound := CompressBound(len(src))
-	if cap(dst) >= bound {
-		dst = dst[0:bound] // Reuse dst buffer
-	} else {
-		dst = make([]byte, bound)
-	}
-
-	srcPtr := C.uintptr_t(uintptr(0)) // Do not point anywhere, if src is empty
-	if len(src) > 0 {
-		srcPtr = C.uintptr_t(uintptr(unsafe.Pointer(&src[0])))
-	}
-
-	cWritten := C.ZSTD_compress_wrapper(
-		C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))),
-		C.size_t(len(dst)),
-		srcPtr,
-		C.size_t(len(src)),
-		C.int(level))
-
-	runtime.KeepAlive(src)
-	written := int(cWritten)
-	// Check if the return is an Error code
-	if err := getError(written); err != nil {
-		return nil, err
-	}
-	return dst[:written], nil
-}
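A short usage sketch of the pre-allocation path described above, using only functions defined in this file:

```go
package main

import (
	"fmt"
	"log"

	"github.com/DataDog/zstd"
)

func main() {
	src := []byte("hello, hello, hello zstd")

	// Passing a preallocated dst avoids an allocation when it is large
	// enough; passing nil lets CompressLevel allocate CompressBound(len(src)) bytes.
	dst := make([]byte, zstd.CompressBound(len(src)))
	compressed, err := zstd.CompressLevel(dst, src, zstd.BestSpeed)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("compressed %d -> %d bytes\n", len(src), len(compressed))
}
```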
-
-// Decompress src into dst.  If you have a buffer to use, you can pass it to
-// prevent allocation.  If it is too small, or if nil is passed, a new buffer
-// will be allocated and returned.
-func Decompress(dst, src []byte) ([]byte, error) {
-	if len(src) == 0 {
-		return []byte{}, ErrEmptySlice
-	}
-	decompress := func(dst, src []byte) ([]byte, error) {
-
-		cWritten := C.ZSTD_decompress_wrapper(
-			C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))),
-			C.size_t(len(dst)),
-			C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))),
-			C.size_t(len(src)))
-
-		runtime.KeepAlive(src)
-		written := int(cWritten)
-		// Check error
-		if err := getError(written); err != nil {
-			return nil, err
-		}
-		return dst[:written], nil
-	}
-
-	if len(dst) == 0 {
-		// Attempt to use zStd to determine decompressed size (may result in error or 0)
-		size := int(C.size_t(C.ZSTD_getDecompressedSize(unsafe.Pointer(&src[0]), C.size_t(len(src)))))
-
-		if err := getError(size); err != nil {
-			return nil, err
-		}
-
-		if size > 0 {
-			dst = make([]byte, size)
-		} else {
-			dst = make([]byte, len(src)*3) // starting guess
-		}
-	}
-	for i := 0; i < 3; i++ { // 3 tries to allocate a bigger buffer
-		result, err := decompress(dst, src)
-		if !IsDstSizeTooSmallError(err) {
-			return result, err
-		}
-		dst = make([]byte, len(dst)*2) // Grow buffer by 2
-	}
-
-	// We failed getting a dst buffer of correct size, use stream API
-	r := NewReader(bytes.NewReader(src))
-	defer r.Close()
-	return ioutil.ReadAll(r)
-}
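A round-trip sketch tying Compress and Decompress together; as the code above shows, an undersized dst is retried with progressively larger buffers and finally handed to the streaming reader:

```go
package main

import (
	"fmt"
	"log"

	"github.com/DataDog/zstd"
)

func main() {
	src := []byte("some repetitive payload, some repetitive payload")

	compressed, err := zstd.Compress(nil, src)
	if err != nil {
		log.Fatal(err)
	}

	// A reusable dst is optional; an undersized one triggers the retry and
	// streaming fallback paths implemented in Decompress above.
	out, err := zstd.Decompress(make([]byte, 16), compressed)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out) == string(src)) // true
}
```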
diff --git a/vendor/github.com/DataDog/zstd/zstd.h b/vendor/github.com/DataDog/zstd/zstd.h
deleted file mode 100644
index a1910ee..0000000
--- a/vendor/github.com/DataDog/zstd/zstd.h
+++ /dev/null
@@ -1,1945 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#ifndef ZSTD_H_235446
-#define ZSTD_H_235446
-
-/* ======   Dependency   ======*/
-#include <stddef.h>   /* size_t */
-
-
-/* =====   ZSTDLIB_API : control library symbols visibility   ===== */
-#ifndef ZSTDLIB_VISIBILITY
-#  if defined(__GNUC__) && (__GNUC__ >= 4)
-#    define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default")))
-#  else
-#    define ZSTDLIB_VISIBILITY
-#  endif
-#endif
-#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
-#  define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY
-#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
-#  define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required but allows generating better code, saving a function pointer load from the IAT and an indirect jump. */
-#else
-#  define ZSTDLIB_API ZSTDLIB_VISIBILITY
-#endif
-
-
-/*******************************************************************************
-  Introduction
-
-  zstd, short for Zstandard, is a fast lossless compression algorithm, targeting
-  real-time compression scenarios at zlib-level and better compression ratios.
-  The zstd compression library provides in-memory compression and decompression
-  functions.
-
-  The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),
-  which is currently 22. Levels >= 20, labeled `--ultra`, should be used with
-  caution, as they require more memory. The library also offers negative
-  compression levels, which extend the range of speed vs. ratio preferences.
-  The lower the level, the faster the speed (at the cost of compression).
-
-  Compression can be done in:
-    - a single step (described as Simple API)
-    - a single step, reusing a context (described as Explicit context)
-    - unbounded multiple steps (described as Streaming compression)
-
-  The compression ratio achievable on small data can be highly improved using
-  a dictionary. Dictionary compression can be performed in:
-    - a single step (described as Simple dictionary API)
-    - a single step, reusing a dictionary (described as Bulk-processing
-      dictionary API)
-
-  Advanced experimental functions can be accessed using
-  `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.
-
-  Advanced experimental APIs should never be used with a dynamically-linked
-  library. They are not "stable"; their definitions or signatures may change in
-  the future. Only static linking is allowed.
-*******************************************************************************/
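The "unbounded multiple steps" mode described above is what the Go wrapper's zlib-style streaming API maps to. A hedged sketch follows; it assumes the wrapper's `NewWriter` helper from zstd_stream.go (`NewReader` appears in zstd.go earlier in this change):

```go
package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"log"

	"github.com/DataDog/zstd"
)

func main() {
	// Streaming compression: write through a zstd.Writer, then read the
	// frame back through a zstd.Reader, zlib-style.
	var buf bytes.Buffer
	w := zstd.NewWriter(&buf)
	if _, err := io.Copy(w, bytes.NewReader(bytes.Repeat([]byte("abc"), 1<<16))); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	r := zstd.NewReader(&buf)
	defer r.Close()
	plain, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decompressed %d bytes", len(plain))
}
```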
-
-/*------   Version   ------*/
-#define ZSTD_VERSION_MAJOR    1
-#define ZSTD_VERSION_MINOR    4
-#define ZSTD_VERSION_RELEASE  1
-
-#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
-ZSTDLIB_API unsigned ZSTD_versionNumber(void);   /**< to check runtime library version */
-
-#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
-#define ZSTD_QUOTE(str) #str
-#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
-#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
-ZSTDLIB_API const char* ZSTD_versionString(void);   /* requires v1.3.0+ */
-
-/* *************************************
- *  Default constant
- ***************************************/
-#ifndef ZSTD_CLEVEL_DEFAULT
-#  define ZSTD_CLEVEL_DEFAULT 3
-#endif
-
-/* *************************************
- *  Constants
- ***************************************/
-
-/* All magic numbers are supposed read/written to/from files/memory using little-endian convention */
-#define ZSTD_MAGICNUMBER            0xFD2FB528    /* valid since v0.8.0 */
-#define ZSTD_MAGIC_DICTIONARY       0xEC30A437    /* valid since v0.7.0 */
-#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50    /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
-#define ZSTD_MAGIC_SKIPPABLE_MASK   0xFFFFFFF0
-
-#define ZSTD_BLOCKSIZELOG_MAX  17
-#define ZSTD_BLOCKSIZE_MAX     (1<<ZSTD_BLOCKSIZELOG_MAX)
-
-
-
-/***************************************
-*  Simple API
-***************************************/
-/*! ZSTD_compress() :
- *  Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
- *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
- *  @return : compressed size written into `dst` (<= `dstCapacity),
- *            or an error code if it fails (which can be tested using ZSTD_isError()). */
-ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
-                            const void* src, size_t srcSize,
-                                  int compressionLevel);
-
-/*! ZSTD_decompress() :
- *  `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
- *  `dstCapacity` is an upper bound of originalSize to regenerate.
- *  If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
- *  @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
- *            or an errorCode if it fails (which can be tested using ZSTD_isError()). */
-ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
-                              const void* src, size_t compressedSize);
-
-/*! ZSTD_getFrameContentSize() : requires v1.3.0+
- *  `src` should point to the start of a ZSTD encoded frame.
- *  `srcSize` must be at least as large as the frame header.
- *            hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
- *  @return : - decompressed size of `src` frame content, if known
- *            - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
- *            - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
- *   note 1 : a 0 return value means the frame is valid but "empty".
- *   note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
- *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
- *            In which case, it's necessary to use streaming mode to decompress data.
- *            Optionally, application can rely on some implicit limit,
- *            as ZSTD_decompress() only needs an upper bound of decompressed size.
- *            (For example, data may be known to be cut into blocks <= 16 KB).
- *   note 3 : decompressed size is always present when compression is completed using single-pass functions,
- *            such as ZSTD_compress(), ZSTD_compressCCtx(), ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
- *   note 4 : decompressed size can be very large (64-bits value),
- *            potentially larger than what local system can handle as a single memory segment.
- *            In which case, it's necessary to use streaming mode to decompress data.
- *   note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
- *            Always ensure return value fits within application's authorized limits.
- *            Each application can set its own limits.
- *   note 6 : This function replaces ZSTD_getDecompressedSize() */
-#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
-#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
-ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
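-
-/* Illustrative sketch : sizing a destination buffer from the frame header,
- * with a caller-supplied limit for untrusted input (see note 5 above).
- * The helper name and its `maxSize` parameter are illustrative only;
- * assumes <stdlib.h> and "zstd.h".
- *
- *     static void* decode_frame(const void* src, size_t srcSize,
- *                               unsigned long long maxSize, size_t* outSize)
- *     {
- *         unsigned long long const rSize = ZSTD_getFrameContentSize(src, srcSize);
- *         void* dst;
- *         if (rSize == ZSTD_CONTENTSIZE_ERROR)   return NULL;   // not a valid zstd frame
- *         if (rSize == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;   // prefer streaming mode
- *         if (rSize > maxSize)                   return NULL;   // enforce caller's limit
- *         dst = malloc((size_t)rSize);
- *         if (dst == NULL) return NULL;
- *         {   size_t const dSize = ZSTD_decompress(dst, (size_t)rSize, src, srcSize);
- *             if (ZSTD_isError(dSize) || dSize != rSize) { free(dst); return NULL; }
- *             *outSize = dSize;
- *         }
- *         return dst;
- *     }
- */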
-
-/*! ZSTD_getDecompressedSize() :
- *  NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
- *  Both functions work the same way, but ZSTD_getDecompressedSize() blends
- *  "empty", "unknown" and "error" results to the same return value (0),
- *  while ZSTD_getFrameContentSize() gives them separate return values.
- * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
-ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
-
-/*! ZSTD_findFrameCompressedSize() :
- * `src` should point to the start of a ZSTD frame or skippable frame.
- * `srcSize` must be >= first frame size
- * @return : the compressed size of the first frame starting at `src`,
- *           suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
- *        or an error code if input is invalid */
-ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
-
-
-/*======  Helper functions  ======*/
-#define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
-ZSTDLIB_API size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
-ZSTDLIB_API unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
-ZSTDLIB_API const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
-ZSTDLIB_API int         ZSTD_minCLevel(void);               /*!< minimum negative compression level allowed */
-ZSTDLIB_API int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */
-
-
-/***************************************
-*  Explicit context
-***************************************/
-/*= Compression context
- *  When compressing many times,
- *  it is recommended to allocate a context just once,
- *  and re-use it for each successive compression operation.
- *  This makes the workload friendlier to the system's memory allocator.
- *  Note : re-using context is just a speed / resource optimization.
- *         It doesn't change the compression ratio, which remains identical.
- *  Note 2 : In multi-threaded environments,
- *         use a separate context per thread for parallel execution.
- */
-typedef struct ZSTD_CCtx_s ZSTD_CCtx;
-ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
-ZSTDLIB_API size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);
-
-/*! ZSTD_compressCCtx() :
- *  Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
- *  The function will compress at the requested compression level,
- *  ignoring any other parameter. */
-ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
-                                     void* dst, size_t dstCapacity,
-                               const void* src, size_t srcSize,
-                                     int compressionLevel);
-
-/*= Decompression context
- *  When decompressing many times,
- *  it is recommended to allocate a context only once,
- *  and re-use it for each successive decompression operation.
- *  This makes the workload friendlier to the system's memory allocator.
- *  Use one context per thread for parallel execution. */
-typedef struct ZSTD_DCtx_s ZSTD_DCtx;
-ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
-ZSTDLIB_API size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);
-
-/*! ZSTD_decompressDCtx() :
- *  Same as ZSTD_decompress(),
- *  requires an allocated ZSTD_DCtx.
- *  Compatible with sticky parameters.
- */
-ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
-                                       void* dst, size_t dstCapacity,
-                                 const void* src, size_t srcSize);
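-
-/* Illustrative sketch : re-using one explicit context for many independent buffers
- * (one context per thread). `srcs`, `dsts`, the size arrays and `nbBuffers` are
- * illustrative only.
- *
- *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
- *     if (cctx != NULL) {
- *         size_t i;
- *         for (i = 0; i < nbBuffers; i++) {
- *             size_t const cSize = ZSTD_compressCCtx(cctx,
- *                                                    dsts[i], dstCapacities[i],
- *                                                    srcs[i], srcSizes[i],
- *                                                    3);               // compression level
- *             if (ZSTD_isError(cSize)) break;                          // handle the error
- *         }
- *         ZSTD_freeCCtx(cctx);
- *     }
- */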
-
-
-/***************************************
-*  Advanced compression API
-***************************************/
-
-/* API design :
- *   Parameters are pushed one by one into an existing context,
- *   using ZSTD_CCtx_set*() functions.
- *   Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
- *   "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
- *   They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()
- *
- *   It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
- *
- *   This API supersedes all other "advanced" API entry points in the experimental section.
- *   In the future, we expect to remove the experimental API entry points which are redundant with this API.
- */
-
-
-/* Compression strategies, listed from fastest to strongest */
-typedef enum { ZSTD_fast=1,
-               ZSTD_dfast=2,
-               ZSTD_greedy=3,
-               ZSTD_lazy=4,
-               ZSTD_lazy2=5,
-               ZSTD_btlazy2=6,
-               ZSTD_btopt=7,
-               ZSTD_btultra=8,
-               ZSTD_btultra2=9
-               /* note : new strategies _might_ be added in the future.
-                         Only the order (from fast to strong) is guaranteed */
-} ZSTD_strategy;
-
-
-typedef enum {
-
-    /* compression parameters
-     * Note: When compressing with a ZSTD_CDict these parameters are superseded
-     * by the parameters used to construct the ZSTD_CDict. See ZSTD_CCtx_refCDict()
-     * for more info (superseded-by-cdict). */
-    ZSTD_c_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
-                              * Default level is ZSTD_CLEVEL_DEFAULT==3.
-                              * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
-                              * Note 1 : it's possible to pass a negative compression level.
-                              * Note 2 : setting a level sets all default values of other compression parameters */
-    ZSTD_c_windowLog=101,    /* Maximum allowed back-reference distance, expressed as power of 2.
-                              * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
-                              * Special: value 0 means "use default windowLog".
-                              * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
-                              *       requires explicitly allowing such window size at decompression stage if using streaming. */
-    ZSTD_c_hashLog=102,      /* Size of the initial probe table, as a power of 2.
-                              * Resulting memory usage is (1 << (hashLog+2)).
-                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
-                              * Larger tables improve compression ratio of strategies <= dFast,
-                              * and improve speed of strategies > dFast.
-                              * Special: value 0 means "use default hashLog". */
-    ZSTD_c_chainLog=103,     /* Size of the multi-probe search table, as a power of 2.
-                              * Resulting memory usage is (1 << (chainLog+2)).
-                              * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
-                              * Larger tables result in better and slower compression.
-                              * This parameter is useless when using "fast" strategy.
-                              * It's still useful when using "dfast" strategy,
-                              * in which case it defines a secondary probe table.
-                              * Special: value 0 means "use default chainLog". */
-    ZSTD_c_searchLog=104,    /* Number of search attempts, as a power of 2.
-                              * More attempts result in better and slower compression.
-                              * This parameter is useless when using "fast" and "dFast" strategies.
-                              * Special: value 0 means "use default searchLog". */
-    ZSTD_c_minMatch=105,     /* Minimum size of searched matches.
-                              * Note that Zstandard can still find matches of smaller size,
-                              * it just tweaks its search algorithm to look for this size and larger.
-                              * Larger values increase compression and decompression speed, but decrease ratio.
-                              * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX.
-                              * Note that currently, for all strategies < btopt, effective minimum is 4,
-                              *                      and for all strategies > fast, effective maximum is 6.
-                              * Special: value 0 means "use default minMatchLength". */
-    ZSTD_c_targetLength=106, /* Impact of this field depends on strategy.
-                              * For strategies btopt, btultra & btultra2:
-                              *     Length of Match considered "good enough" to stop search.
-                              *     Larger values make compression stronger, and slower.
-                              * For strategy fast:
-                              *     Distance between match sampling.
-                              *     Larger values make compression faster, and weaker.
-                              * Special: value 0 means "use default targetLength". */
-    ZSTD_c_strategy=107,     /* See ZSTD_strategy enum definition.
-                              * The higher the value of selected strategy, the more complex it is,
-                              * resulting in stronger and slower compression.
-                              * Special: value 0 means "use default strategy". */
-
-    /* LDM mode parameters */
-    ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
-                                     * This parameter is designed to improve compression ratio
-                                     * for large inputs, by finding large matches at long distance.
-                                     * It increases memory usage and window size.
-                                     * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB
-                                     * except when expressly set to a different value. */
-    ZSTD_c_ldmHashLog=161,   /* Size of the table for long distance matching, as a power of 2.
-                              * Larger values increase memory usage and compression ratio,
-                              * but decrease compression speed.
-                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
-                              * Default: windowLog - 7.
-                              * Special: value 0 means "automatically determine hashlog". */
-    ZSTD_c_ldmMinMatch=162,  /* Minimum match size for long distance matcher.
-                              * Larger/too small values usually decrease compression ratio.
-                              * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
-                              * Special: value 0 means "use default value" (default: 64). */
-    ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution.
-                              * Larger values improve collision resolution but decrease compression speed.
-                              * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX.
-                              * Special: value 0 means "use default value" (default: 3). */
-    ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table.
-                              * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
-                              * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
-                              * Larger values improve compression speed.
-                              * Deviating far from default value will likely result in a compression ratio decrease.
-                              * Special: value 0 means "automatically determine hashRateLog". */
-
-    /* frame parameters */
-    ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
-                              * Content size must be known at the beginning of compression.
-                              * This is automatically the case when using ZSTD_compress2().
-                              * For streaming variants, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
-    ZSTD_c_checksumFlag=201, /* A 32-bit checksum of content is written at end of frame (default:0) */
-    ZSTD_c_dictIDFlag=202,   /* When applicable, dictionary's ID is written into frame header (default:1) */
-
-    /* multi-threading parameters */
-    /* These parameters are only useful if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
-     * They return an error otherwise. */
-    ZSTD_c_nbWorkers=400,    /* Select how many threads will be spawned to compress in parallel.
-                              * When nbWorkers >= 1, triggers asynchronous mode when used with ZSTD_compressStream*() :
-                              * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller,
-                              * while compression work is performed in parallel, within worker threads.
-                              * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :
-                              *  in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).
-                              * More workers improve speed, but also increase memory usage.
-                              * Default value is `0`, aka "single-threaded mode" : no worker is spawned, compression is performed inside Caller's thread, all invocations are blocking */
-    ZSTD_c_jobSize=401,      /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
-                              * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
-                              * 0 means default, which is dynamically determined based on compression parameters.
-                              * Job size must be at least the overlap size, or 1 MB, whichever is larger.
-                              * The minimum size is automatically and transparently enforced */
-    ZSTD_c_overlapLog=402,   /* Control the overlap size, as a fraction of window size.
-                              * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
-                              * It helps preserve compression ratio, while each job is compressed in parallel.
-                              * This value is enforced only when nbWorkers >= 1.
-                              * Larger values increase compression ratio, but decrease speed.
-                              * Possible values range from 0 to 9 :
-                              * - 0 means "default" : value will be determined by the library, depending on strategy
-                              * - 1 means "no overlap"
-                              * - 9 means "full overlap", using a full window size.
-                              * Each intermediate rank increases/decreases load size by a factor 2 :
-                              * 9: full window;  8: w/2;  7: w/4;  6: w/8;  5:w/16;  4: w/32;  3:w/64;  2:w/128;  1:no overlap;  0:default
-                              * default value varies between 6 and 9, depending on strategy */
-
-    /* note : additional experimental parameters are also available
-     * within the experimental section of the API.
-     * At the time of this writing, they include :
-     * ZSTD_c_rsyncable
-     * ZSTD_c_format
-     * ZSTD_c_forceMaxWindow
-     * ZSTD_c_forceAttachDict
-     * ZSTD_c_literalCompressionMode
-     * ZSTD_c_targetCBlockSize
-     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
-     * note : never ever use experimentalParam? names directly;
-     *        also, the enums values themselves are unstable and can still change.
-     */
-     ZSTD_c_experimentalParam1=500,
-     ZSTD_c_experimentalParam2=10,
-     ZSTD_c_experimentalParam3=1000,
-     ZSTD_c_experimentalParam4=1001,
-     ZSTD_c_experimentalParam5=1002,
-     ZSTD_c_experimentalParam6=1003,
-} ZSTD_cParameter;
-
-typedef struct {
-    size_t error;
-    int lowerBound;
-    int upperBound;
-} ZSTD_bounds;
-
-/*! ZSTD_cParam_getBounds() :
- *  All parameters must belong to an interval with lower and upper bounds,
- *  otherwise they will either trigger an error or be automatically clamped.
- * @return : a structure, ZSTD_bounds, which contains
- *         - an error status field, which must be tested using ZSTD_isError()
- *         - lower and upper bounds, both inclusive
- */
-ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam);
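-
-/* Illustrative sketch : clamping a candidate value into a parameter's bounds
- * before handing it to ZSTD_CCtx_setParameter(). Helper name is illustrative only.
- *
- *     static int clamp_cparam(ZSTD_cParameter param, int value)
- *     {
- *         ZSTD_bounds const b = ZSTD_cParam_getBounds(param);
- *         if (ZSTD_isError(b.error)) return value;        // unknown parameter : leave unchanged
- *         if (value < b.lowerBound)  return b.lowerBound;
- *         if (value > b.upperBound)  return b.upperBound;
- *         return value;
- *     }
- */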
-
-/*! ZSTD_CCtx_setParameter() :
- *  Set one compression parameter, selected by enum ZSTD_cParameter.
- *  All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().
- *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
- *  Setting a parameter is generally only possible during frame initialization (before starting compression).
- *  Exception : when using multi-threading mode (nbWorkers >= 1),
- *              the following parameters can be updated _during_ compression (within same frame):
- *              => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
- *              new parameters will be active for next job only (after a flush()).
- * @return : an error code (which can be tested using ZSTD_isError()).
- */
-ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value);
-
-/*! ZSTD_CCtx_setPledgedSrcSize() :
- *  Total input data size to be compressed as a single frame.
- *  Value will be written in frame header, unless explicitly forbidden using ZSTD_c_contentSizeFlag.
- *  This value will also be checked at end of frame, and trigger an error if not respected.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- *  Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.
- *           In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
- *           ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.
- *  Note 2 : pledgedSrcSize is only valid once, for the next frame.
- *           It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.
- *  Note 3 : Whenever all input data is provided and consumed in a single round,
- *           for example with ZSTD_compress2(),
- *           or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
- *           this value is automatically overridden by srcSize instead.
- */
-ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
-
-typedef enum {
-    ZSTD_reset_session_only = 1,
-    ZSTD_reset_parameters = 2,
-    ZSTD_reset_session_and_parameters = 3
-} ZSTD_ResetDirective;
-
-/*! ZSTD_CCtx_reset() :
- *  There are 2 different things that can be reset, independently or jointly :
- *  - The session : will stop compressing current frame, and make CCtx ready to start a new one.
- *                  Useful after an error, or to interrupt any ongoing compression.
- *                  Any internal data not yet flushed is cancelled.
- *                  Compression parameters and dictionary remain unchanged.
- *                  They will be used to compress next frame.
- *                  Resetting session never fails.
- *  - The parameters : changes all parameters back to "default".
- *                  This removes any reference to any dictionary too.
- *                  Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
- *                  otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
- *  - Both : similar to resetting the session, followed by resetting parameters.
- */
-ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
-
-/*! ZSTD_compress2() :
- *  Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
- *  ZSTD_compress2() always starts a new frame.
- *  Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
- *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
- *  - The function is always blocking, returns when compression is completed.
- *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
- * @return : compressed size written into `dst` (<= `dstCapacity`),
- *           or an error code if it fails (which can be tested using ZSTD_isError()).
- */
-ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
-                                   void* dst, size_t dstCapacity,
-                             const void* src, size_t srcSize);
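-
-/* Illustrative sketch : one-shot compression through the advanced API.
- * `dst`, `dstCapacity`, `src`, `srcSize` are caller buffers, and the parameter
- * values are arbitrary examples; each setter returns a size_t testable with ZSTD_isError().
- *
- *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
- *     size_t cSize = 0;
- *     if (cctx != NULL) {
- *         ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
- *         ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
- *         ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);   // errors unless built with ZSTD_MULTITHREAD
- *         cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
- *         ZSTD_freeCCtx(cctx);                                 // cSize : compressed size, or error code
- *     }
- */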
-
-
-/***************************************
-*  Advanced decompression API
-***************************************/
-
-/* The advanced API pushes parameters one by one into an existing DCtx context.
- * Parameters are sticky, and remain valid for all following frames
- * using the same DCtx context.
- * It's possible to reset parameters to default values using ZSTD_DCtx_reset().
- * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().
- *        Therefore, no new decompression function is necessary.
- */
-
-typedef enum {
-
-    ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which
-                              * the streaming API will refuse to allocate memory buffer
-                              * in order to protect the host from unreasonable memory requirements.
-                              * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
-                              * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
-                              * Special: value 0 means "use default maximum windowLog". */
-
-    /* note : additional experimental parameters are also available
-     * within the experimental section of the API.
-     * At the time of this writing, they include :
-     * ZSTD_c_format
-     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
-     * note : never ever use experimentalParam? names directly
-     */
-     ZSTD_d_experimentalParam1=1000
-
-} ZSTD_dParameter;
-
-/*! ZSTD_dParam_getBounds() :
- *  All parameters must belong to an interval with lower and upper bounds,
- *  otherwise they will either trigger an error or be automatically clamped.
- * @return : a structure, ZSTD_bounds, which contains
- *         - an error status field, which must be tested using ZSTD_isError()
- *         - both lower and upper bounds, inclusive
- */
-ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);
-
-/*! ZSTD_DCtx_setParameter() :
- *  Set one decompression parameter, selected by enum ZSTD_dParameter.
- *  All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().
- *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
- *  Setting a parameter is only possible during frame initialization (before starting decompression).
- * @return : 0, or an error code (which can be tested using ZSTD_isError()).
- */
-ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);
-
-/*! ZSTD_DCtx_reset() :
- *  Return a DCtx to a clean state.
- *  Session and parameters can be reset jointly or separately.
- *  Parameters can only be reset when no active frame is being decompressed.
- * @return : 0, or an error code, which can be tested with ZSTD_isError()
- */
-ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
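-
-/* Illustrative sketch : bounding the window size a streaming decompression context
- * will accept, then resetting the session between frames. The value 27 is an example.
- *
- *     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
- *     if (dctx != NULL) {
- *         size_t const r = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
- *         if (!ZSTD_isError(r)) {
- *             // ... decompress one or more frames with this dctx ...
- *             ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only);   // ready for the next frame
- *         }
- *         ZSTD_freeDCtx(dctx);
- *     }
- */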
-
-
-/****************************
-*  Streaming
-****************************/
-
-typedef struct ZSTD_inBuffer_s {
-  const void* src;    /**< start of input buffer */
-  size_t size;        /**< size of input buffer */
-  size_t pos;         /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
-} ZSTD_inBuffer;
-
-typedef struct ZSTD_outBuffer_s {
-  void*  dst;         /**< start of output buffer */
-  size_t size;        /**< size of output buffer */
-  size_t pos;         /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
-} ZSTD_outBuffer;
-
-
-
-/*-***********************************************************************
-*  Streaming compression - HowTo
-*
-*  A ZSTD_CStream object is required to track streaming operation.
-*  Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
-*  ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
-*  It is recommended to re-use a ZSTD_CStream, since it plays nicer with the system's memory by re-using already allocated memory.
-*
-*  For parallel execution, use one separate ZSTD_CStream per thread.
-*
-*  note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
-*
-*  Parameters are sticky : when starting a new compression on the same context,
-*  it will re-use the same sticky parameters as previous compression session.
-*  When in doubt, it's recommended to fully initialize the context before usage.
-*  Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
-*  ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
-*  set more specific parameters, the pledged source size, or load a dictionary.
-*
-*  Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to
-*  consume input stream. The function will automatically update both `pos`
-*  fields within `input` and `output`.
-*  Note that the function may not consume the entire input, for example, because
-*  the output buffer is already full, in which case `input.pos < input.size`.
-*  The caller must check if input has been entirely consumed.
-*  If not, the caller must make some room to receive more compressed data,
-*  and then present the remaining input data again.
-*  note: ZSTD_e_continue is guaranteed to make some forward progress when called,
-*        but doesn't guarantee maximal forward progress. This is especially relevant
-*        when compressing with multiple threads. The call won't block if it can
-*        consume some input, but if it can't it will wait for some, but not all,
-*        output to be flushed.
-* @return : provides a minimum amount of data remaining to be flushed from internal buffers
-*           or an error code, which can be tested using ZSTD_isError().
-*
-*  At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
-*  using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated.
-*  Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0).
-*  In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush.
-*  You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the
-*  operation.
-*  note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will
-*        block until the flush is complete or the output buffer is full.
-*  @return : 0 if internal buffers are entirely flushed,
-*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
-*            or an error code, which can be tested using ZSTD_isError().
-*
-*  Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame.
-*  It will perform a flush and write frame epilogue.
-*  The epilogue is required for decoders to consider a frame completed.
-*  The flush operation is the same, and follows the same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.
-*  You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to
-*  start a new frame.
-*  note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
-*        block until the flush is complete or the output buffer is full.
-*  @return : 0 if frame fully completed and fully flushed,
-*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
-*            or an error code, which can be tested using ZSTD_isError().
-*
-* *******************************************************************/
-
-typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
-                                 /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
-/*===== ZSTD_CStream management functions =====*/
-ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
-ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
-
-/*===== Streaming compression functions =====*/
-typedef enum {
-    ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
-    ZSTD_e_flush=1,    /* flush any data provided so far,
-                        * it creates (at least) one new block, that can be decoded immediately on reception;
-                        * frame will continue: any future data can still reference previously compressed data, improving compression.
-                        * note : multithreaded compression will block to flush as much output as possible. */
-    ZSTD_e_end=2       /* flush any remaining data _and_ close current frame.
-                        * note that frame is only closed after compressed data is fully flushed (return value == 0).
-                        * After that point, any additional data starts a new frame.
-                        * note : each frame is independent (does not reference any content from previous frame).
-                        * note : multithreaded compression will block to flush as much output as possible. */
-} ZSTD_EndDirective;
-
-/*! ZSTD_compressStream2() :
- *  Behaves about the same as ZSTD_compressStream, with additional control on end directive.
- *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
- *  - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)
- *  - output->pos must be <= dstCapacity, input->pos must be <= srcSize
- *  - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
- *  - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
- *  - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, and distributes jobs to internal worker threads, flushes whatever is available,
- *                                                  and then immediately returns, just indicating that there is some data remaining to be flushed.
- *                                                  The function nonetheless guarantees forward progress : it will return only after it reads or writes at least one byte.
- *  - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
- *  - @return provides a minimum amount of data remaining to be flushed from internal buffers
- *            or an error code, which can be tested using ZSTD_isError().
- *            if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
- *            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
- *            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
- *  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
- *            only ZSTD_e_end or ZSTD_e_flush operations are allowed.
- *            Before starting a new compression job, or changing compression parameters,
- *            it is required to fully flush internal buffers.
- */
-ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
-                                         ZSTD_outBuffer* output,
-                                         ZSTD_inBuffer* input,
-                                         ZSTD_EndDirective endOp);
-
-
-/* These buffer sizes are softly recommended.
- * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output.
- * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),
- * reducing the amount of memory shuffling and buffering, resulting in minor performance gains.
- *
- * However, note that these recommendations are from the perspective of a C caller program.
- * If the streaming interface is invoked from some other language,
- * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,
- * a major performance rule is to reduce crossing such interface to an absolute minimum.
- * It's not rare for more time to be spent crossing the interface than on compression itself.
- * In such cases, prefer using buffers as large as practical,
- * for both input and output, to reduce the number of round trips.
- */
-ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
-ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guaranteed to successfully flush at least one complete compressed block. */
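-
-/* Illustrative sketch : streaming compression of an already-open FILE* pair
- * (`fin`, `fout`) with the recommended buffer sizes. Allocation and error checks
- * are abbreviated; assumes <stdio.h>, <stdlib.h> and "zstd.h".
- *
- *     size_t const inSize  = ZSTD_CStreamInSize();
- *     size_t const outSize = ZSTD_CStreamOutSize();
- *     void* const inBuf  = malloc(inSize);
- *     void* const outBuf = malloc(outSize);
- *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
- *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
- *     for (;;) {
- *         size_t const read = fread(inBuf, 1, inSize, fin);
- *         int const lastChunk = (read < inSize);
- *         ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
- *         ZSTD_inBuffer input = { inBuf, read, 0 };
- *         int finished;
- *         do {
- *             ZSTD_outBuffer output = { outBuf, outSize, 0 };
- *             size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
- *             fwrite(outBuf, 1, output.pos, fout);      // remaining should be checked with ZSTD_isError()
- *             finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
- *         } while (!finished);
- *         if (lastChunk) break;
- *     }
- *     ZSTD_freeCCtx(cctx); free(inBuf); free(outBuf);
- */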
-
-
-/* *****************************************************************************
- * The following is a legacy streaming API.
- * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
- * It is redundant, but remains fully supported.
- * Advanced parameters and dictionary compression can only be used through the
- * new API.
- ******************************************************************************/
-
-/*!
- * Equivalent to:
- *
- *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
- *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
- */
-ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
-/*!
- * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
- * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
- * the next read size (if non-zero and not an error). ZSTD_compressStream2()
- * returns the minimum number of bytes left to flush (if non-zero and not an error).
- */
-ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
-/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
-ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
-/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
-ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
-
-
-/*-***************************************************************************
-*  Streaming decompression - HowTo
-*
-*  A ZSTD_DStream object is required to track streaming operations.
-*  Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
-*  ZSTD_DStream objects can be re-used multiple times.
-*
-*  Use ZSTD_initDStream() to start a new decompression operation.
-* @return : recommended first input size
-*  Alternatively, use advanced API to set specific properties.
-*
-*  Use ZSTD_decompressStream() repetitively to consume your input.
-*  The function will update both `pos` fields.
-*  If `input.pos < input.size`, some input has not been consumed.
-*  It's up to the caller to present again remaining data.
-*  The function tries to flush all data decoded immediately, respecting output buffer size.
-*  If `output.pos < output.size`, the decoder has flushed everything it could.
-*  But if `output.pos == output.size`, there might be some data left within internal buffers.
-*  In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
-*  Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
-* @return : 0 when a frame is completely decoded and fully flushed,
-*        or an error code, which can be tested using ZSTD_isError(),
-*        or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
-*                                the return value is a suggested next input size (just a hint for better latency)
-*                                that will never request more than the remaining frame size.
-* *******************************************************************************/
-
-typedef ZSTD_DCtx ZSTD_DStream;  /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
-                                 /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
-/*===== ZSTD_DStream management functions =====*/
-ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
-ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
-
-/*===== Streaming decompression functions =====*/
-
-/* This function is redundant with the advanced API and equivalent to:
- *
- *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
- *     ZSTD_DCtx_refDDict(zds, NULL);
- */
-ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
-
-ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
-
-ZSTDLIB_API size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
-ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guaranteed to successfully flush at least one complete block in all circumstances. */
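-
-/* Illustrative sketch : streaming decompression mirroring the HowTo above,
- * for an already-open FILE* pair (`fin`, `fout`). Error handling is abbreviated;
- * assumes <stdio.h>, <stdlib.h> and "zstd.h".
- *
- *     size_t const inSize  = ZSTD_DStreamInSize();
- *     size_t const outSize = ZSTD_DStreamOutSize();
- *     void* const inBuf  = malloc(inSize);
- *     void* const outBuf = malloc(outSize);
- *     ZSTD_DStream* const dstream = ZSTD_createDStream();
- *     size_t read;  int err = 0;
- *     while (!err && (read = fread(inBuf, 1, inSize, fin)) != 0) {
- *         ZSTD_inBuffer input = { inBuf, read, 0 };
- *         while (!err && input.pos < input.size) {
- *             ZSTD_outBuffer output = { outBuf, outSize, 0 };
- *             size_t const ret = ZSTD_decompressStream(dstream, &output, &input);
- *             if (ZSTD_isError(ret)) { err = 1; break; }   // invalid or truncated frame
- *             fwrite(outBuf, 1, output.pos, fout);
- *         }
- *     }
- *     ZSTD_freeDStream(dstream); free(inBuf); free(outBuf);
- */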
-
-
-/**************************
-*  Simple dictionary API
-***************************/
-/*! ZSTD_compress_usingDict() :
- *  Compression at an explicit compression level using a Dictionary.
- *  A dictionary can be any arbitrary data segment (also called a prefix),
- *  or a buffer with specified information (see dictBuilder/zdict.h).
- *  Note : This function loads the dictionary, resulting in significant startup delay.
- *         It's intended for a dictionary used only once.
- *  Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
-ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
-                                           void* dst, size_t dstCapacity,
-                                     const void* src, size_t srcSize,
-                                     const void* dict,size_t dictSize,
-                                           int compressionLevel);
-
-/*! ZSTD_decompress_usingDict() :
- *  Decompression using a known Dictionary.
- *  Dictionary must be identical to the one used during compression.
- *  Note : This function loads the dictionary, resulting in significant startup delay.
- *         It's intended for a dictionary used only once.
- *  Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
-ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
-                                             void* dst, size_t dstCapacity,
-                                       const void* src, size_t srcSize,
-                                       const void* dict,size_t dictSize);
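-
-/* Illustrative sketch : one-shot round-trip with the simple dictionary API.
- * `cctx`, `dctx`, the buffers and `dictBuf`/`dictSize` (a previously trained or
- * chosen dictionary) are illustrative only.
- *
- *     size_t const cSize = ZSTD_compress_usingDict(cctx, dst, dstCapacity,
- *                                                  src, srcSize,
- *                                                  dictBuf, dictSize, 3);
- *     if (!ZSTD_isError(cSize)) {
- *         size_t const dSize = ZSTD_decompress_usingDict(dctx, out, outCapacity,
- *                                                        dst, cSize,
- *                                                        dictBuf, dictSize);
- *         // on success, dSize equals the original srcSize
- *     }
- */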
-
-
-/***********************************
- *  Bulk processing dictionary API
- **********************************/
-typedef struct ZSTD_CDict_s ZSTD_CDict;
-
-/*! ZSTD_createCDict() :
- *  When compressing multiple messages / blocks using the same dictionary, it's recommended to load it only once.
- *  ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup cost.
- *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
- * `dictBuffer` can be released after ZSTD_CDict creation, because its content is copied within CDict.
- *  Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate `dictBuffer` content.
- *  Note : A ZSTD_CDict can be created from an empty dictBuffer, but it is inefficient when used to compress small data. */
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
-                                         int compressionLevel);
-
-/*! ZSTD_freeCDict() :
- *  Function frees memory allocated by ZSTD_createCDict(). */
-ZSTDLIB_API size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);
-
-/*! ZSTD_compress_usingCDict() :
- *  Compression using a digested Dictionary.
- *  Recommended when same dictionary is used multiple times.
- *  Note : compression level is _decided at dictionary creation time_,
- *     and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
-ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
-                                            void* dst, size_t dstCapacity,
-                                      const void* src, size_t srcSize,
-                                      const ZSTD_CDict* cdict);
-
-
-typedef struct ZSTD_DDict_s ZSTD_DDict;
-
-/*! ZSTD_createDDict() :
- *  Create a digested dictionary, ready to start decompression operation without startup delay.
- *  dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
-ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
-
-/*! ZSTD_freeDDict() :
- *  Function frees memory allocated with ZSTD_createDDict() */
-ZSTDLIB_API size_t      ZSTD_freeDDict(ZSTD_DDict* ddict);
-
-/*! ZSTD_decompress_usingDDict() :
- *  Decompression using a digested Dictionary.
- *  Recommended when same dictionary is used multiple times. */
-ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
-                                              void* dst, size_t dstCapacity,
-                                        const void* src, size_t srcSize,
-                                        const ZSTD_DDict* ddict);
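-
-/* Illustrative sketch : digest the dictionary once, then re-use it for many messages.
- * `cctx`, `dctx`, the message arrays and `dictBuf`/`dictSize` are illustrative only;
- * allocation failures are not handled here.
- *
- *     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
- *     ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
- *     size_t i;
- *     for (i = 0; i < nbMessages; i++) {
- *         size_t const cSize = ZSTD_compress_usingCDict(cctx, dst[i], dstCap[i],
- *                                                       msg[i], msgSize[i], cdict);
- *         if (ZSTD_isError(cSize)) break;
- *         ZSTD_decompress_usingDDict(dctx, out[i], outCap[i], dst[i], cSize, ddict);
- *     }
- *     ZSTD_freeCDict(cdict);
- *     ZSTD_freeDDict(ddict);
- */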
-
-
-/********************************
- *  Dictionary helper functions
- *******************************/
-
-/*! ZSTD_getDictID_fromDict() :
- *  Provides the dictID stored within dictionary.
- *  If @return == 0, the dictionary is not conformant with the Zstandard specification.
- *  It can still be loaded, but as a content-only dictionary. */
-ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
-
-/*! ZSTD_getDictID_fromDDict() :
- *  Provides the dictID of the dictionary loaded into `ddict`.
- *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
- *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
-ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
-
-/*! ZSTD_getDictID_fromFrame() :
- *  Provides the dictID required to decompress the frame stored within `src`.
- *  If @return == 0, the dictID could not be decoded.
- *  This could be for one of the following reasons :
- *  - The frame does not require a dictionary to be decoded (most common case).
- *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
- *    Note : this use case also happens when using a non-conformant dictionary.
- *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
- *  - This is not a Zstandard frame.
- *  When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
-ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
-
-
-/*******************************************************************************
- * Advanced dictionary and prefix API
- *
- * This API allows dictionaries to be used with ZSTD_compress2(),
- * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
- * only reset when the context is reset with ZSTD_reset_parameters or
- * ZSTD_reset_session_and_parameters. Prefixes are single-use.
- ******************************************************************************/
-
-
-/*! ZSTD_CCtx_loadDictionary() :
- *  Create an internal CDict from `dict` buffer.
- *  Decompression will have to use same dictionary.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- *  Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
- *           meaning "return to no-dictionary mode".
- *  Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
- *           To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
- *  Note 2 : Loading a dictionary involves building tables.
- *           It's also a CPU consuming operation, with non-negligible impact on latency.
- *           Tables are dependent on compression parameters, and for this reason,
- *           compression parameters can no longer be changed after loading a dictionary.
- *  Note 3 :`dict` content will be copied internally.
- *           Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
- *           In such a case, dictionary buffer must outlive its users.
- *  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
- *           to precisely select how dictionary content must be interpreted. */
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
-
-/*! ZSTD_CCtx_refCDict() :
- *  Reference a prepared dictionary, to be used for all next compressed frames.
- *  Note that compression parameters are enforced from within CDict,
- *  and supersede any compression parameter previously set within CCtx.
- *  The parameters ignored are labeled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
- *  The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
- *  The dictionary will remain valid for future compressed frames using same CCtx.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- *  Special : Referencing a NULL CDict means "return to no-dictionary mode".
- *  Note 1 : Currently, only one dictionary can be managed.
- *           Referencing a new dictionary effectively "discards" any previous one.
- *  Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
-ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
-
-/*! ZSTD_CCtx_refPrefix() :
- *  Reference a prefix (single-usage dictionary) for next compressed frame.
- *  A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
- *  Decompression will need same prefix to properly regenerate data.
- *  Compressing with a prefix is similar in outcome to performing a diff and compressing it,
- *  but performs much faster, especially during decompression (compression speed is tunable with compression level).
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- *  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
- *  Note 1 : Prefix buffer is referenced. It **must** outlive compression.
- *           Its content must remain unmodified during compression.
- *  Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
- *           ensure that the window size is large enough to contain the entire source.
- *           See ZSTD_c_windowLog.
- *  Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
- *           It's a CPU consuming operation, with non-negligible impact on latency.
- *           If there is a need to use the same prefix multiple times, consider loadDictionary instead.
- *  Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dm_rawContent).
- *           Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
-ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
-                                 const void* prefix, size_t prefixSize);
-
-/*! ZSTD_DCtx_loadDictionary() :
- *  Create an internal DDict from dict buffer,
- *  to be used to decompress next frames.
- *  The dictionary remains valid for all future frames, until explicitly invalidated.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- *  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
- *            meaning "return to no-dictionary mode".
- *  Note 1 : Loading a dictionary involves building tables,
- *           which has a non-negligible impact on CPU usage and latency.
- *           It's recommended to "load once, use many times", to amortize the cost
- *  Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.
- *           Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.
- *  Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of
- *           how dictionary content is loaded and interpreted.
- */
-ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
-
-/*! ZSTD_DCtx_refDDict() :
- *  Reference a prepared dictionary, to be used to decompress next frames.
- *  The dictionary remains active for decompression of future frames using same DCtx.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- *  Note 1 : Currently, only one dictionary can be managed.
- *           Referencing a new dictionary effectively "discards" any previous one.
- *  Special: referencing a NULL DDict means "return to no-dictionary mode".
- *  Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
- */
-ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
-
-/*! ZSTD_DCtx_refPrefix() :
- *  Reference a prefix (single-usage dictionary) to decompress next frame.
- *  This is the reverse operation of ZSTD_CCtx_refPrefix(),
- *  and must use the same prefix as the one used during compression.
- *  Prefix is **only used once**. Reference is discarded at end of frame.
- *  End of frame is reached when ZSTD_decompressStream() returns 0.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- *  Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
- *  Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
- *           Prefix buffer must remain unmodified up to the end of frame,
- *           reached when ZSTD_decompressStream() returns 0.
- *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent).
- *           Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)
- *  Note 4 : Referencing a raw content prefix has almost no CPU or memory cost.
- *           A full dictionary is more costly, as it requires building tables.
- */
-ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
-                                 const void* prefix, size_t prefixSize);
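-
-/* Illustrative sketch : delta-compressing `newVersion` against `oldVersion` with a
- * single-use prefix, then regenerating it with the same prefix on the decoder side.
- * Buffer names are illustrative only; windowLog 27 is an arbitrary example chosen
- * to cover the prefix, and return values should be checked with ZSTD_isError().
- *
- *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 27);     // window large enough for the prefix
- *     ZSTD_CCtx_refPrefix(cctx, oldVersion, oldSize);         // consumed by the next frame only
- *     size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, newVersion, newSize);
- *
- *     ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
- *     ZSTD_DCtx_refPrefix(dctx, oldVersion, oldSize);         // same prefix, applied before decoding
- *     size_t const dSize = ZSTD_decompressDCtx(dctx, out, outCapacity, dst, cSize);
- */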
-
-/* ===   Memory management   === */
-
-/*! ZSTD_sizeof_*() :
- *  These functions give the _current_ memory usage of selected object.
- *  Note that object memory usage can evolve (increase or decrease) over time. */
-ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
-ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
-ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
-ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
-ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
-ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
-
-#endif  /* ZSTD_H_235446 */
-
-
-/* **************************************************************************************
- *   ADVANCED AND EXPERIMENTAL FUNCTIONS
- ****************************************************************************************
- * The definitions in the following section are considered experimental.
- * They are provided for advanced scenarios.
- * They should never be used with a dynamic library, as prototypes may change in the future.
- * Use them only in association with static linking.
- * ***************************************************************************************/
-
-#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
-#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
-
-/****************************************************************************************
- *   experimental API (static linking only)
- ****************************************************************************************
- * The following symbols and constants
- * are not planned to join "stable API" status in the near future.
- * They can still change in future versions.
- * Some of them are planned to remain in the static_only section indefinitely.
- * Some of them might be removed in the future (especially when redundant with existing stable functions)
- * ***************************************************************************************/
-
-#define ZSTD_FRAMEHEADERSIZE_PREFIX 5   /* minimum input size required to query frame header size */
-#define ZSTD_FRAMEHEADERSIZE_MIN    6
-#define ZSTD_FRAMEHEADERSIZE_MAX   18   /* can be useful for static allocation */
-#define ZSTD_SKIPPABLEHEADERSIZE    8
-
-/* compression parameter bounds */
-#define ZSTD_WINDOWLOG_MAX_32    30
-#define ZSTD_WINDOWLOG_MAX_64    31
-#define ZSTD_WINDOWLOG_MAX     ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
-#define ZSTD_WINDOWLOG_MIN       10
-#define ZSTD_HASHLOG_MAX       ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
-#define ZSTD_HASHLOG_MIN          6
-#define ZSTD_CHAINLOG_MAX_32     29
-#define ZSTD_CHAINLOG_MAX_64     30
-#define ZSTD_CHAINLOG_MAX      ((int)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
-#define ZSTD_CHAINLOG_MIN        ZSTD_HASHLOG_MIN
-#define ZSTD_SEARCHLOG_MAX      (ZSTD_WINDOWLOG_MAX-1)
-#define ZSTD_SEARCHLOG_MIN        1
-#define ZSTD_MINMATCH_MAX         7   /* only for ZSTD_fast, other strategies are limited to 6 */
-#define ZSTD_MINMATCH_MIN         3   /* only for ZSTD_btopt+, faster strategies are limited to 4 */
-#define ZSTD_TARGETLENGTH_MAX    ZSTD_BLOCKSIZE_MAX
-#define ZSTD_TARGETLENGTH_MIN     0   /* note : comparing this constant to an unsigned results in a tautological test */
-#define ZSTD_STRATEGY_MIN        ZSTD_fast
-#define ZSTD_STRATEGY_MAX        ZSTD_btultra2
-
-
-#define ZSTD_OVERLAPLOG_MIN       0
-#define ZSTD_OVERLAPLOG_MAX       9
-
-#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27   /* by default, the streaming decoder will refuse any frame
-                                           * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
-                                           * to preserve host's memory from unreasonable requirements.
-                                           * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
-                                           * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */
-
-
-/* LDM parameter bounds */
-#define ZSTD_LDM_HASHLOG_MIN      ZSTD_HASHLOG_MIN
-#define ZSTD_LDM_HASHLOG_MAX      ZSTD_HASHLOG_MAX
-#define ZSTD_LDM_MINMATCH_MIN        4
-#define ZSTD_LDM_MINMATCH_MAX     4096
-#define ZSTD_LDM_BUCKETSIZELOG_MIN   1
-#define ZSTD_LDM_BUCKETSIZELOG_MAX   8
-#define ZSTD_LDM_HASHRATELOG_MIN     0
-#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
-
-/* Advanced parameter bounds */
-#define ZSTD_TARGETCBLOCKSIZE_MIN   64
-#define ZSTD_TARGETCBLOCKSIZE_MAX   ZSTD_BLOCKSIZE_MAX
-
-/* internal */
-#define ZSTD_HASHLOG3_MAX           17
-
-
-/* ---  Advanced types  --- */
-
-typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
-
-typedef struct {
-    unsigned windowLog;       /**< largest match distance : larger == more compression, more memory needed during decompression */
-    unsigned chainLog;        /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
-    unsigned hashLog;         /**< dispatch table : larger == faster, more memory */
-    unsigned searchLog;       /**< nb of searches : larger == more compression, slower */
-    unsigned minMatch;        /**< match length searched : larger == faster decompression, sometimes less compression */
-    unsigned targetLength;    /**< acceptable match size for optimal parser (only) : larger == more compression, slower */
-    ZSTD_strategy strategy;   /**< see ZSTD_strategy definition above */
-} ZSTD_compressionParameters;
-
-typedef struct {
-    int contentSizeFlag; /**< 1: content size will be in frame header (when known) */
-    int checksumFlag;    /**< 1: generate a 32-bit checksum using the XXH64 algorithm at end of frame, for error detection */
-    int noDictIDFlag;    /**< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */
-} ZSTD_frameParameters;
-
-typedef struct {
-    ZSTD_compressionParameters cParams;
-    ZSTD_frameParameters fParams;
-} ZSTD_parameters;
-
-typedef enum {
-    ZSTD_dct_auto = 0,       /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
-    ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
-    ZSTD_dct_fullDict = 2    /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */
-} ZSTD_dictContentType_e;
-
-typedef enum {
-    ZSTD_dlm_byCopy = 0,  /**< Copy dictionary content internally */
-    ZSTD_dlm_byRef = 1,   /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
-} ZSTD_dictLoadMethod_e;
-
-typedef enum {
-    /* Open question : should we have a format ZSTD_f_auto ?
-     * Today, it would mean exactly the same as ZSTD_f_zstd1.
-     * But, in the future, should several formats become supported,
-     * on the compression side, it would mean "default format".
-     * On the decompression side, it would mean "automatic format detection",
-     * so that ZSTD_f_zstd1 would mean "accept *only* zstd frames".
-     * Since meaning is a little different, another option could be to define different enums for compression and decompression.
-     * This question could be kept for later, when there are actually multiple formats to support,
-     * but there is also the question of pinning enum values, and pinning value `0` is especially important */
-    ZSTD_f_zstd1 = 0,           /* zstd frame format, specified in zstd_compression_format.md (default) */
-    ZSTD_f_zstd1_magicless = 1, /* Variant of zstd frame format, without initial 4-bytes magic number.
-                                 * Useful to save 4 bytes per generated frame.
-                                 * The decoder cannot automatically recognise this format, so this instruction is required. */
-} ZSTD_format_e;
-
-typedef enum {
-    /* Note: this enum and the behavior it controls are effectively internal
-     * implementation details of the compressor. They are expected to continue
-     * to evolve and should be considered only in the context of extremely
-     * advanced performance tuning.
-     *
-     * Zstd currently supports the use of a CDict in two ways:
-     *
-     * - The contents of the CDict can be copied into the working context. This
-     *   means that the compression can search both the dictionary and input
-     *   while operating on a single set of internal tables. This makes
-     *   the compression faster per-byte of input. However, the initial copy of
-     *   the CDict's tables incurs a fixed cost at the beginning of the
-     *   compression. For small compressions (< 8 KB), that copy can dominate
-     *   the cost of the compression.
-     *
-     * - The CDict's tables can be used in-place. In this model, compression is
-     *   slower per input byte, because the compressor has to search two sets of
-     *   tables. However, this model incurs no start-up cost (as long as the
-     *   working context's tables can be reused). For small inputs, this can be
-     *   faster than copying the CDict's tables.
-     *
-     * Zstd has a simple internal heuristic that selects which strategy to use
-     * at the beginning of a compression. However, if experimentation shows that
-     * Zstd is making poor choices, it is possible to override that choice with
-     * this enum.
-     */
-    ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
-    ZSTD_dictForceAttach   = 1, /* Never copy the dictionary. */
-    ZSTD_dictForceCopy     = 2, /* Always copy the dictionary. */
-} ZSTD_dictAttachPref_e;
-
-typedef enum {
-  ZSTD_lcm_auto = 0,          /**< Automatically determine the compression mode based on the compression level.
-                               *   Negative compression levels will be uncompressed, and positive compression
-                               *   levels will be compressed. */
-  ZSTD_lcm_huffman = 1,       /**< Always attempt Huffman compression. Uncompressed literals will still be
-                               *   emitted if Huffman compression is not profitable. */
-  ZSTD_lcm_uncompressed = 2,  /**< Always emit uncompressed literals. */
-} ZSTD_literalCompressionMode_e;
-
-
-/***************************************
-*  Frame size functions
-***************************************/
-
-/*! ZSTD_findDecompressedSize() :
- *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
- *  `srcSize` must be the _exact_ size of this series
- *       (i.e. there should be a frame boundary at `src + srcSize`)
- *  @return : - decompressed size of all data in all successive frames
- *            - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
- *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
- *
- *   note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
- *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
- *            In which case, it's necessary to use streaming mode to decompress data.
- *   note 2 : decompressed size is always present when compression is done with ZSTD_compress()
- *   note 3 : decompressed size can be very large (64-bits value),
- *            potentially larger than what local system can handle as a single memory segment.
- *            In which case, it's necessary to use streaming mode to decompress data.
- *   note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
- *            Always ensure result fits within application's authorized limits.
- *            Each application can set its own limits.
- *   note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
- *            read each contained frame header.  This is fast as most of the data is skipped,
- *            however it does mean that all frame data must be present and valid. */
-ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
-
-/*! ZSTD_decompressBound() :
- *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
- *  `srcSize` must be the _exact_ size of this series
- *       (i.e. there should be a frame boundary at `src + srcSize`)
- *  @return : - upper-bound for the decompressed size of all data in all successive frames
- *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
- *
- *  note 1  : an error can occur if `src` contains an invalid or incorrectly formatted frame.
- *  note 2  : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
- *            in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.
- *  note 3  : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
- *              upper-bound = # blocks * min(128 KB, Window_Size)
- */
-ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
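Editor's note: a minimal sketch (not part of the header) of how an application might combine ZSTD_decompressBound() with its own allocation cap before a one-pass decompression. `MAX_ALLOWED` and `decompress_all` are hypothetical names; the header must be included with ZSTD_STATIC_LINKING_ONLY since this symbol is static-only.

```c
#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_decompressBound() is static-linking only */
#include "zstd.h"

#define MAX_ALLOWED ((unsigned long long)64 << 20)   /* assumed application limit: 64 MB */

/* Returns a malloc'ed buffer holding the decompressed data, or NULL on error. */
static void* decompress_all(const void* src, size_t srcSize, size_t* outSize)
{
    unsigned long long const bound = ZSTD_decompressBound(src, srcSize);
    if (bound == ZSTD_CONTENTSIZE_ERROR || bound > MAX_ALLOWED) return NULL;
    {   void* const dst = malloc((size_t)bound);
        if (dst == NULL) return NULL;
        {   size_t const dSize = ZSTD_decompress(dst, (size_t)bound, src, srcSize);
            if (ZSTD_isError(dSize)) { free(dst); return NULL; }
            *outSize = dSize;
            return dst;
    }   }
}
```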
-
-/*! ZSTD_frameHeaderSize() :
- *  srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
- * @return : size of the Frame Header,
- *           or an error code (if srcSize is too small) */
-ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
-
-
-/***************************************
-*  Memory management
-***************************************/
-
-/*! ZSTD_estimate*() :
- *  These functions make it possible to estimate memory usage
- *  of a future {D,C}Ctx, before its creation.
- *  ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
- *  It will also consider src size to be arbitrarily "large", which is worst case.
- *  If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
- *  ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
- *  ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
- *  Note : CCtx size estimation is only correct for single-threaded compression. */
-ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
-ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
-ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
-ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
-
-/*! ZSTD_estimateCStreamSize() :
- *  ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
- *  It will also consider src size to be arbitrarily "large", which is worst case.
- *  If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
- *  ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
- *  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
- *  Note : CStream size estimation is only correct for single-threaded compression.
- *  ZSTD_DStream memory budget depends on window Size.
- *  This information can be passed manually, using ZSTD_estimateDStreamSize,
- *  or deduced from a valid frame header, using ZSTD_estimateDStreamSize_fromFrame();
- *  Note : if streaming is initialized with ZSTD_init?Stream_usingDict(),
- *         an internal ?Dict will be created, whose additional size is not estimated here.
- *         In this case, get the total size by adding ZSTD_estimate?DictSize */
-ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
-ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
-ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
-ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
-ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
-
-/*! ZSTD_estimate?DictSize() :
- *  ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
- *  ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
- *  Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
- */
-ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
-ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
-ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
-
-/*! ZSTD_initStatic*() :
- *  Initialize an object using a pre-allocated fixed-size buffer.
- *  workspace: The memory area to emplace the object into.
- *             Provided pointer *must be 8-bytes aligned*.
- *             Buffer must outlive object.
- *  workspaceSize: Use ZSTD_estimate*Size() to determine
- *                 how large workspace must be to support target scenario.
- * @return : pointer to object (same address as workspace, just different type),
- *           or NULL if error (size too small, incorrect alignment, etc.)
- *  Note : zstd will never resize nor malloc() when using a static buffer.
- *         If the object requires more memory than available,
- *         zstd will just error out (typically ZSTD_error_memory_allocation).
- *  Note 2 : there is no corresponding "free" function.
- *           Since workspace is allocated externally, it must be freed externally too.
- *  Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
- *           into its associated cParams.
- *  Limitation 1 : currently not compatible with internal dictionary creation, triggered by
- *                 ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
- *  Limitation 2 : static cctx currently not compatible with multi-threading.
- *  Limitation 3 : static dctx is incompatible with legacy support.
- */
-ZSTDLIB_API ZSTD_CCtx*    ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
-ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticCCtx() */
-
-ZSTDLIB_API ZSTD_DCtx*    ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
-ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticDCtx() */
-
-ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
-                                        void* workspace, size_t workspaceSize,
-                                        const void* dict, size_t dictSize,
-                                        ZSTD_dictLoadMethod_e dictLoadMethod,
-                                        ZSTD_dictContentType_e dictContentType,
-                                        ZSTD_compressionParameters cParams);
-
-ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
-                                        void* workspace, size_t workspaceSize,
-                                        const void* dict, size_t dictSize,
-                                        ZSTD_dictLoadMethod_e dictLoadMethod,
-                                        ZSTD_dictContentType_e dictContentType);
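Editor's note: a hedged sketch of the estimate-then-initStatic pattern these declarations enable. `make_static_cctx` is an illustrative helper, not part of zstd; it assumes malloc() alignment (8 bytes or more) is sufficient, which holds on common platforms.

```c
#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

/* Create a CCtx inside a caller-owned workspace; zstd will never allocate further. */
static ZSTD_CCtx* make_static_cctx(int compressionLevel, void** workspaceOut)
{
    size_t const need = ZSTD_estimateCCtxSize(compressionLevel);
    void* const ws = malloc(need);                    /* assumed to be suitably (>= 8-byte) aligned */
    if (ws == NULL) return NULL;
    {   ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(ws, need);
        if (cctx == NULL) { free(ws); return NULL; }  /* size too small or misaligned */
        *workspaceOut = ws;                           /* caller frees the workspace; there is no free function */
        return cctx;
    }
}
```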
-
-
-/*! Custom memory allocation :
- *  These prototypes make it possible to pass your own allocation/free functions.
- *  ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
- *  All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
- */
-typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
-typedef void  (*ZSTD_freeFunction) (void* opaque, void* address);
-typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
-static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */
-
-ZSTDLIB_API ZSTD_CCtx*    ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
-ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
-ZSTDLIB_API ZSTD_DCtx*    ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
-ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
-
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
-                                                  ZSTD_dictLoadMethod_e dictLoadMethod,
-                                                  ZSTD_dictContentType_e dictContentType,
-                                                  ZSTD_compressionParameters cParams,
-                                                  ZSTD_customMem customMem);
-
-ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
-                                                  ZSTD_dictLoadMethod_e dictLoadMethod,
-                                                  ZSTD_dictContentType_e dictContentType,
-                                                  ZSTD_customMem customMem);
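Editor's note: an illustration only, showing a counting allocator wired through ZSTD_customMem. `alloc_stats`, `counted_alloc`, `counted_free` and `create_tracked_cctx` are hypothetical names.

```c
#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

typedef struct { size_t totalRequested; } alloc_stats;

static void* counted_alloc(void* opaque, size_t size)
{
    ((alloc_stats*)opaque)->totalRequested += size;   /* track how much zstd asked for */
    return malloc(size);
}

static void counted_free(void* opaque, void* address)
{
    (void)opaque;                                     /* a fuller tracker would record frees too */
    free(address);
}

static ZSTD_CCtx* create_tracked_cctx(alloc_stats* stats)
{
    ZSTD_customMem const cmem = { counted_alloc, counted_free, stats };
    return ZSTD_createCCtx_advanced(cmem);            /* all internal allocations go through cmem */
}
```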
-
-
-
-/***************************************
-*  Advanced compression functions
-***************************************/
-
-/*! ZSTD_createCDict_byReference() :
- *  Create a digested dictionary for compression
- *  Dictionary content is just referenced, not duplicated.
- *  As a consequence, `dictBuffer` **must** outlive CDict,
- *  and its content must remain unmodified throughout the lifetime of CDict. */
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
-
-/*! ZSTD_getCParams() :
- * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
- * `estimatedSrcSize` value is optional, select 0 if not known */
-ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
-
-/*! ZSTD_getParams() :
- *  same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
- *  All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
-ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
-
-/*! ZSTD_checkCParams() :
- *  Ensure param values remain within authorized range.
- * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
-
-/*! ZSTD_adjustCParams() :
- *  optimize params for a given `srcSize` and `dictSize`.
- * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
- * `dictSize` must be `0` when there is no dictionary.
- *  cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
- *  This function never fails (wide contract) */
-ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
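Editor's note: a small sketch of the intended flow (function name is illustrative): derive cParams from a level, clamp them for a known input size, then estimate the workspace they need.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

static size_t budget_for_known_src(int compressionLevel, unsigned long long srcSize)
{
    ZSTD_compressionParameters cp = ZSTD_getCParams(compressionLevel, srcSize, 0 /* no dictionary */);
    cp = ZSTD_adjustCParams(cp, srcSize, 0);          /* never fails: out-of-range fields are clamped */
    return ZSTD_estimateCCtxSize_usingCParams(cp);    /* single-threaded workspace estimate */
}
```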
-
-/*! ZSTD_compress_advanced() :
- *  Same as ZSTD_compress_usingDict(), with fine-tune control over compression parameters (by structure) */
-ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
-                                          void* dst, size_t dstCapacity,
-                                    const void* src, size_t srcSize,
-                                    const void* dict,size_t dictSize,
-                                          ZSTD_parameters params);
-
-/*! ZSTD_compress_usingCDict_advanced() :
- *  Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters */
-ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
-                                              void* dst, size_t dstCapacity,
-                                        const void* src, size_t srcSize,
-                                        const ZSTD_CDict* cdict,
-                                              ZSTD_frameParameters fParams);
-
-
-/*! ZSTD_CCtx_loadDictionary_byReference() :
- *  Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
- *  It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
-
-/*! ZSTD_CCtx_loadDictionary_advanced() :
- *  Same as ZSTD_CCtx_loadDictionary(), but gives finer control over
- *  how to load the dictionary (by copy ? by reference ?)
- *  and how to interpret it (automatic ? force raw mode ? full mode only ?) */
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
-
-/*! ZSTD_CCtx_refPrefix_advanced() :
- *  Same as ZSTD_CCtx_refPrefix(), but gives finer control over
- *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
-ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
-
-/* ===   experimental parameters   === */
-/* these parameters can be used with ZSTD_setParameter()
- * they are not guaranteed to remain supported in the future */
-
- /* Enables rsyncable mode,
-  * which makes compressed files more rsync friendly
-  * by adding periodic synchronization points to the compressed data.
-  * The target average block size is ZSTD_c_jobSize / 2.
-  * It's possible to modify the job size to increase or decrease
-  * the granularity of the synchronization point.
-  * Once the jobSize is smaller than the window size,
-  * the compression ratio will degrade.
-  * NOTE 1: rsyncable mode only works when multithreading is enabled.
-  * NOTE 2: rsyncable performs poorly in combination with long range mode,
-  * since it will decrease the effectiveness of synchronization points,
-  * though mileage may vary.
-  * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s.
-  * If the selected compression level is already running significantly slower,
-  * the overall speed won't be significantly impacted.
-  */
- #define ZSTD_c_rsyncable ZSTD_c_experimentalParam1
-
-/* Select a compression format.
- * The value must be of type ZSTD_format_e.
- * See ZSTD_format_e enum definition for details */
-#define ZSTD_c_format ZSTD_c_experimentalParam2
-
-/* Force back-reference distances to remain < windowSize,
- * even when referencing into Dictionary content (default:0) */
-#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3
-
-/* Controls whether the contents of a CDict
- * are used in place, or copied into the working context.
- * Accepts values from the ZSTD_dictAttachPref_e enum.
- * See the comments on that enum for an explanation of the feature. */
-#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
-
-/* Controls how the literals are compressed (default is auto).
- * The value must be of type ZSTD_literalCompressionMode_e.
- * See ZSTD_literalCompressionMode_e enum definition for details.
- */
-#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
-
-/* Tries to fit compressed block size to be around targetCBlockSize.
- * No target when targetCBlockSize == 0.
- * There is no guarantee on compressed block size (default:0) */
-#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
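Editor's note: a hedged example of setting one of the experimental parameters above through the regular ZSTD_CCtx_setParameter() entry point. It assumes a multithreaded build, since rsyncable mode requires ZSTD_c_nbWorkers >= 1; the helper name is illustrative.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

static size_t enable_rsyncable(ZSTD_CCtx* cctx)
{
    size_t const err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 2);  /* rsyncable needs MT */
    if (ZSTD_isError(err)) return err;
    return ZSTD_CCtx_setParameter(cctx, ZSTD_c_rsyncable, 1);
}
```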
-
-/*! ZSTD_CCtx_getParameter() :
- *  Get the requested compression parameter value, selected by enum ZSTD_cParameter,
- *  and store it into int* value.
- * @return : 0, or an error code (which can be tested with ZSTD_isError()).
- */
-ZSTDLIB_API size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
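Editor's note: for instance, reading back the window log the context will actually use (a sketch; `current_window_log` is not a zstd function).

```c
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

static int current_window_log(ZSTD_CCtx* cctx)
{
    int wlog = 0;
    size_t const err = ZSTD_CCtx_getParameter(cctx, ZSTD_c_windowLog, &wlog);
    return ZSTD_isError(err) ? -1 : wlog;   /* 0 means "let zstd decide" */
}
```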
-
-
-/*! ZSTD_CCtx_params :
- *  Quick howto :
- *  - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
- *  - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
- *                                     an existing ZSTD_CCtx_params structure.
- *                                     This is similar to
- *                                     ZSTD_CCtx_setParameter().
- *  - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
- *                                    an existing CCtx.
- *                                    These parameters will be applied to
- *                                    all subsequent frames.
- *  - ZSTD_compressStream2() : Do compression using the CCtx.
- *  - ZSTD_freeCCtxParams() : Free the memory.
- *
- *  This can be used with ZSTD_estimateCCtxSize_usingCCtxParams()
- *  for static allocation of CCtx for single-threaded compression.
- */
-ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
-ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);
-
-/*! ZSTD_CCtxParams_reset() :
- *  Reset params to default values.
- */
-ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
-
-/*! ZSTD_CCtxParams_init() :
- *  Initializes the compression parameters of cctxParams according to
- *  compression level. All other parameters are reset to their default values.
- */
-ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
-
-/*! ZSTD_CCtxParams_init_advanced() :
- *  Initializes the compression and frame parameters of cctxParams according to
- *  params. All other parameters are reset to their default values.
- */
-ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
-
-/*! ZSTD_CCtxParams_setParameter() :
- *  Similar to ZSTD_CCtx_setParameter.
- *  Set one compression parameter, selected by enum ZSTD_cParameter.
- *  Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- */
-ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
-
-/*! ZSTD_CCtxParams_getParameter() :
- * Similar to ZSTD_CCtx_getParameter.
- * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- */
-ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
-
-/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
- *  Apply a set of ZSTD_CCtx_params to the compression context.
- *  This can be done even after compression is started,
- *    if nbWorkers==0, this will have no impact until a new compression is started.
- *    if nbWorkers>=1, new parameters will be picked up at next job,
- *       with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
- */
-ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
-        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
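Editor's note: a minimal sketch following the quick howto above; the level (19) and checksum setting are arbitrary, and `apply_params` is an illustrative helper.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

static int apply_params(ZSTD_CCtx* cctx)
{
    ZSTD_CCtx_params* const p = ZSTD_createCCtxParams();
    size_t err;
    if (p == NULL) return -1;
    err = ZSTD_CCtxParams_setParameter(p, ZSTD_c_compressionLevel, 19);
    if (!ZSTD_isError(err))
        err = ZSTD_CCtxParams_setParameter(p, ZSTD_c_checksumFlag, 1);
    if (!ZSTD_isError(err))
        err = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, p);  /* applies to all subsequent frames */
    ZSTD_freeCCtxParams(p);
    return ZSTD_isError(err) ? -1 : 0;
}
```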
-
-/*! ZSTD_compressStream2_simpleArgs() :
- *  Same as ZSTD_compressStream2(),
- *  but using only integral types as arguments.
- *  This variant might be helpful for binders from dynamic languages
- *  which have troubles handling structures containing memory pointers.
- */
-ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs (
-                            ZSTD_CCtx* cctx,
-                            void* dst, size_t dstCapacity, size_t* dstPos,
-                      const void* src, size_t srcSize, size_t* srcPos,
-                            ZSTD_EndDirective endOp);
-
-
-/***************************************
-*  Advanced decompression functions
-***************************************/
-
-/*! ZSTD_isFrame() :
- *  Tells if the content of `buffer` starts with a valid Frame Identifier.
- *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
- *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
- *  Note 3 : Skippable Frame Identifiers are considered valid. */
-ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
-
-/*! ZSTD_createDDict_byReference() :
- *  Create a digested dictionary, ready to start decompression operation without startup delay.
- *  Dictionary content is referenced, and therefore stays in dictBuffer.
- *  It is important that dictBuffer outlives DDict,
- *  it must remain read accessible throughout the lifetime of DDict */
-ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
-
-/*! ZSTD_DCtx_loadDictionary_byReference() :
- *  Same as ZSTD_DCtx_loadDictionary(),
- *  but references `dict` content instead of copying it into `dctx`.
- *  This saves memory if `dict` remains around.
- *  However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */
-ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
-
-/*! ZSTD_DCtx_loadDictionary_advanced() :
- *  Same as ZSTD_DCtx_loadDictionary(),
- *  but gives direct control over
- *  how to load the dictionary (by copy ? by reference ?)
- *  and how to interpret it (automatic ? force raw mode ? full mode only ?). */
-ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
-
-/*! ZSTD_DCtx_refPrefix_advanced() :
- *  Same as ZSTD_DCtx_refPrefix(), but gives finer control over
- *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
-ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
-
-/*! ZSTD_DCtx_setMaxWindowSize() :
- *  Refuses to allocate internal buffers for frames requiring a window size larger than the provided limit.
- *  This protects a decoder context from reserving too much memory for itself (potential attack scenario).
- *  This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
- *  By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)
- * @return : 0, or an error code (which can be tested using ZSTD_isError()).
- */
-ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
-
-/* ZSTD_d_format
- * experimental parameter,
- * allowing selection between ZSTD_format_e input compression formats
- */
-#define ZSTD_d_format ZSTD_d_experimentalParam1
-
-/*! ZSTD_DCtx_setFormat() :
- *  Instruct the decoder context about what kind of data to decode next.
- *  This instruction is mandatory to decode data without a fully-formed header,
- *  such as ZSTD_f_zstd1_magicless.
- * @return : 0, or an error code (which can be tested using ZSTD_isError()). */
-ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
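Editor's note: a short sketch combining the two knobs above: cap the accepted window size, then tell the decoder to expect magicless frames. It assumes the producer set ZSTD_c_format to ZSTD_f_zstd1_magicless; the helper name is illustrative.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

static size_t init_limited_magicless_dctx(ZSTD_DCtx* dctx)
{
    size_t const err = ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 27);  /* refuse frames needing a window > 128 MB */
    if (ZSTD_isError(err)) return err;
    return ZSTD_DCtx_setFormat(dctx, ZSTD_f_zstd1_magicless);
}
```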
-
-/*! ZSTD_decompressStream_simpleArgs() :
- *  Same as ZSTD_decompressStream(),
- *  but using only integral types as arguments.
- *  This can be helpful for binders from dynamic languages
- *  which have troubles handling structures containing memory pointers.
- */
-ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
-                            ZSTD_DCtx* dctx,
-                            void* dst, size_t dstCapacity, size_t* dstPos,
-                      const void* src, size_t srcSize, size_t* srcPos);
-
-
-/********************************************************************
-*  Advanced streaming functions
-*  Warning : most of these functions are now redundant with the Advanced API.
-*  Once Advanced API reaches "stable" status,
-*  redundant functions will be deprecated, and then at some point removed.
-********************************************************************/
-
-/*=====   Advanced Streaming compression functions  =====*/
-/**! ZSTD_initCStream_srcSize() :
- * This function is deprecated, and equivalent to:
- *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
- *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
- *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
- *
- * pledgedSrcSize must be correct. If it is not known at init time, use
- * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
- * "0" also disables frame content size field. It may be enabled in the future.
- */
-ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);
-/**! ZSTD_initCStream_usingDict() :
- * This function is deprecated, and is equivalent to:
- *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
- *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
- *
- * Creates an internal CDict (incompatible with static CCtx), except if
- * dict == NULL or dictSize < 8, in which case no dict is used.
- * Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if
- * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
- */
-ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
-/**! ZSTD_initCStream_advanced() :
- * This function is deprecated, and is approximately equivalent to:
- *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- *     ZSTD_CCtx_setZstdParams(zcs, params); // Set the zstd params and leave the rest as-is
- *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
- *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
- *
- * pledgedSrcSize must be correct. If srcSize is not known at init time, use
- * value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy.
- */
-ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
-                                             ZSTD_parameters params, unsigned long long pledgedSrcSize);
-/**! ZSTD_initCStream_usingCDict() :
- * This function is deprecated, and equivalent to:
- *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- *     ZSTD_CCtx_refCDict(zcs, cdict);
- *
- * note : cdict will just be referenced, and must outlive compression session
- */
-ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
-/**! ZSTD_initCStream_usingCDict_advanced() :
- * This function is deprecated, and is approximately equivalent to:
- *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- *     ZSTD_CCtx_setZstdFrameParams(zcs, fParams); // Set the zstd frame params and leave the rest as-is
- *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
- *     ZSTD_CCtx_refCDict(zcs, cdict);
- *
- * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
- * pledgedSrcSize must be correct. If srcSize is not known at init time, use
- * value ZSTD_CONTENTSIZE_UNKNOWN.
- */
-ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize);
-
-/*! ZSTD_resetCStream() :
- * This function is deprecated, and is equivalent to:
- *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
- *
- *  start a new frame, using the same parameters as the previous frame.
- *  This is typically useful to skip the dictionary loading stage, since the dictionary is re-used in place.
- *  Note that zcs must be initialized at least once before using ZSTD_resetCStream().
- *  If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
- *  If pledgedSrcSize > 0, its value must be correct, as it will be written in the header and checked at the end.
- *  For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
- *  but it will change to mean "empty" in a future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
- * @return : 0, or an error code (which can be tested using ZSTD_isError())
- */
-ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
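Editor's note: a sketch of the non-deprecated replacement spelled out above. It assumes (as in this header version) that ZSTD_CStream is a typedef of ZSTD_CCtx, so the advanced-API calls apply directly; the helper name is illustrative.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

static size_t start_new_frame(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
{
    size_t const err = ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);  /* keeps parameters and dictionary */
    if (ZSTD_isError(err)) return err;
    return ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);           /* use ZSTD_CONTENTSIZE_UNKNOWN if unknown */
}
```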
-
-
-typedef struct {
-    unsigned long long ingested;   /* nb input bytes read and buffered */
-    unsigned long long consumed;   /* nb input bytes actually compressed */
-    unsigned long long produced;   /* nb of compressed bytes generated and buffered */
-    unsigned long long flushed;    /* nb of compressed bytes flushed : not provided; can be tracked from caller side */
-    unsigned currentJobID;         /* MT only : latest started job nb */
-    unsigned nbActiveWorkers;      /* MT only : nb of workers actively compressing at probe time */
-} ZSTD_frameProgression;
-
-/* ZSTD_getFrameProgression() :
- * tells how much data has been ingested (read from input),
- * consumed (input actually compressed), and produced (output) for the current frame.
- * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed.
- * Aggregates progression inside active worker threads.
- */
-ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
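Editor's note: an illustrative progress probe built on the struct above (`report_progress` is a hypothetical helper).

```c
#include <stdio.h>
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

static void report_progress(const ZSTD_CCtx* cctx)
{
    ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
    fprintf(stderr, "ingested=%llu consumed=%llu produced=%llu activeWorkers=%u\n",
            fp.ingested, fp.consumed, fp.produced, fp.nbActiveWorkers);
}
```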
-
-/*! ZSTD_toFlushNow() :
- *  Tell how many bytes are ready to be flushed immediately.
- *  Useful for multithreading scenarios (nbWorkers >= 1).
- *  Probe the oldest active job, defined as oldest job not yet entirely flushed,
- *  and check its output buffer.
- * @return : amount of data stored in oldest job and ready to be flushed immediately.
- *  if @return == 0, it means either :
- *  + there is no active job (could be checked with ZSTD_frameProgression()), or
- *  + oldest job is still actively compressing data,
- *    but everything it has produced has also been flushed so far,
- *    therefore flush speed is limited by production speed of oldest job
- *    irrespective of the speed of concurrent (and newer) jobs.
- */
-ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
-
-
-/*=====   Advanced Streaming decompression functions  =====*/
-/**
- * This function is deprecated, and is equivalent to:
- *
- *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
- *     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
- *
- * note: no dictionary will be used if dict == NULL or dictSize < 8
- */
-ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
-/**
- * This function is deprecated, and is equivalent to:
- *
- *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
- *     ZSTD_DCtx_refDDict(zds, ddict);
- *
- * note : ddict is referenced, it must outlive decompression session
- */
-ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
-/**
- * This function is deprecated, and is equivalent to:
- *
- *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
- *
- * re-use decompression parameters from previous init; saves dictionary loading
- */
-ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
-
-
-/*********************************************************************
-*  Buffer-less and synchronous inner streaming functions
-*
-*  This is an advanced API, giving full control over buffer management, for users who need direct control over memory.
-*  But it's also a complex one, with several restrictions, documented below.
-*  Prefer normal streaming API for an easier experience.
-********************************************************************* */
-
-/**
-  Buffer-less streaming compression (synchronous mode)
-
-  A ZSTD_CCtx object is required to track streaming operations.
-  Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
-  ZSTD_CCtx object can be re-used multiple times within successive compression operations.
-
-  Start by initializing a context.
-  Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
-  or ZSTD_compressBegin_advanced(), for finer parameter control.
-  It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
-
-  Then, consume your input using ZSTD_compressContinue().
-  There are some important considerations to keep in mind when using this advanced function :
-  - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
-  - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
-  - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
-    Worst case evaluation is provided by ZSTD_compressBound().
-    ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
-  - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
-    It remembers all previous contiguous blocks, plus one separate memory segment (which can itself consist of multiple contiguous blocks)
-  - ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.
-    In which case, it will "discard" the relevant memory section from its history.
-
-  Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
-  It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
-  Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
-
-  `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
-*/
-
-/*=====   Buffer-less streaming compression functions  =====*/
-ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
-ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
-ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
-ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
-ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize);   /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
-ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
-
-ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
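Editor's note: to make the steps above concrete, a loosely hedged sketch that compresses one in-memory buffer as a single frame with the buffer-less API (single chunk, fixed level 3; the caller is assumed to size dst with ZSTD_compressBound()). The function name is illustrative.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

/* Caller must provide dstCapacity >= ZSTD_compressBound(srcSize). */
static size_t compress_one_shot_bufferless(ZSTD_CCtx* cctx,
                                           void* dst, size_t dstCapacity,
                                           const void* src, size_t srcSize)
{
    size_t const initErr = ZSTD_compressBegin(cctx, 3);
    if (ZSTD_isError(initErr)) return initErr;
    /* a multi-chunk variant would call ZSTD_compressContinue() once per chunk here */
    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);  /* writes last block(s) + optional checksum */
}
```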
-
-
-/*-
-  Buffer-less streaming decompression (synchronous mode)
-
-  A ZSTD_DCtx object is required to track streaming operations.
-  Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
-  A ZSTD_DCtx object can be re-used multiple times.
-
-  First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
-  Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
-  Data fragment must be large enough to ensure successful decoding.
- `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
-  @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
-           >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
-           or an error code, which can be tested using ZSTD_isError().
-
-  It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
-  such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
-  Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
-  As a consequence, check that values remain within valid application range.
-  For example, do not allocate memory blindly, check that `windowSize` is within expectation.
-  Each application can set its own limits, depending on local restrictions.
-  For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
-
-  ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
-  ZSTD_decompressContinue() is very sensitive to contiguity,
-  if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
-  or that previous contiguous segment is large enough to properly handle maximum back-reference distance.
-  There are multiple ways to guarantee this condition.
-
-  The most memory efficient way is to use a round buffer of sufficient size.
-  Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
-  which can @return an error code if required value is too large for current system (in 32-bits mode).
-  In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
-  up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
-  whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
-  At that point, decoding can resume from the beginning of the buffer.
-  Note that already decoded data stored in the buffer should be flushed before being overwritten.
-
-  There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
-
-  Finally, if you control the compression process, you can also ignore all buffer size rules,
-  as long as the encoder and decoder progress in "lock-step",
-  aka use exactly the same buffer sizes, break contiguity at the same place, etc.
-
-  Once buffers are set up, start decompression with ZSTD_decompressBegin().
-  If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
-
-  Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately.
-  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
-  ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
-
- @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
-  It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
-  It can also be an error code, which can be tested with ZSTD_isError().
-
-  A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
-  Context can then be reset to start a new decompression.
-
-  Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().
-  This information is not required to properly decode a frame.
-
-  == Special case : skippable frames ==
-
-  Skippable frames allow integration of user-defined data into a flow of concatenated frames.
-  Skippable frames will be ignored (skipped) by the decompressor.
-  The format of skippable frames is as follows :
-  a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
-  b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
-  c) Frame Content - any content (User Data) of length equal to Frame Size
-  For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
-  For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
-*/
-
-/*=====   Buffer-less streaming decompression functions  =====*/
-typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
-typedef struct {
-    unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
-    unsigned long long windowSize;       /* can be very large, up to <= frameContentSize */
-    unsigned blockSizeMax;
-    ZSTD_frameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
-    unsigned headerSize;
-    unsigned dictID;
-    unsigned checksumFlag;
-} ZSTD_frameHeader;
-
-/*! ZSTD_getFrameHeader() :
- *  decode Frame Header, or requires larger `srcSize`.
- * @return : 0, `zfhPtr` is correctly filled,
- *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
- *           or an error code, which can be tested using ZSTD_isError() */
-ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
-/*! ZSTD_getFrameHeader_advanced() :
- *  same as ZSTD_getFrameHeader(),
- *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
-ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
-ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
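Editor's note: a sketch of sizing a round buffer from the frame header, as described in the overview above; the helper name is illustrative.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

static size_t round_buffer_size(const void* src, size_t srcSize)
{
    ZSTD_frameHeader zfh;
    size_t const r = ZSTD_getFrameHeader(&zfh, src, srcSize);
    if (r != 0) return r;   /* >0 : more input needed ; or an error code (test with ZSTD_isError()) */
    return ZSTD_decodingBufferSize_min(zfh.windowSize, zfh.frameContentSize);
}
```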
-
-ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
-ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
-ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
-
-ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
-ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
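Editor's note: a rough sketch of the decode loop itself, for the simple case where the entire frame is already in memory and the output is one contiguous buffer. `decompress_frame_bufferless` is illustrative only.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

static size_t decompress_frame_bufferless(ZSTD_DCtx* dctx,
                                          void* dst, size_t dstCapacity,
                                          const void* src, size_t srcSize)
{
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    size_t const initErr = ZSTD_decompressBegin(dctx);
    if (ZSTD_isError(initErr)) return initErr;

    while (1) {
        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
        size_t produced;
        if (toRead == 0) break;                                      /* frame fully decoded */
        if (toRead > srcSize - (size_t)(ip - (const char*)src))
            return (size_t)-1;                                       /* truncated input; treated as an error by ZSTD_isError() */
        produced = ZSTD_decompressContinue(dctx, op, dstCapacity - (size_t)(op - (char*)dst), ip, toRead);
        if (ZSTD_isError(produced)) return produced;
        ip += toRead;
        op += produced;                                              /* may be 0 for headers/metadata */
    }
    return (size_t)(op - (char*)dst);                                /* total regenerated size */
}
```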
-
-/* misc */
-ZSTDLIB_API void   ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
-typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
-ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
-
-
-
-
-/* ============================ */
-/**       Block level API       */
-/* ============================ */
-
-/*!
-    Block functions produce and decode raw zstd blocks, without frame metadata.
-    Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
-    The user is responsible for tracking the information required to regenerate the data, such as compressed and content sizes.
-
-    A few rules to respect :
-    - Compressing and decompressing require a context structure
-      + Use ZSTD_createCCtx() and ZSTD_createDCtx()
-    - It is necessary to initialize the context before starting
-      + compression : any ZSTD_compressBegin*() variant, including with dictionary
-      + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
-      + copyCCtx() and copyDCtx() can be used too
-    - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
-      + If input is larger than a block size, it's necessary to split input data into multiple blocks
-      + For inputs larger than a single block, really consider using regular ZSTD_compress() instead.
-        Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
-    - When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero.
-      In which case, nothing is produced into `dst` !
-      + User must test for such outcome and deal directly with uncompressed data
-      + ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
-      + In case of multiple successive blocks, should some of them be uncompressed,
-        the decoder must be informed of their existence in order to follow proper history.
-        Use ZSTD_insertBlock() for such a case.
-*/
-
-/*=====   Raw zstd block functions  =====*/
-ZSTDLIB_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
-ZSTDLIB_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-ZSTDLIB_API size_t ZSTD_insertBlock    (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
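Editor's note: a hedged sketch of the round trip the rules above describe, for a single block no larger than ZSTD_getBlockSize(). `roundtrip_one_block` is illustrative; a real caller must also copy the raw data out itself when the block is judged not compressible, since nothing is written to `tmp` in that case.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

static size_t roundtrip_one_block(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx,
                                  void* tmp, size_t tmpCapacity,
                                  void* out, size_t outCapacity,
                                  const void* src, size_t srcSize)
{
    size_t cSize, err;
    err = ZSTD_compressBegin(cctx, 3);                 /* any ZSTD_compressBegin*() variant works */
    if (ZSTD_isError(err)) return err;
    err = ZSTD_decompressBegin(dctx);
    if (ZSTD_isError(err)) return err;

    cSize = ZSTD_compressBlock(cctx, tmp, tmpCapacity, src, srcSize);
    if (ZSTD_isError(cSize)) return cSize;
    if (cSize == 0)                                    /* block judged not compressible: raw data is the output */
        return ZSTD_insertBlock(dctx, src, srcSize);   /* keep decoder history consistent */
    return ZSTD_decompressBlock(dctx, out, outCapacity, tmp, cSize);
}
```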
-
-
-#endif   /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
-
-#if defined (__cplusplus)
-}
-#endif
diff --git a/vendor/github.com/DataDog/zstd/zstd_common.c b/vendor/github.com/DataDog/zstd/zstd_common.c
deleted file mode 100644
index 667f4a2..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_common.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-
-/*-*************************************
-*  Dependencies
-***************************************/
-#include <stdlib.h>      /* malloc, calloc, free */
-#include <string.h>      /* memset */
-#include "error_private.h"
-#include "zstd_internal.h"
-
-
-/*-****************************************
-*  Version
-******************************************/
-unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
-
-const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
-
-
-/*-****************************************
-*  ZSTD Error Management
-******************************************/
-#undef ZSTD_isError   /* defined within zstd_internal.h */
-/*! ZSTD_isError() :
- *  tells if a return value is an error code
- *  symbol is required for external callers */
-unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
-
-/*! ZSTD_getErrorName() :
- *  provides error code string from function result (useful for debugging) */
-const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
-
-/*! ZSTD_getErrorCode() :
- *  convert a `size_t` function result into a proper ZSTD_errorCode enum */
-ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
-
-/*! ZSTD_getErrorString() :
- *  provides error code string from enum */
-const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
-
-
-
-/*=**************************************************************
-*  Custom allocator
-****************************************************************/
-void* ZSTD_malloc(size_t size, ZSTD_customMem customMem)
-{
-    if (customMem.customAlloc)
-        return customMem.customAlloc(customMem.opaque, size);
-    return malloc(size);
-}
-
-void* ZSTD_calloc(size_t size, ZSTD_customMem customMem)
-{
-    if (customMem.customAlloc) {
-        /* calloc implemented as malloc+memset;
-         * not as efficient as calloc, but next best guess for custom malloc */
-        void* const ptr = customMem.customAlloc(customMem.opaque, size);
-        memset(ptr, 0, size);
-        return ptr;
-    }
-    return calloc(1, size);
-}
-
-void ZSTD_free(void* ptr, ZSTD_customMem customMem)
-{
-    if (ptr!=NULL) {
-        if (customMem.customFree)
-            customMem.customFree(customMem.opaque, ptr);
-        else
-            free(ptr);
-    }
-}
diff --git a/vendor/github.com/DataDog/zstd/zstd_compress.c b/vendor/github.com/DataDog/zstd/zstd_compress.c
deleted file mode 100644
index 1476512..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_compress.c
+++ /dev/null
@@ -1,4475 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-/*-*************************************
-*  Dependencies
-***************************************/
-#include <limits.h>         /* INT_MAX */
-#include <string.h>         /* memset */
-#include "cpu.h"
-#include "mem.h"
-#include "hist.h"           /* HIST_countFast_wksp */
-#define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
-#include "fse.h"
-#define HUF_STATIC_LINKING_ONLY
-#include "huf.h"
-#include "zstd_compress_internal.h"
-#include "zstd_fast.h"
-#include "zstd_double_fast.h"
-#include "zstd_lazy.h"
-#include "zstd_opt.h"
-#include "zstd_ldm.h"
-
-
-/*-*************************************
-*  Helper functions
-***************************************/
-size_t ZSTD_compressBound(size_t srcSize) {
-    return ZSTD_COMPRESSBOUND(srcSize);
-}
-
-
-/*-*************************************
-*  Context memory management
-***************************************/
-struct ZSTD_CDict_s {
-    void* dictBuffer;
-    const void* dictContent;
-    size_t dictContentSize;
-    void* workspace;
-    size_t workspaceSize;
-    ZSTD_matchState_t matchState;
-    ZSTD_compressedBlockState_t cBlockState;
-    ZSTD_customMem customMem;
-    U32 dictID;
-};  /* typedef'd to ZSTD_CDict within "zstd.h" */
-
-ZSTD_CCtx* ZSTD_createCCtx(void)
-{
-    return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
-}
-
-static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
-{
-    assert(cctx != NULL);
-    memset(cctx, 0, sizeof(*cctx));
-    cctx->customMem = memManager;
-    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
-    {   size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
-        assert(!ZSTD_isError(err));
-        (void)err;
-    }
-}
-
-ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
-{
-    ZSTD_STATIC_ASSERT(zcss_init==0);
-    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
-    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
-    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
-        if (!cctx) return NULL;
-        ZSTD_initCCtx(cctx, customMem);
-        return cctx;
-    }
-}
-
-ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
-{
-    ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace;
-    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
-    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
-    memset(workspace, 0, workspaceSize);   /* may be a bit generous, could memset be smaller ? */
-    cctx->staticSize = workspaceSize;
-    cctx->workSpace = (void*)(cctx+1);
-    cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx);
-
-    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
-    if (cctx->workSpaceSize < HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t)) return NULL;
-    assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0);   /* ensure correct alignment */
-    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)cctx->workSpace;
-    cctx->blockState.nextCBlock = cctx->blockState.prevCBlock + 1;
-    {
-        void* const ptr = cctx->blockState.nextCBlock + 1;
-        cctx->entropyWorkspace = (U32*)ptr;
-    }
-    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
-    return cctx;
-}
-
-/**
- * Clears and frees all of the dictionaries in the CCtx.
- */
-static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
-{
-    ZSTD_free(cctx->localDict.dictBuffer, cctx->customMem);
-    ZSTD_freeCDict(cctx->localDict.cdict);
-    memset(&cctx->localDict, 0, sizeof(cctx->localDict));
-    memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
-    cctx->cdict = NULL;
-}
-
-static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
-{
-    size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
-    size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
-    return bufferSize + cdictSize;
-}
-
-static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
-{
-    assert(cctx != NULL);
-    assert(cctx->staticSize == 0);
-    ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
-    ZSTD_clearAllDicts(cctx);
-#ifdef ZSTD_MULTITHREAD
-    ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
-#endif
-}
-
-size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
-{
-    if (cctx==NULL) return 0;   /* support free on NULL */
-    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
-                    "not compatible with static CCtx");
-    ZSTD_freeCCtxContent(cctx);
-    ZSTD_free(cctx, cctx->customMem);
-    return 0;
-}
-
-
-static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
-{
-#ifdef ZSTD_MULTITHREAD
-    return ZSTDMT_sizeof_CCtx(cctx->mtctx);
-#else
-    (void)cctx;
-    return 0;
-#endif
-}
-
-
-size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
-{
-    if (cctx==NULL) return 0;   /* support sizeof on NULL */
-    return sizeof(*cctx) + cctx->workSpaceSize
-           + ZSTD_sizeof_localDict(cctx->localDict)
-           + ZSTD_sizeof_mtctx(cctx);
-}
-
-size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
-{
-    return ZSTD_sizeof_CCtx(zcs);  /* same object */
-}
-
-/* private API call, for dictBuilder only */
-const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
-
-static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
-        ZSTD_compressionParameters cParams)
-{
-    ZSTD_CCtx_params cctxParams;
-    memset(&cctxParams, 0, sizeof(cctxParams));
-    cctxParams.cParams = cParams;
-    cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;  /* should not matter, as all cParams are presumed properly defined */
-    assert(!ZSTD_checkCParams(cParams));
-    cctxParams.fParams.contentSizeFlag = 1;
-    return cctxParams;
-}
-
-static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
-        ZSTD_customMem customMem)
-{
-    ZSTD_CCtx_params* params;
-    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
-    params = (ZSTD_CCtx_params*)ZSTD_calloc(
-            sizeof(ZSTD_CCtx_params), customMem);
-    if (!params) { return NULL; }
-    params->customMem = customMem;
-    params->compressionLevel = ZSTD_CLEVEL_DEFAULT;
-    params->fParams.contentSizeFlag = 1;
-    return params;
-}
-
-ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
-{
-    return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
-}
-
-size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
-{
-    if (params == NULL) { return 0; }
-    ZSTD_free(params, params->customMem);
-    return 0;
-}
-
-size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
-{
-    return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
-}
-
-size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
-    RETURN_ERROR_IF(!cctxParams, GENERIC);
-    memset(cctxParams, 0, sizeof(*cctxParams));
-    cctxParams->compressionLevel = compressionLevel;
-    cctxParams->fParams.contentSizeFlag = 1;
-    return 0;
-}
-
-size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
-{
-    RETURN_ERROR_IF(!cctxParams, GENERIC);
-    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
-    memset(cctxParams, 0, sizeof(*cctxParams));
-    cctxParams->cParams = params.cParams;
-    cctxParams->fParams = params.fParams;
-    cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */
-    assert(!ZSTD_checkCParams(params.cParams));
-    return 0;
-}
-
-/* ZSTD_assignParamsToCCtxParams() :
- * params is presumed valid at this stage */
-static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
-        ZSTD_CCtx_params cctxParams, ZSTD_parameters params)
-{
-    ZSTD_CCtx_params ret = cctxParams;
-    ret.cParams = params.cParams;
-    ret.fParams = params.fParams;
-    ret.compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */
-    assert(!ZSTD_checkCParams(params.cParams));
-    return ret;
-}
-
-ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
-{
-    ZSTD_bounds bounds = { 0, 0, 0 };
-
-    switch(param)
-    {
-    case ZSTD_c_compressionLevel:
-        bounds.lowerBound = ZSTD_minCLevel();
-        bounds.upperBound = ZSTD_maxCLevel();
-        return bounds;
-
-    case ZSTD_c_windowLog:
-        bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
-        bounds.upperBound = ZSTD_WINDOWLOG_MAX;
-        return bounds;
-
-    case ZSTD_c_hashLog:
-        bounds.lowerBound = ZSTD_HASHLOG_MIN;
-        bounds.upperBound = ZSTD_HASHLOG_MAX;
-        return bounds;
-
-    case ZSTD_c_chainLog:
-        bounds.lowerBound = ZSTD_CHAINLOG_MIN;
-        bounds.upperBound = ZSTD_CHAINLOG_MAX;
-        return bounds;
-
-    case ZSTD_c_searchLog:
-        bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
-        bounds.upperBound = ZSTD_SEARCHLOG_MAX;
-        return bounds;
-
-    case ZSTD_c_minMatch:
-        bounds.lowerBound = ZSTD_MINMATCH_MIN;
-        bounds.upperBound = ZSTD_MINMATCH_MAX;
-        return bounds;
-
-    case ZSTD_c_targetLength:
-        bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
-        bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
-        return bounds;
-
-    case ZSTD_c_strategy:
-        bounds.lowerBound = ZSTD_STRATEGY_MIN;
-        bounds.upperBound = ZSTD_STRATEGY_MAX;
-        return bounds;
-
-    case ZSTD_c_contentSizeFlag:
-        bounds.lowerBound = 0;
-        bounds.upperBound = 1;
-        return bounds;
-
-    case ZSTD_c_checksumFlag:
-        bounds.lowerBound = 0;
-        bounds.upperBound = 1;
-        return bounds;
-
-    case ZSTD_c_dictIDFlag:
-        bounds.lowerBound = 0;
-        bounds.upperBound = 1;
-        return bounds;
-
-    case ZSTD_c_nbWorkers:
-        bounds.lowerBound = 0;
-#ifdef ZSTD_MULTITHREAD
-        bounds.upperBound = ZSTDMT_NBWORKERS_MAX;
-#else
-        bounds.upperBound = 0;
-#endif
-        return bounds;
-
-    case ZSTD_c_jobSize:
-        bounds.lowerBound = 0;
-#ifdef ZSTD_MULTITHREAD
-        bounds.upperBound = ZSTDMT_JOBSIZE_MAX;
-#else
-        bounds.upperBound = 0;
-#endif
-        return bounds;
-
-    case ZSTD_c_overlapLog:
-        bounds.lowerBound = ZSTD_OVERLAPLOG_MIN;
-        bounds.upperBound = ZSTD_OVERLAPLOG_MAX;
-        return bounds;
-
-    case ZSTD_c_enableLongDistanceMatching:
-        bounds.lowerBound = 0;
-        bounds.upperBound = 1;
-        return bounds;
-
-    case ZSTD_c_ldmHashLog:
-        bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
-        bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
-        return bounds;
-
-    case ZSTD_c_ldmMinMatch:
-        bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
-        bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
-        return bounds;
-
-    case ZSTD_c_ldmBucketSizeLog:
-        bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
-        bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
-        return bounds;
-
-    case ZSTD_c_ldmHashRateLog:
-        bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
-        bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
-        return bounds;
-
-    /* experimental parameters */
-    case ZSTD_c_rsyncable:
-        bounds.lowerBound = 0;
-        bounds.upperBound = 1;
-        return bounds;
-
-    case ZSTD_c_forceMaxWindow :
-        bounds.lowerBound = 0;
-        bounds.upperBound = 1;
-        return bounds;
-
-    case ZSTD_c_format:
-        ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
-        bounds.lowerBound = ZSTD_f_zstd1;
-        bounds.upperBound = ZSTD_f_zstd1_magicless;   /* note : how to ensure at compile time that this is the highest value enum ? */
-        return bounds;
-
-    case ZSTD_c_forceAttachDict:
-        ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceCopy);
-        bounds.lowerBound = ZSTD_dictDefaultAttach;
-        bounds.upperBound = ZSTD_dictForceCopy;       /* note : how to ensure at compile time that this is the highest value enum ? */
-        return bounds;
-
-    case ZSTD_c_literalCompressionMode:
-        ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
-        bounds.lowerBound = ZSTD_lcm_auto;
-        bounds.upperBound = ZSTD_lcm_uncompressed;
-        return bounds;
-
-    case ZSTD_c_targetCBlockSize:
-        bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
-        bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
-        return bounds;
-
-    default:
-        {   ZSTD_bounds const boundError = { ERROR(parameter_unsupported), 0, 0 };
-            return boundError;
-        }
-    }
-}
-
-/* ZSTD_cParam_withinBounds:
- * @return 1 if value is within cParam bounds,
- * 0 otherwise */
-static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
-{
-    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
-    if (ZSTD_isError(bounds.error)) return 0;
-    if (value < bounds.lowerBound) return 0;
-    if (value > bounds.upperBound) return 0;
-    return 1;
-}
-
-/* ZSTD_cParam_clampBounds:
- * Clamps the value into the bounded range.
- */
-static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
-{
-    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
-    if (ZSTD_isError(bounds.error)) return bounds.error;
-    if (*value < bounds.lowerBound) *value = bounds.lowerBound;
-    if (*value > bounds.upperBound) *value = bounds.upperBound;
-    return 0;
-}
-
-#define BOUNDCHECK(cParam, val) { \
-    RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
-                    parameter_outOfBound); \
-}
-
-
-static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
-{
-    switch(param)
-    {
-    case ZSTD_c_compressionLevel:
-    case ZSTD_c_hashLog:
-    case ZSTD_c_chainLog:
-    case ZSTD_c_searchLog:
-    case ZSTD_c_minMatch:
-    case ZSTD_c_targetLength:
-    case ZSTD_c_strategy:
-        return 1;
-
-    case ZSTD_c_format:
-    case ZSTD_c_windowLog:
-    case ZSTD_c_contentSizeFlag:
-    case ZSTD_c_checksumFlag:
-    case ZSTD_c_dictIDFlag:
-    case ZSTD_c_forceMaxWindow :
-    case ZSTD_c_nbWorkers:
-    case ZSTD_c_jobSize:
-    case ZSTD_c_overlapLog:
-    case ZSTD_c_rsyncable:
-    case ZSTD_c_enableLongDistanceMatching:
-    case ZSTD_c_ldmHashLog:
-    case ZSTD_c_ldmMinMatch:
-    case ZSTD_c_ldmBucketSizeLog:
-    case ZSTD_c_ldmHashRateLog:
-    case ZSTD_c_forceAttachDict:
-    case ZSTD_c_literalCompressionMode:
-    case ZSTD_c_targetCBlockSize:
-    default:
-        return 0;
-    }
-}
-
-size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
-{
-    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
-    if (cctx->streamStage != zcss_init) {
-        if (ZSTD_isUpdateAuthorized(param)) {
-            cctx->cParamsChanged = 1;
-        } else {
-            RETURN_ERROR(stage_wrong);
-    }   }
-
-    switch(param)
-    {
-    case ZSTD_c_nbWorkers:
-        RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
-                        "MT not compatible with static alloc");
-        break;
-
-    case ZSTD_c_compressionLevel:
-    case ZSTD_c_windowLog:
-    case ZSTD_c_hashLog:
-    case ZSTD_c_chainLog:
-    case ZSTD_c_searchLog:
-    case ZSTD_c_minMatch:
-    case ZSTD_c_targetLength:
-    case ZSTD_c_strategy:
-    case ZSTD_c_ldmHashRateLog:
-    case ZSTD_c_format:
-    case ZSTD_c_contentSizeFlag:
-    case ZSTD_c_checksumFlag:
-    case ZSTD_c_dictIDFlag:
-    case ZSTD_c_forceMaxWindow:
-    case ZSTD_c_forceAttachDict:
-    case ZSTD_c_literalCompressionMode:
-    case ZSTD_c_jobSize:
-    case ZSTD_c_overlapLog:
-    case ZSTD_c_rsyncable:
-    case ZSTD_c_enableLongDistanceMatching:
-    case ZSTD_c_ldmHashLog:
-    case ZSTD_c_ldmMinMatch:
-    case ZSTD_c_ldmBucketSizeLog:
-    case ZSTD_c_targetCBlockSize:
-        break;
-
-    default: RETURN_ERROR(parameter_unsupported);
-    }
-    return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
-}
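Both setters only stage values on `cctx->requestedParams`; they take effect when the next frame starts. A minimal sketch of the usual call pattern (assuming `ZSTD_compress2` from the v1.4.x stable API is available):

```c
#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

int main(void)
{
    const char src[] = "parameters are staged on the CCtx and applied by the next compression";
    size_t const bound = ZSTD_compressBound(sizeof(src));
    void* const dst = malloc(bound);
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (dst == NULL || cctx == NULL) return 1;

    /* Requests are only recorded here; mid-frame, non-updatable parameters fail with stage_wrong. */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);

    {   size_t const csize = ZSTD_compress2(cctx, dst, bound, src, sizeof(src));
        if (ZSTD_isError(csize)) { fprintf(stderr, "%s\n", ZSTD_getErrorName(csize)); return 1; }
        printf("compressed %zu -> %zu bytes\n", sizeof(src), csize);
    }
    ZSTD_freeCCtx(cctx);
    free(dst);
    return 0;
}
```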
-
-size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
-                                    ZSTD_cParameter param, int value)
-{
-    DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
-    switch(param)
-    {
-    case ZSTD_c_format :
-        BOUNDCHECK(ZSTD_c_format, value);
-        CCtxParams->format = (ZSTD_format_e)value;
-        return (size_t)CCtxParams->format;
-
-    case ZSTD_c_compressionLevel : {
-        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
-        if (value) {  /* 0 : does not change current level */
-            CCtxParams->compressionLevel = value;
-        }
-        if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
-        return 0;  /* return type (size_t) cannot represent negative values */
-    }
-
-    case ZSTD_c_windowLog :
-        if (value!=0)   /* 0 => use default */
-            BOUNDCHECK(ZSTD_c_windowLog, value);
-        CCtxParams->cParams.windowLog = value;
-        return CCtxParams->cParams.windowLog;
-
-    case ZSTD_c_hashLog :
-        if (value!=0)   /* 0 => use default */
-            BOUNDCHECK(ZSTD_c_hashLog, value);
-        CCtxParams->cParams.hashLog = value;
-        return CCtxParams->cParams.hashLog;
-
-    case ZSTD_c_chainLog :
-        if (value!=0)   /* 0 => use default */
-            BOUNDCHECK(ZSTD_c_chainLog, value);
-        CCtxParams->cParams.chainLog = value;
-        return CCtxParams->cParams.chainLog;
-
-    case ZSTD_c_searchLog :
-        if (value!=0)   /* 0 => use default */
-            BOUNDCHECK(ZSTD_c_searchLog, value);
-        CCtxParams->cParams.searchLog = value;
-        return value;
-
-    case ZSTD_c_minMatch :
-        if (value!=0)   /* 0 => use default */
-            BOUNDCHECK(ZSTD_c_minMatch, value);
-        CCtxParams->cParams.minMatch = value;
-        return CCtxParams->cParams.minMatch;
-
-    case ZSTD_c_targetLength :
-        BOUNDCHECK(ZSTD_c_targetLength, value);
-        CCtxParams->cParams.targetLength = value;
-        return CCtxParams->cParams.targetLength;
-
-    case ZSTD_c_strategy :
-        if (value!=0)   /* 0 => use default */
-            BOUNDCHECK(ZSTD_c_strategy, value);
-        CCtxParams->cParams.strategy = (ZSTD_strategy)value;
-        return (size_t)CCtxParams->cParams.strategy;
-
-    case ZSTD_c_contentSizeFlag :
-        /* Content size written in frame header _when known_ (default:1) */
-        DEBUGLOG(4, "set content size flag = %u", (value!=0));
-        CCtxParams->fParams.contentSizeFlag = value != 0;
-        return CCtxParams->fParams.contentSizeFlag;
-
-    case ZSTD_c_checksumFlag :
-        /* A 32-bit content checksum will be calculated and written at the end of the frame (default:0) */
-        CCtxParams->fParams.checksumFlag = value != 0;
-        return CCtxParams->fParams.checksumFlag;
-
-    case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
-        DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
-        CCtxParams->fParams.noDictIDFlag = !value;
-        return !CCtxParams->fParams.noDictIDFlag;
-
-    case ZSTD_c_forceMaxWindow :
-        CCtxParams->forceWindow = (value != 0);
-        return CCtxParams->forceWindow;
-
-    case ZSTD_c_forceAttachDict : {
-        const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
-        BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
-        CCtxParams->attachDictPref = pref;
-        return CCtxParams->attachDictPref;
-    }
-
-    case ZSTD_c_literalCompressionMode : {
-        const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
-        BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
-        CCtxParams->literalCompressionMode = lcm;
-        return CCtxParams->literalCompressionMode;
-    }
-
-    case ZSTD_c_nbWorkers :
-#ifndef ZSTD_MULTITHREAD
-        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
-        return 0;
-#else
-        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
-        CCtxParams->nbWorkers = value;
-        return CCtxParams->nbWorkers;
-#endif
-
-    case ZSTD_c_jobSize :
-#ifndef ZSTD_MULTITHREAD
-        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
-        return 0;
-#else
-        /* Adjust to the minimum non-default value. */
-        if (value != 0 && value < ZSTDMT_JOBSIZE_MIN)
-            value = ZSTDMT_JOBSIZE_MIN;
-        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
-        assert(value >= 0);
-        CCtxParams->jobSize = value;
-        return CCtxParams->jobSize;
-#endif
-
-    case ZSTD_c_overlapLog :
-#ifndef ZSTD_MULTITHREAD
-        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
-        return 0;
-#else
-        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value));
-        CCtxParams->overlapLog = value;
-        return CCtxParams->overlapLog;
-#endif
-
-    case ZSTD_c_rsyncable :
-#ifndef ZSTD_MULTITHREAD
-        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
-        return 0;
-#else
-        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value));
-        CCtxParams->rsyncable = value;
-        return CCtxParams->rsyncable;
-#endif
-
-    case ZSTD_c_enableLongDistanceMatching :
-        CCtxParams->ldmParams.enableLdm = (value!=0);
-        return CCtxParams->ldmParams.enableLdm;
-
-    case ZSTD_c_ldmHashLog :
-        if (value!=0)   /* 0 ==> auto */
-            BOUNDCHECK(ZSTD_c_ldmHashLog, value);
-        CCtxParams->ldmParams.hashLog = value;
-        return CCtxParams->ldmParams.hashLog;
-
-    case ZSTD_c_ldmMinMatch :
-        if (value!=0)   /* 0 ==> default */
-            BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
-        CCtxParams->ldmParams.minMatchLength = value;
-        return CCtxParams->ldmParams.minMatchLength;
-
-    case ZSTD_c_ldmBucketSizeLog :
-        if (value!=0)   /* 0 ==> default */
-            BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
-        CCtxParams->ldmParams.bucketSizeLog = value;
-        return CCtxParams->ldmParams.bucketSizeLog;
-
-    case ZSTD_c_ldmHashRateLog :
-        RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
-                        parameter_outOfBound);
-        CCtxParams->ldmParams.hashRateLog = value;
-        return CCtxParams->ldmParams.hashRateLog;
-
-    case ZSTD_c_targetCBlockSize :
-        if (value!=0)   /* 0 ==> default */
-            BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
-        CCtxParams->targetCBlockSize = value;
-        return CCtxParams->targetCBlockSize;
-
-    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
-    }
-}
-
-size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value)
-{
-    return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
-}
-
-size_t ZSTD_CCtxParams_getParameter(
-        ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value)
-{
-    switch(param)
-    {
-    case ZSTD_c_format :
-        *value = CCtxParams->format;
-        break;
-    case ZSTD_c_compressionLevel :
-        *value = CCtxParams->compressionLevel;
-        break;
-    case ZSTD_c_windowLog :
-        *value = (int)CCtxParams->cParams.windowLog;
-        break;
-    case ZSTD_c_hashLog :
-        *value = (int)CCtxParams->cParams.hashLog;
-        break;
-    case ZSTD_c_chainLog :
-        *value = (int)CCtxParams->cParams.chainLog;
-        break;
-    case ZSTD_c_searchLog :
-        *value = CCtxParams->cParams.searchLog;
-        break;
-    case ZSTD_c_minMatch :
-        *value = CCtxParams->cParams.minMatch;
-        break;
-    case ZSTD_c_targetLength :
-        *value = CCtxParams->cParams.targetLength;
-        break;
-    case ZSTD_c_strategy :
-        *value = (unsigned)CCtxParams->cParams.strategy;
-        break;
-    case ZSTD_c_contentSizeFlag :
-        *value = CCtxParams->fParams.contentSizeFlag;
-        break;
-    case ZSTD_c_checksumFlag :
-        *value = CCtxParams->fParams.checksumFlag;
-        break;
-    case ZSTD_c_dictIDFlag :
-        *value = !CCtxParams->fParams.noDictIDFlag;
-        break;
-    case ZSTD_c_forceMaxWindow :
-        *value = CCtxParams->forceWindow;
-        break;
-    case ZSTD_c_forceAttachDict :
-        *value = CCtxParams->attachDictPref;
-        break;
-    case ZSTD_c_literalCompressionMode :
-        *value = CCtxParams->literalCompressionMode;
-        break;
-    case ZSTD_c_nbWorkers :
-#ifndef ZSTD_MULTITHREAD
-        assert(CCtxParams->nbWorkers == 0);
-#endif
-        *value = CCtxParams->nbWorkers;
-        break;
-    case ZSTD_c_jobSize :
-#ifndef ZSTD_MULTITHREAD
-        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
-#else
-        assert(CCtxParams->jobSize <= INT_MAX);
-        *value = (int)CCtxParams->jobSize;
-        break;
-#endif
-    case ZSTD_c_overlapLog :
-#ifndef ZSTD_MULTITHREAD
-        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
-#else
-        *value = CCtxParams->overlapLog;
-        break;
-#endif
-    case ZSTD_c_rsyncable :
-#ifndef ZSTD_MULTITHREAD
-        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
-#else
-        *value = CCtxParams->rsyncable;
-        break;
-#endif
-    case ZSTD_c_enableLongDistanceMatching :
-        *value = CCtxParams->ldmParams.enableLdm;
-        break;
-    case ZSTD_c_ldmHashLog :
-        *value = CCtxParams->ldmParams.hashLog;
-        break;
-    case ZSTD_c_ldmMinMatch :
-        *value = CCtxParams->ldmParams.minMatchLength;
-        break;
-    case ZSTD_c_ldmBucketSizeLog :
-        *value = CCtxParams->ldmParams.bucketSizeLog;
-        break;
-    case ZSTD_c_ldmHashRateLog :
-        *value = CCtxParams->ldmParams.hashRateLog;
-        break;
-    case ZSTD_c_targetCBlockSize :
-        *value = (int)CCtxParams->targetCBlockSize;
-        break;
-    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
-    }
-    return 0;
-}
-
-/** ZSTD_CCtx_setParametersUsingCCtxParams() :
- *  just copies `params` into `cctx`;
- *  no compression action is performed, the parameters are merely stored.
- *  If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
- *    This is possible even if a compression is ongoing.
- *    In which case, new parameters will be applied on the fly, starting with next compression job.
- */
-size_t ZSTD_CCtx_setParametersUsingCCtxParams(
-        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
-{
-    DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
-    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
-    RETURN_ERROR_IF(cctx->cdict, stage_wrong);
-
-    cctx->requestedParams = *params;
-    return 0;
-}
-
-ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
-{
-    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
-    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
-    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
-    return 0;
-}
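The pledge is only a promise recorded before the frame starts; at the end directive the library verifies that exactly that many bytes were consumed. A sketch of declaring it for a one-shot streaming call (assuming `ZSTD_compressStream2` is available):

```c
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    const char src[] = "pledged-size frame";
    char dstBuf[128];
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx == NULL) return 1;

    ZSTD_CCtx_setPledgedSrcSize(cctx, sizeof(src));   /* must precede the first byte of the frame */

    {   ZSTD_inBuffer  in  = { src, sizeof(src), 0 };
        ZSTD_outBuffer out = { dstBuf, sizeof(dstBuf), 0 };
        size_t const remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
        if (ZSTD_isError(remaining) || remaining != 0) return 1;
        /* Had the total input not matched the pledge, the call above would have errored out. */
        printf("frame of %zu bytes, content size recorded in the header\n", out.pos);
    }
    ZSTD_freeCCtx(cctx);
    return 0;
}
```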
-
-/**
- * Initializes the local dict using the requested parameters.
- * NOTE: This does not use the pledged src size, because it may be used for more
- * than one compression.
- */
-static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
-{
-    ZSTD_localDict* const dl = &cctx->localDict;
-    ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(
-            &cctx->requestedParams, 0, dl->dictSize);
-    if (dl->dict == NULL) {
-        /* No local dictionary. */
-        assert(dl->dictBuffer == NULL);
-        assert(dl->cdict == NULL);
-        assert(dl->dictSize == 0);
-        return 0;
-    }
-    if (dl->cdict != NULL) {
-        assert(cctx->cdict == dl->cdict);
-        /* Local dictionary already initialized. */
-        return 0;
-    }
-    assert(dl->dictSize > 0);
-    assert(cctx->cdict == NULL);
-    assert(cctx->prefixDict.dict == NULL);
-
-    dl->cdict = ZSTD_createCDict_advanced(
-            dl->dict,
-            dl->dictSize,
-            ZSTD_dlm_byRef,
-            dl->dictContentType,
-            cParams,
-            cctx->customMem);
-    RETURN_ERROR_IF(!dl->cdict, memory_allocation);
-    cctx->cdict = dl->cdict;
-    return 0;
-}
-
-size_t ZSTD_CCtx_loadDictionary_advanced(
-        ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
-        ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
-{
-    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
-    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
-                    "no malloc for static CCtx");
-    DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
-    ZSTD_clearAllDicts(cctx);  /* in case one already exists */
-    if (dict == NULL || dictSize == 0)  /* no dictionary mode */
-        return 0;
-    if (dictLoadMethod == ZSTD_dlm_byRef) {
-        cctx->localDict.dict = dict;
-    } else {
-        void* dictBuffer = ZSTD_malloc(dictSize, cctx->customMem);
-        RETURN_ERROR_IF(!dictBuffer, memory_allocation);
-        memcpy(dictBuffer, dict, dictSize);
-        cctx->localDict.dictBuffer = dictBuffer;
-        cctx->localDict.dict = dictBuffer;
-    }
-    cctx->localDict.dictSize = dictSize;
-    cctx->localDict.dictContentType = dictContentType;
-    return 0;
-}
-
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
-      ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
-{
-    return ZSTD_CCtx_loadDictionary_advanced(
-            cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
-}
-
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
-{
-    return ZSTD_CCtx_loadDictionary_advanced(
-            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
-}
-
-
-size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
-{
-    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
-    /* Free the existing local cdict (if any) to save memory. */
-    ZSTD_clearAllDicts(cctx);
-    cctx->cdict = cdict;
-    return 0;
-}
-
-size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
-{
-    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
-}
-
-size_t ZSTD_CCtx_refPrefix_advanced(
-        ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
-{
-    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
-    ZSTD_clearAllDicts(cctx);
-    cctx->prefixDict.dict = prefix;
-    cctx->prefixDict.dictSize = prefixSize;
-    cctx->prefixDict.dictContentType = dictContentType;
-    return 0;
-}
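The three attachment paths above differ mostly in lifetime: a loaded dictionary (copied by default) persists across frames, a referenced `CDict` is borrowed, and a prefix is referenced and consumed by exactly one frame. A small sketch contrasting the first and the last (the sample data is purely illustrative; any buffer without the dictionary magic is loaded as raw content under `ZSTD_dct_auto`):

```c
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    static const char sample[] = "GET /api/v1/devices HTTP/1.1\r\nHost: voltha\r\n";
    static const char src[]    = "GET /api/v1/devices HTTP/1.1\r\nHost: voltha\r\nAccept: */*\r\n";
    char dst[256];

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx == NULL) return 1;

    /* byCopy load: stays attached for subsequent frames until the next load/reset. */
    ZSTD_CCtx_loadDictionary(cctx, sample, sizeof(sample));
    {   size_t const c1 = ZSTD_compress2(cctx, dst, sizeof(dst), src, sizeof(src));
        printf("with dictionary: %zu bytes\n", ZSTD_isError(c1) ? 0 : c1);
    }

    /* A prefix, by contrast, is referenced (not copied) and applies to a single frame only. */
    ZSTD_CCtx_refPrefix(cctx, sample, sizeof(sample));
    {   size_t const c2 = ZSTD_compress2(cctx, dst, sizeof(dst), src, sizeof(src));
        printf("with prefix:     %zu bytes\n", ZSTD_isError(c2) ? 0 : c2);
    }

    ZSTD_freeCCtx(cctx);
    return 0;
}
```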
-
-/*! ZSTD_CCtx_reset() :
- *  Also dumps dictionary */
-size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
-{
-    if ( (reset == ZSTD_reset_session_only)
-      || (reset == ZSTD_reset_session_and_parameters) ) {
-        cctx->streamStage = zcss_init;
-        cctx->pledgedSrcSizePlusOne = 0;
-    }
-    if ( (reset == ZSTD_reset_parameters)
-      || (reset == ZSTD_reset_session_and_parameters) ) {
-        RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
-        ZSTD_clearAllDicts(cctx);
-        return ZSTD_CCtxParams_reset(&cctx->requestedParams);
-    }
-    return 0;
-}
-
-
-/** ZSTD_checkCParams() :
-    check that CParam values remain within the authorized range.
-    @return : 0, or an error code if one value is beyond the authorized range */
-size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
-{
-    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
-    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);
-    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);
-    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
-    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);
-    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
-    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);
-    return 0;
-}
-
-/** ZSTD_clampCParams() :
- *  clamp CParam values into the valid range.
- *  @return : valid CParams */
-static ZSTD_compressionParameters
-ZSTD_clampCParams(ZSTD_compressionParameters cParams)
-{
-#   define CLAMP_TYPE(cParam, val, type) {                                \
-        ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \
-        if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \
-        else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
-    }
-#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
-    CLAMP(ZSTD_c_windowLog, cParams.windowLog);
-    CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
-    CLAMP(ZSTD_c_hashLog,   cParams.hashLog);
-    CLAMP(ZSTD_c_searchLog, cParams.searchLog);
-    CLAMP(ZSTD_c_minMatch,  cParams.minMatch);
-    CLAMP(ZSTD_c_targetLength,cParams.targetLength);
-    CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
-    return cParams;
-}
-
-/** ZSTD_cycleLog() :
- *  condition for correct operation : hashLog > 1 */
-static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
-{
-    U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
-    return hashLog - btScale;
-}
-
-/** ZSTD_adjustCParams_internal() :
- *  optimize `cPar` for a specified input (`srcSize` and `dictSize`).
- *  mostly downsize to reduce memory consumption and initialization latency.
- * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
- *  note : for the time being, `srcSize==0` means "unknown" too, for compatibility with older convention.
- *  condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
-static ZSTD_compressionParameters
-ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
-                            unsigned long long srcSize,
-                            size_t dictSize)
-{
-    static const U64 minSrcSize = 513; /* (1<<9) + 1 */
-    static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
-    assert(ZSTD_checkCParams(cPar)==0);
-
-    if (dictSize && (srcSize+1<2) /* ZSTD_CONTENTSIZE_UNKNOWN and 0 mean "unknown" */ )
-        srcSize = minSrcSize;  /* presumed small when there is a dictionary */
-    else if (srcSize == 0)
-        srcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* 0 == unknown : presumed large */
-
-    /* resize windowLog if input is small enough, to use less memory */
-    if ( (srcSize < maxWindowResize)
-      && (dictSize < maxWindowResize) )  {
-        U32 const tSize = (U32)(srcSize + dictSize);
-        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
-        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
-                            ZSTD_highbit32(tSize-1) + 1;
-        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
-    }
-    if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1;
-    {   U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
-        if (cycleLog > cPar.windowLog)
-            cPar.chainLog -= (cycleLog - cPar.windowLog);
-    }
-
-    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
-        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */
-
-    return cPar;
-}
-
-ZSTD_compressionParameters
-ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
-                   unsigned long long srcSize,
-                   size_t dictSize)
-{
-    cPar = ZSTD_clampCParams(cPar);   /* resulting cPar is necessarily valid (all parameters within range) */
-    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
-}
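The adjustment above never changes correctness; it only shrinks the window and tables when the input (or dictionary) is known to be small, to save memory and setup time. A sketch of observing the effect (these entry points are advanced API, so `ZSTD_STATIC_LINKING_ONLY` is assumed):

```c
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_getCParams / ZSTD_adjustCParams are advanced API */
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    unsigned long long const srcSize = 4 * 1024;                        /* small, known input */
    ZSTD_compressionParameters const def = ZSTD_getCParams(19, 0, 0);   /* 0 == size unknown */
    ZSTD_compressionParameters const adj = ZSTD_adjustCParams(def, srcSize, 0);
    printf("windowLog %u -> %u for a %llu-byte input\n", def.windowLog, adj.windowLog, srcSize);
    return 0;
}
```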
-
-ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
-        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
-{
-    ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
-    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
-    if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
-    if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
-    if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
-    if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
-    if (CCtxParams->cParams.minMatch) cParams.minMatch = CCtxParams->cParams.minMatch;
-    if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
-    if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
-    assert(!ZSTD_checkCParams(cParams));
-    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize);
-}
-
-static size_t
-ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
-                       const U32 forCCtx)
-{
-    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
-    size_t const hSize = ((size_t)1) << cParams->hashLog;
-    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
-    size_t const h3Size = ((size_t)1) << hashLog3;
-    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
-    size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
-                          + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
-    size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
-                                ? optPotentialSpace
-                                : 0;
-    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
-                (U32)chainSize, (U32)hSize, (U32)h3Size);
-    return tableSpace + optSpace;
-}
-
-size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
-{
-    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
-    {   ZSTD_compressionParameters const cParams =
-                ZSTD_getCParamsFromCCtxParams(params, 0, 0);
-        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
-        U32    const divider = (cParams.minMatch==3) ? 3 : 4;
-        size_t const maxNbSeq = blockSize / divider;
-        size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
-        size_t const entropySpace = HUF_WORKSPACE_SIZE;
-        size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
-        size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1);
-
-        size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams);
-        size_t const ldmSeqSpace = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq);
-
-        size_t const neededSpace = entropySpace + blockStateSpace + tokenSpace +
-                                   matchStateSize + ldmSpace + ldmSeqSpace;
-
-        DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx));
-        DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace);
-        return sizeof(ZSTD_CCtx) + neededSpace;
-    }
-}
-
-size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
-{
-    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
-    return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
-}
-
-static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
-{
-    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
-    return ZSTD_estimateCCtxSize_usingCParams(cParams);
-}
-
-size_t ZSTD_estimateCCtxSize(int compressionLevel)
-{
-    int level;
-    size_t memBudget = 0;
-    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
-        size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
-        if (newMB > memBudget) memBudget = newMB;
-    }
-    return memBudget;
-}
-
-size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
-{
-    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
-    {   ZSTD_compressionParameters const cParams =
-                ZSTD_getCParamsFromCCtxParams(params, 0, 0);
-        size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
-        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
-        size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize;
-        size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
-        size_t const streamingSize = inBuffSize + outBuffSize;
-
-        return CCtxSize + streamingSize;
-    }
-}
-
-size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
-{
-    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
-    return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
-}
-
-static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
-{
-    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
-    return ZSTD_estimateCStreamSize_usingCParams(cParams);
-}
-
-size_t ZSTD_estimateCStreamSize(int compressionLevel)
-{
-    int level;
-    size_t memBudget = 0;
-    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
-        size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
-        if (newMB > memBudget) memBudget = newMB;
-    }
-    return memBudget;
-}
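These estimators pair naturally with `ZSTD_initStaticCCtx` shown earlier: size the buffer first, then carve the context out of it. A minimal sketch (advanced API, `ZSTD_STATIC_LINKING_ONLY` assumed; error handling trimmed):

```c
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_estimateCCtxSize / ZSTD_initStaticCCtx are advanced API */
#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

int main(void)
{
    int const level = 3;
    size_t const need = ZSTD_estimateCCtxSize(level);        /* worst case across levels 1..3 */
    void* const workspace = malloc(need);                    /* must be 8-byte aligned; malloc qualifies */
    ZSTD_CCtx* const cctx = workspace ? ZSTD_initStaticCCtx(workspace, need) : NULL;
    if (cctx == NULL) return 1;                               /* NULL when too small or misaligned */

    printf("one-shot CCtx fits in a %zu-byte caller-owned workspace\n", need);
    /* Do not ZSTD_freeCCtx() a static context; just release the backing buffer. */
    free(workspace);
    return 0;
}
```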
-
-/* ZSTD_getFrameProgression():
- * tells how much data has been consumed (input) and produced (output) for the current frame.
- * Able to count progression inside worker threads (non-blocking mode).
- */
-ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
-{
-#ifdef ZSTD_MULTITHREAD
-    if (cctx->appliedParams.nbWorkers > 0) {
-        return ZSTDMT_getFrameProgression(cctx->mtctx);
-    }
-#endif
-    {   ZSTD_frameProgression fp;
-        size_t const buffered = (cctx->inBuff == NULL) ? 0 :
-                                cctx->inBuffPos - cctx->inToCompress;
-        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
-        assert(buffered <= ZSTD_BLOCKSIZE_MAX);
-        fp.ingested = cctx->consumedSrcSize + buffered;
-        fp.consumed = cctx->consumedSrcSize;
-        fp.produced = cctx->producedCSize;
-        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within streaming output buffer */
-        fp.currentJobID = 0;
-        fp.nbActiveWorkers = 0;
-        return fp;
-}   }
-
-/*! ZSTD_toFlushNow()
- *  Only useful for multithreading scenarios currently (nbWorkers >= 1).
- */
-size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
-{
-#ifdef ZSTD_MULTITHREAD
-    if (cctx->appliedParams.nbWorkers > 0) {
-        return ZSTDMT_toFlushNow(cctx->mtctx);
-    }
-#endif
-    (void)cctx;
-    return 0;   /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
-}
-
-
-
-static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
-                                  ZSTD_compressionParameters cParams2)
-{
-    return (cParams1.hashLog  == cParams2.hashLog)
-         & (cParams1.chainLog == cParams2.chainLog)
-         & (cParams1.strategy == cParams2.strategy)   /* opt parser space */
-         & ((cParams1.minMatch==3) == (cParams2.minMatch==3));  /* hashlog3 space */
-}
-
-static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
-                                    ZSTD_compressionParameters cParams2)
-{
-    (void)cParams1;
-    (void)cParams2;
-    assert(cParams1.windowLog    == cParams2.windowLog);
-    assert(cParams1.chainLog     == cParams2.chainLog);
-    assert(cParams1.hashLog      == cParams2.hashLog);
-    assert(cParams1.searchLog    == cParams2.searchLog);
-    assert(cParams1.minMatch     == cParams2.minMatch);
-    assert(cParams1.targetLength == cParams2.targetLength);
-    assert(cParams1.strategy     == cParams2.strategy);
-}
-
-/** The parameters are equivalent if ldm is not enabled in both sets or
- *  all the parameters are equivalent. */
-static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1,
-                                    ldmParams_t ldmParams2)
-{
-    return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) ||
-           (ldmParams1.enableLdm == ldmParams2.enableLdm &&
-            ldmParams1.hashLog == ldmParams2.hashLog &&
-            ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog &&
-            ldmParams1.minMatchLength == ldmParams2.minMatchLength &&
-            ldmParams1.hashRateLog == ldmParams2.hashRateLog);
-}
-
-typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;
-
-/* ZSTD_sufficientBuff() :
- * check internal buffers exist for streaming if buffPol == ZSTDb_buffered .
- * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */
-static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t maxNbSeq1,
-                            size_t maxNbLit1,
-                            ZSTD_buffered_policy_e buffPol2,
-                            ZSTD_compressionParameters cParams2,
-                            U64 pledgedSrcSize)
-{
-    size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize));
-    size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2);
-    size_t const maxNbSeq2 = blockSize2 / ((cParams2.minMatch == 3) ? 3 : 4);
-    size_t const maxNbLit2 = blockSize2;
-    size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0;
-    DEBUGLOG(4, "ZSTD_sufficientBuff: is neededBufferSize2=%u <= bufferSize1=%u",
-                (U32)neededBufferSize2, (U32)bufferSize1);
-    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbSeq2=%u <= maxNbSeq1=%u",
-                (U32)maxNbSeq2, (U32)maxNbSeq1);
-    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbLit2=%u <= maxNbLit1=%u",
-                (U32)maxNbLit2, (U32)maxNbLit1);
-    return (maxNbLit2 <= maxNbLit1)
-         & (maxNbSeq2 <= maxNbSeq1)
-         & (neededBufferSize2 <= bufferSize1);
-}
-
-/** Equivalence for resetCCtx purposes */
-static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
-                                 ZSTD_CCtx_params params2,
-                                 size_t buffSize1,
-                                 size_t maxNbSeq1, size_t maxNbLit1,
-                                 ZSTD_buffered_policy_e buffPol2,
-                                 U64 pledgedSrcSize)
-{
-    DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize);
-    if (!ZSTD_equivalentCParams(params1.cParams, params2.cParams)) {
-      DEBUGLOG(4, "ZSTD_equivalentCParams() == 0");
-      return 0;
-    }
-    if (!ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams)) {
-      DEBUGLOG(4, "ZSTD_equivalentLdmParams() == 0");
-      return 0;
-    }
-    if (!ZSTD_sufficientBuff(buffSize1, maxNbSeq1, maxNbLit1, buffPol2,
-                             params2.cParams, pledgedSrcSize)) {
-      DEBUGLOG(4, "ZSTD_sufficientBuff() == 0");
-      return 0;
-    }
-    return 1;
-}
-
-static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
-{
-    int i;
-    for (i = 0; i < ZSTD_REP_NUM; ++i)
-        bs->rep[i] = repStartValue[i];
-    bs->entropy.huf.repeatMode = HUF_repeat_none;
-    bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
-    bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
-    bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
-}
-
-/*! ZSTD_invalidateMatchState()
- *  Invalidate all the matches in the match finder tables.
- *  Requires nextSrc and base to be set (can be NULL).
- */
-static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
-{
-    ZSTD_window_clear(&ms->window);
-
-    ms->nextToUpdate = ms->window.dictLimit;
-    ms->loadedDictEnd = 0;
-    ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
-    ms->dictMatchState = NULL;
-}
-
-/*! ZSTD_continueCCtx() :
- *  reuse CCtx without reset (note : requires no dictionary) */
-static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize)
-{
-    size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
-    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
-    DEBUGLOG(4, "ZSTD_continueCCtx: re-use context in place");
-
-    cctx->blockSize = blockSize;   /* previous block size could be different even for same windowLog, due to pledgedSrcSize */
-    cctx->appliedParams = params;
-    cctx->blockState.matchState.cParams = params.cParams;
-    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
-    cctx->consumedSrcSize = 0;
-    cctx->producedCSize = 0;
-    if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
-        cctx->appliedParams.fParams.contentSizeFlag = 0;
-    DEBUGLOG(4, "pledged content size : %u ; flag : %u",
-        (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag);
-    cctx->stage = ZSTDcs_init;
-    cctx->dictID = 0;
-    if (params.ldmParams.enableLdm)
-        ZSTD_window_clear(&cctx->ldmState.window);
-    ZSTD_referenceExternalSequences(cctx, NULL, 0);
-    ZSTD_invalidateMatchState(&cctx->blockState.matchState);
-    ZSTD_reset_compressedBlockState(cctx->blockState.prevCBlock);
-    XXH64_reset(&cctx->xxhState, 0);
-    return 0;
-}
-
-typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;
-
-typedef enum { ZSTD_resetTarget_CDict, ZSTD_resetTarget_CCtx } ZSTD_resetTarget_e;
-
-static void*
-ZSTD_reset_matchState(ZSTD_matchState_t* ms,
-                      void* ptr,
-                const ZSTD_compressionParameters* cParams,
-                      ZSTD_compResetPolicy_e const crp, ZSTD_resetTarget_e const forWho)
-{
-    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
-    size_t const hSize = ((size_t)1) << cParams->hashLog;
-    U32    const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
-    size_t const h3Size = ((size_t)1) << hashLog3;
-    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
-
-    assert(((size_t)ptr & 3) == 0);
-
-    ms->hashLog3 = hashLog3;
-    memset(&ms->window, 0, sizeof(ms->window));
-    ms->window.dictLimit = 1;    /* start from 1, so that 1st position is valid */
-    ms->window.lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
-    ms->window.nextSrc = ms->window.base + 1;   /* see issue #1241 */
-    ZSTD_invalidateMatchState(ms);
-
-    /* opt parser space */
-    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
-        DEBUGLOG(4, "reserving optimal parser space");
-        ms->opt.litFreq = (unsigned*)ptr;
-        ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
-        ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1);
-        ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1);
-        ptr = ms->opt.offCodeFreq + (MaxOff+1);
-        ms->opt.matchTable = (ZSTD_match_t*)ptr;
-        ptr = ms->opt.matchTable + ZSTD_OPT_NUM+1;
-        ms->opt.priceTable = (ZSTD_optimal_t*)ptr;
-        ptr = ms->opt.priceTable + ZSTD_OPT_NUM+1;
-    }
-
-    /* table Space */
-    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_noMemset);
-    assert(((size_t)ptr & 3) == 0);  /* ensure ptr is properly aligned */
-    if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace);   /* reset tables only */
-    ms->hashTable = (U32*)(ptr);
-    ms->chainTable = ms->hashTable + hSize;
-    ms->hashTable3 = ms->chainTable + chainSize;
-    ptr = ms->hashTable3 + h3Size;
-
-    ms->cParams = *cParams;
-
-    assert(((size_t)ptr & 3) == 0);
-    return ptr;
-}
-
-/* ZSTD_indexTooCloseToMax() :
- * minor optimization : prefer memset() rather than reduceIndex()
- * which is measurably slow in some circumstances (reported for Visual Studio).
- * Works when re-using a context for a lot of smallish inputs :
- * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
- * memset() will be triggered before reduceIndex().
- */
-#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
-static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
-{
-    return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
-}
-
-#define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */
-#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128  /* when workspace is continuously too large
-                                         * during at least this number of times,
-                                         * context's memory usage is considered wasteful,
-                                         * because it's sized to handle a worst case scenario which rarely happens.
-                                         * In which case, resize it down to free some memory */
-
-/*! ZSTD_resetCCtx_internal() :
-    note : `params` are assumed fully validated at this stage */
-static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
-                                      ZSTD_CCtx_params params,
-                                      U64 const pledgedSrcSize,
-                                      ZSTD_compResetPolicy_e const crp,
-                                      ZSTD_buffered_policy_e const zbuff)
-{
-    DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
-                (U32)pledgedSrcSize, params.cParams.windowLog);
-    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
-
-    if (crp == ZSTDcrp_continue) {
-        if (ZSTD_equivalentParams(zc->appliedParams, params,
-                                  zc->inBuffSize,
-                                  zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit,
-                                  zbuff, pledgedSrcSize) ) {
-            DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> consider continue mode");
-            zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0);   /* if it was too large, it still is */
-            if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION) {
-                DEBUGLOG(4, "continue mode confirmed (wLog1=%u, blockSize1=%zu)",
-                            zc->appliedParams.cParams.windowLog, zc->blockSize);
-                if (ZSTD_indexTooCloseToMax(zc->blockState.matchState.window)) {
-                    /* prefer a reset, faster than a rescale */
-                    ZSTD_reset_matchState(&zc->blockState.matchState,
-                                           zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32,
-                                          &params.cParams,
-                                           crp, ZSTD_resetTarget_CCtx);
-                }
-                return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
-    }   }   }
-    DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx");
-
-    if (params.ldmParams.enableLdm) {
-        /* Adjust long distance matching parameters */
-        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
-        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
-        assert(params.ldmParams.hashRateLog < 32);
-        zc->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
-    }
-
-    {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
-        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
-        U32    const divider = (params.cParams.minMatch==3) ? 3 : 4;
-        size_t const maxNbSeq = blockSize / divider;
-        size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
-        size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
-        size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
-        size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1);
-        size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
-        void* ptr;   /* used to partition workSpace */
-
-        /* Check if workSpace is large enough, alloc a new one if needed */
-        {   size_t const entropySpace = HUF_WORKSPACE_SIZE;
-            size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
-            size_t const bufferSpace = buffInSize + buffOutSize;
-            size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams);
-            size_t const ldmSeqSpace = maxNbLdmSeq * sizeof(rawSeq);
-
-            size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace +
-                                       ldmSeqSpace + matchStateSize + tokenSpace +
-                                       bufferSpace;
-
-            int const workSpaceTooSmall = zc->workSpaceSize < neededSpace;
-            int const workSpaceTooLarge = zc->workSpaceSize > ZSTD_WORKSPACETOOLARGE_FACTOR * neededSpace;
-            int const workSpaceWasteful = workSpaceTooLarge && (zc->workSpaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION);
-            zc->workSpaceOversizedDuration = workSpaceTooLarge ? zc->workSpaceOversizedDuration+1 : 0;
-
-            DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers",
-                        neededSpace>>10, matchStateSize>>10, bufferSpace>>10);
-            DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
-
-            if (workSpaceTooSmall || workSpaceWasteful) {
-                DEBUGLOG(4, "Resize workSpaceSize from %zuKB to %zuKB",
-                            zc->workSpaceSize >> 10,
-                            neededSpace >> 10);
-
-                RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
-
-                zc->workSpaceSize = 0;
-                ZSTD_free(zc->workSpace, zc->customMem);
-                zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
-                RETURN_ERROR_IF(zc->workSpace == NULL, memory_allocation);
-                zc->workSpaceSize = neededSpace;
-                zc->workSpaceOversizedDuration = 0;
-
-                /* Statically sized space.
-                 * entropyWorkspace never moves,
-                 * though prev/next block swap places */
-                assert(((size_t)zc->workSpace & 3) == 0);   /* ensure correct alignment */
-                assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t));
-                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace;
-                zc->blockState.nextCBlock = zc->blockState.prevCBlock + 1;
-                ptr = zc->blockState.nextCBlock + 1;
-                zc->entropyWorkspace = (U32*)ptr;
-        }   }
-
-        /* init params */
-        zc->appliedParams = params;
-        zc->blockState.matchState.cParams = params.cParams;
-        zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
-        zc->consumedSrcSize = 0;
-        zc->producedCSize = 0;
-        if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
-            zc->appliedParams.fParams.contentSizeFlag = 0;
-        DEBUGLOG(4, "pledged content size : %u ; flag : %u",
-            (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
-        zc->blockSize = blockSize;
-
-        XXH64_reset(&zc->xxhState, 0);
-        zc->stage = ZSTDcs_init;
-        zc->dictID = 0;
-
-        ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
-
-        ptr = ZSTD_reset_matchState(&zc->blockState.matchState,
-                                     zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32,
-                                    &params.cParams,
-                                     crp, ZSTD_resetTarget_CCtx);
-
-        /* ldm hash table */
-        /* initialize bucketOffsets table later for pointer alignment */
-        if (params.ldmParams.enableLdm) {
-            size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
-            memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t));
-            assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
-            zc->ldmState.hashTable = (ldmEntry_t*)ptr;
-            ptr = zc->ldmState.hashTable + ldmHSize;
-            zc->ldmSequences = (rawSeq*)ptr;
-            ptr = zc->ldmSequences + maxNbLdmSeq;
-            zc->maxNbLdmSequences = maxNbLdmSeq;
-
-            memset(&zc->ldmState.window, 0, sizeof(zc->ldmState.window));
-        }
-        assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
-
-        /* sequences storage */
-        zc->seqStore.maxNbSeq = maxNbSeq;
-        zc->seqStore.sequencesStart = (seqDef*)ptr;
-        ptr = zc->seqStore.sequencesStart + maxNbSeq;
-        zc->seqStore.llCode = (BYTE*) ptr;
-        zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
-        zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
-        zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
-        /* ZSTD_wildcopy() is used to copy into the literals buffer,
-         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
-         */
-        zc->seqStore.maxNbLit = blockSize;
-        ptr = zc->seqStore.litStart + blockSize + WILDCOPY_OVERLENGTH;
-
-        /* ldm bucketOffsets table */
-        if (params.ldmParams.enableLdm) {
-            size_t const ldmBucketSize =
-                  ((size_t)1) << (params.ldmParams.hashLog -
-                                  params.ldmParams.bucketSizeLog);
-            memset(ptr, 0, ldmBucketSize);
-            zc->ldmState.bucketOffsets = (BYTE*)ptr;
-            ptr = zc->ldmState.bucketOffsets + ldmBucketSize;
-            ZSTD_window_clear(&zc->ldmState.window);
-        }
-        ZSTD_referenceExternalSequences(zc, NULL, 0);
-
-        /* buffers */
-        zc->inBuffSize = buffInSize;
-        zc->inBuff = (char*)ptr;
-        zc->outBuffSize = buffOutSize;
-        zc->outBuff = zc->inBuff + buffInSize;
-
-        return 0;
-    }
-}
-
-/* ZSTD_invalidateRepCodes() :
- * ensures next compression will not use repcodes from previous block.
- * Note : only works with regular variant;
- *        do not use with extDict variant ! */
-void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
-    int i;
-    for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
-    assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
-}
-
-/* These are the approximate sizes for each strategy past which copying the
- * dictionary tables into the working context is faster than using them
- * in-place.
- */
-static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
-    8 KB,  /* unused */
-    8 KB,  /* ZSTD_fast */
-    16 KB, /* ZSTD_dfast */
-    32 KB, /* ZSTD_greedy */
-    32 KB, /* ZSTD_lazy */
-    32 KB, /* ZSTD_lazy2 */
-    32 KB, /* ZSTD_btlazy2 */
-    32 KB, /* ZSTD_btopt */
-    8 KB,  /* ZSTD_btultra */
-    8 KB   /* ZSTD_btultra2 */
-};
-
-static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
-                                 ZSTD_CCtx_params params,
-                                 U64 pledgedSrcSize)
-{
-    size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
-    return ( pledgedSrcSize <= cutoff
-          || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
-          || params.attachDictPref == ZSTD_dictForceAttach )
-        && params.attachDictPref != ZSTD_dictForceCopy
-        && !params.forceWindow; /* dictMatchState isn't correctly
-                                 * handled in _enforceMaxDist */
-}
-
-static size_t
-ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
-                        const ZSTD_CDict* cdict,
-                        ZSTD_CCtx_params params,
-                        U64 pledgedSrcSize,
-                        ZSTD_buffered_policy_e zbuff)
-{
-    {   const ZSTD_compressionParameters* const cdict_cParams = &cdict->matchState.cParams;
-        unsigned const windowLog = params.cParams.windowLog;
-        assert(windowLog != 0);
-        /* Resize working context table params for input only, since the dict
-         * has its own tables. */
-        params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0);
-        params.cParams.windowLog = windowLog;
-        ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
-                                ZSTDcrp_continue, zbuff);
-        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
-    }
-
-    {   const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
-                                  - cdict->matchState.window.base);
-        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
-        if (cdictLen == 0) {
-            /* don't even attach dictionaries with no contents */
-            DEBUGLOG(4, "skipping attaching empty dictionary");
-        } else {
-            DEBUGLOG(4, "attaching dictionary into context");
-            cctx->blockState.matchState.dictMatchState = &cdict->matchState;
-
-            /* prep working match state so dict matches never have negative indices
-             * when they are translated to the working context's index space. */
-            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
-                cctx->blockState.matchState.window.nextSrc =
-                    cctx->blockState.matchState.window.base + cdictEnd;
-                ZSTD_window_clear(&cctx->blockState.matchState.window);
-            }
-            /* loadedDictEnd is expressed within the referential of the active context */
-            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
-    }   }
-
-    cctx->dictID = cdict->dictID;
-
-    /* copy block state */
-    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
-
-    return 0;
-}
-
-static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
-                            const ZSTD_CDict* cdict,
-                            ZSTD_CCtx_params params,
-                            U64 pledgedSrcSize,
-                            ZSTD_buffered_policy_e zbuff)
-{
-    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
-
-    DEBUGLOG(4, "copying dictionary into context");
-
-    {   unsigned const windowLog = params.cParams.windowLog;
-        assert(windowLog != 0);
-        /* Copy only compression parameters related to tables. */
-        params.cParams = *cdict_cParams;
-        params.cParams.windowLog = windowLog;
-        ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
-                                ZSTDcrp_noMemset, zbuff);
-        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
-        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
-        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
-    }
-
-    /* copy tables */
-    {   size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
-        size_t const hSize =  (size_t)1 << cdict_cParams->hashLog;
-        size_t const tableSpace = (chainSize + hSize) * sizeof(U32);
-        assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
-        assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize);
-        assert((U32*)cdict->matchState.chainTable == (U32*)cdict->matchState.hashTable + hSize);  /* chainTable must follow hashTable */
-        assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize);
-        memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
-    }
-
-    /* Zero the hashTable3, since the cdict never fills it */
-    {   size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3;
-        assert(cdict->matchState.hashLog3 == 0);
-        memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
-    }
-
-    /* copy dictionary offsets */
-    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
-        ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
-        dstMatchState->window       = srcMatchState->window;
-        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
-        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
-    }
-
-    cctx->dictID = cdict->dictID;
-
-    /* copy block state */
-    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
-
-    return 0;
-}
-
-/* We have a choice between copying the dictionary context into the working
- * context, or referencing the dictionary context from the working context
- * in-place. We decide here which strategy to use. */
-static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
-                            const ZSTD_CDict* cdict,
-                            ZSTD_CCtx_params params,
-                            U64 pledgedSrcSize,
-                            ZSTD_buffered_policy_e zbuff)
-{
-
-    DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
-                (unsigned)pledgedSrcSize);
-
-    if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
-        return ZSTD_resetCCtx_byAttachingCDict(
-            cctx, cdict, params, pledgedSrcSize, zbuff);
-    } else {
-        return ZSTD_resetCCtx_byCopyingCDict(
-            cctx, cdict, params, pledgedSrcSize, zbuff);
-    }
-}
-
-/*! ZSTD_copyCCtx_internal() :
- *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
- *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
- *  The "context", in this case, refers to the hash and chain tables,
- *  entropy tables, and dictionary references.
- * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
- * @return : 0, or an error code */
-static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
-                            const ZSTD_CCtx* srcCCtx,
-                            ZSTD_frameParameters fParams,
-                            U64 pledgedSrcSize,
-                            ZSTD_buffered_policy_e zbuff)
-{
-    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
-    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong);
-
-    memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
-    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
-        /* Copy only compression parameters related to tables. */
-        params.cParams = srcCCtx->appliedParams.cParams;
-        params.fParams = fParams;
-        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
-                                ZSTDcrp_noMemset, zbuff);
-        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
-        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
-        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
-        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
-        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
-    }
-
-    /* copy tables */
-    {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
-        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
-        size_t const h3Size = (size_t)1 << srcCCtx->blockState.matchState.hashLog3;
-        size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
-        assert((U32*)dstCCtx->blockState.matchState.chainTable == (U32*)dstCCtx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
-        assert((U32*)dstCCtx->blockState.matchState.hashTable3 == (U32*)dstCCtx->blockState.matchState.chainTable + chainSize);
-        memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
-    }
-
-    /* copy dictionary offsets */
-    {
-        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
-        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
-        dstMatchState->window       = srcMatchState->window;
-        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
-        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
-    }
-    dstCCtx->dictID = srcCCtx->dictID;
-
-    /* copy block state */
-    memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
-
-    return 0;
-}
-
-/*! ZSTD_copyCCtx() :
- *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
- *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
- *  pledgedSrcSize==0 means "unknown".
-*   @return : 0, or an error code */
-size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
-{
-    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
-    ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0);
-    ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
-    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
-    fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
-
-    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
-                                fParams, pledgedSrcSize,
-                                zbuff);
-}
-
-
-#define ZSTD_ROWSIZE 16
-/*! ZSTD_reduceTable() :
- *  reduce table indexes by `reducerValue`, or squash to zero.
- *  PreserveMark preserves "unsorted mark" for btlazy2 strategy.
- *  It must be set to a clear 0/1 value, to remove branch during inlining.
- *  Presume table size is a multiple of ZSTD_ROWSIZE
- *  to help auto-vectorization */
-FORCE_INLINE_TEMPLATE void
-ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
-{
-    int const nbRows = (int)size / ZSTD_ROWSIZE;
-    int cellNb = 0;
-    int rowNb;
-    assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
-    assert(size < (1U<<31));   /* can be casted to int */
-    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
-        int column;
-        for (column=0; column<ZSTD_ROWSIZE; column++) {
-            if (preserveMark) {
-                U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
-                table[cellNb] += adder;
-            }
-            if (table[cellNb] < reducerValue) table[cellNb] = 0;
-            else table[cellNb] -= reducerValue;
-            cellNb++;
-    }   }
-}
-
-static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
-{
-    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
-}
-
-static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
-{
-    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
-}
-
-/*! ZSTD_reduceIndex() :
-*   rescale all indexes to avoid future overflow (indexes are U32) */
-static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
-{
-    {   U32 const hSize = (U32)1 << params->cParams.hashLog;
-        ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
-    }
-
-    if (params->cParams.strategy != ZSTD_fast) {
-        U32 const chainSize = (U32)1 << params->cParams.chainLog;
-        if (params->cParams.strategy == ZSTD_btlazy2)
-            ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
-        else
-            ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
-    }
-
-    if (ms->hashLog3) {
-        U32 const h3Size = (U32)1 << ms->hashLog3;
-        ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
-    }
-}
-
-
-/*-*******************************************************
-*  Block entropic compression
-*********************************************************/
-
-/* See doc/zstd_compression_format.md for detailed format description */
-
-static size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
-{
-    U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
-    RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
-                    dstSize_tooSmall);
-    MEM_writeLE24(dst, cBlockHeader24);
-    memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
-    return ZSTD_blockHeaderSize + srcSize;
-}
-
-static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    BYTE* const ostart = (BYTE* const)dst;
-    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
-
-    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall);
-
-    switch(flSize)
-    {
-        case 1: /* 2 - 1 - 5 */
-            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
-            break;
-        case 2: /* 2 - 2 - 12 */
-            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
-            break;
-        case 3: /* 2 - 2 - 20 */
-            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
-            break;
-        default:   /* not necessary : flSize is {1,2,3} */
-            assert(0);
-    }
-
-    memcpy(ostart + flSize, src, srcSize);
-    return srcSize + flSize;
-}
-
-static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    BYTE* const ostart = (BYTE* const)dst;
-    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
-
-    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
-
-    switch(flSize)
-    {
-        case 1: /* 2 - 1 - 5 */
-            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
-            break;
-        case 2: /* 2 - 2 - 12 */
-            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
-            break;
-        case 3: /* 2 - 2 - 20 */
-            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
-            break;
-        default:   /* not necessary : flSize is {1,2,3} */
-            assert(0);
-    }
-
-    ostart[flSize] = *(const BYTE*)src;
-    return flSize+1;
-}
-
-
-/* ZSTD_minGain() :
- * minimum compression required
- * to generate a compress block or a compressed literals section.
- * note : use same formula for both situations */
-static size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
-{
-    U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
-    ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
-    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
-    return (srcSize >> minlog) + 2;
-}
-
-static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
-                                     ZSTD_hufCTables_t* nextHuf,
-                                     ZSTD_strategy strategy, int disableLiteralCompression,
-                                     void* dst, size_t dstCapacity,
-                               const void* src, size_t srcSize,
-                                     void* workspace, size_t wkspSize,
-                               const int bmi2)
-{
-    size_t const minGain = ZSTD_minGain(srcSize, strategy);
-    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
-    BYTE*  const ostart = (BYTE*)dst;
-    U32 singleStream = srcSize < 256;
-    symbolEncodingType_e hType = set_compressed;
-    size_t cLitSize;
-
-    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)",
-                disableLiteralCompression);
-
-    /* Prepare nextEntropy assuming reusing the existing table */
-    memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
-
-    if (disableLiteralCompression)
-        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
-
-    /* small ? don't even attempt compression (speed opt) */
-#   define COMPRESS_LITERALS_SIZE_MIN 63
-    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
-        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
-    }
-
-    RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
-    {   HUF_repeat repeat = prevHuf->repeatMode;
-        int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
-        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
-        cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
-                                      workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
-                                : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
-                                      workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
-        if (repeat != HUF_repeat_none) {
-            /* reused the existing table */
-            hType = set_repeat;
-        }
-    }
-
-    if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
-        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
-        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
-    }
-    if (cLitSize==1) {
-        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
-        return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
-    }
-
-    if (hType == set_compressed) {
-        /* using a newly constructed table */
-        nextHuf->repeatMode = HUF_repeat_check;
-    }
-
-    /* Build header */
-    switch(lhSize)
-    {
-    case 3: /* 2 - 2 - 10 - 10 */
-        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
-            MEM_writeLE24(ostart, lhc);
-            break;
-        }
-    case 4: /* 2 - 2 - 14 - 14 */
-        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
-            MEM_writeLE32(ostart, lhc);
-            break;
-        }
-    case 5: /* 2 - 2 - 18 - 18 */
-        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
-            MEM_writeLE32(ostart, lhc);
-            ostart[4] = (BYTE)(cLitSize >> 10);
-            break;
-        }
-    default:  /* not possible : lhSize is {3,4,5} */
-        assert(0);
-    }
-    return lhSize+cLitSize;
-}
-
-
-void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
-{
-    const seqDef* const sequences = seqStorePtr->sequencesStart;
-    BYTE* const llCodeTable = seqStorePtr->llCode;
-    BYTE* const ofCodeTable = seqStorePtr->ofCode;
-    BYTE* const mlCodeTable = seqStorePtr->mlCode;
-    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
-    U32 u;
-    assert(nbSeq <= seqStorePtr->maxNbSeq);
-    for (u=0; u<nbSeq; u++) {
-        U32 const llv = sequences[u].litLength;
-        U32 const mlv = sequences[u].matchLength;
-        llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
-        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
-        mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
-    }
-    if (seqStorePtr->longLengthID==1)
-        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
-    if (seqStorePtr->longLengthID==2)
-        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
-}
-
-
-/**
- * -log2(x / 256) lookup table for x in [0, 256).
- * If x == 0: Return 0
- * Else: Return floor(-log2(x / 256) * 256)
- */
-static unsigned const kInverseProbabilityLog256[256] = {
-    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
-    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
-    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
-    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
-    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
-    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
-    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
-    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
-    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
-    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
-    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
-    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
-    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
-    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
-    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
-    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
-    106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
-    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
-    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
-    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
-    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
-    5,    4,    2,    1,
-};
-
-
-/**
- * Returns the cost in bits of encoding the distribution described by count
- * using the entropy bound.
- */
-static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
-{
-    unsigned cost = 0;
-    unsigned s;
-    for (s = 0; s <= max; ++s) {
-        unsigned norm = (unsigned)((256 * count[s]) / total);
-        if (count[s] != 0 && norm == 0)
-            norm = 1;
-        assert(count[s] < total);
-        cost += count[s] * kInverseProbabilityLog256[norm];
-    }
-    return cost >> 8;
-}
-
-
-/**
- * Returns the cost in bits of encoding the distribution in count using the
- * table described by norm. The max symbol support by norm is assumed >= max.
- * norm must be valid for every symbol with non-zero probability in count.
- */
-static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
-                                    unsigned const* count, unsigned const max)
-{
-    unsigned const shift = 8 - accuracyLog;
-    size_t cost = 0;
-    unsigned s;
-    assert(accuracyLog <= 8);
-    for (s = 0; s <= max; ++s) {
-        unsigned const normAcc = norm[s] != -1 ? norm[s] : 1;
-        unsigned const norm256 = normAcc << shift;
-        assert(norm256 > 0);
-        assert(norm256 < 256);
-        cost += count[s] * kInverseProbabilityLog256[norm256];
-    }
-    return cost >> 8;
-}
-
-
-static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
-  void const* ptr = ctable;
-  U16 const* u16ptr = (U16 const*)ptr;
-  U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
-  return maxSymbolValue;
-}
-
-
-/**
- * Returns the cost in bits of encoding the distribution in count using ctable.
- * Returns an error if ctable cannot represent all the symbols in count.
- */
-static size_t ZSTD_fseBitCost(
-    FSE_CTable const* ctable,
-    unsigned const* count,
-    unsigned const max)
-{
-    unsigned const kAccuracyLog = 8;
-    size_t cost = 0;
-    unsigned s;
-    FSE_CState_t cstate;
-    FSE_initCState(&cstate, ctable);
-    RETURN_ERROR_IF(ZSTD_getFSEMaxSymbolValue(ctable) < max, GENERIC,
-                    "Repeat FSE_CTable has maxSymbolValue %u < %u",
-                    ZSTD_getFSEMaxSymbolValue(ctable), max);
-    for (s = 0; s <= max; ++s) {
-        unsigned const tableLog = cstate.stateLog;
-        unsigned const badCost = (tableLog + 1) << kAccuracyLog;
-        unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
-        if (count[s] == 0)
-            continue;
-        RETURN_ERROR_IF(bitCost >= badCost, GENERIC,
-                        "Repeat FSE_CTable has Prob[%u] == 0", s);
-        cost += count[s] * bitCost;
-    }
-    return cost >> kAccuracyLog;
-}
-
-/**
- * Returns the cost in bytes of encoding the normalized count header.
- * Returns an error if any of the helper functions return an error.
- */
-static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
-                              size_t const nbSeq, unsigned const FSELog)
-{
-    BYTE wksp[FSE_NCOUNTBOUND];
-    S16 norm[MaxSeq + 1];
-    const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
-    FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
-    return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
-}
-
-
-typedef enum {
-    ZSTD_defaultDisallowed = 0,
-    ZSTD_defaultAllowed = 1
-} ZSTD_defaultPolicy_e;
-
-MEM_STATIC symbolEncodingType_e
-ZSTD_selectEncodingType(
-        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
-        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
-        FSE_CTable const* prevCTable,
-        short const* defaultNorm, U32 defaultNormLog,
-        ZSTD_defaultPolicy_e const isDefaultAllowed,
-        ZSTD_strategy const strategy)
-{
-    ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
-    if (mostFrequent == nbSeq) {
-        *repeatMode = FSE_repeat_none;
-        if (isDefaultAllowed && nbSeq <= 2) {
-            /* Prefer set_basic over set_rle when there are 2 or less symbols,
-             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
-             * If basic encoding isn't possible, always choose RLE.
-             */
-            DEBUGLOG(5, "Selected set_basic");
-            return set_basic;
-        }
-        DEBUGLOG(5, "Selected set_rle");
-        return set_rle;
-    }
-    if (strategy < ZSTD_lazy) {
-        if (isDefaultAllowed) {
-            size_t const staticFse_nbSeq_max = 1000;
-            size_t const mult = 10 - strategy;
-            size_t const baseLog = 3;
-            size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog;  /* 28-36 for offset, 56-72 for lengths */
-            assert(defaultNormLog >= 5 && defaultNormLog <= 6);  /* xx_DEFAULTNORMLOG */
-            assert(mult <= 9 && mult >= 7);
-            if ( (*repeatMode == FSE_repeat_valid)
-              && (nbSeq < staticFse_nbSeq_max) ) {
-                DEBUGLOG(5, "Selected set_repeat");
-                return set_repeat;
-            }
-            if ( (nbSeq < dynamicFse_nbSeq_min)
-              || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
-                DEBUGLOG(5, "Selected set_basic");
-                /* The format allows default tables to be repeated, but it isn't useful.
-                 * When using simple heuristics to select encoding type, we don't want
-                 * to confuse these tables with dictionaries. When running more careful
-                 * analysis, we don't need to waste time checking both repeating tables
-                 * and default tables.
-                 */
-                *repeatMode = FSE_repeat_none;
-                return set_basic;
-            }
-        }
-    } else {
-        size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
-        size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
-        size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
-        size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
-
-        if (isDefaultAllowed) {
-            assert(!ZSTD_isError(basicCost));
-            assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
-        }
-        assert(!ZSTD_isError(NCountCost));
-        assert(compressedCost < ERROR(maxCode));
-        DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
-                    (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
-        if (basicCost <= repeatCost && basicCost <= compressedCost) {
-            DEBUGLOG(5, "Selected set_basic");
-            assert(isDefaultAllowed);
-            *repeatMode = FSE_repeat_none;
-            return set_basic;
-        }
-        if (repeatCost <= compressedCost) {
-            DEBUGLOG(5, "Selected set_repeat");
-            assert(!ZSTD_isError(repeatCost));
-            return set_repeat;
-        }
-        assert(compressedCost < basicCost && compressedCost < repeatCost);
-    }
-    DEBUGLOG(5, "Selected set_compressed");
-    *repeatMode = FSE_repeat_check;
-    return set_compressed;
-}
-
-MEM_STATIC size_t
-ZSTD_buildCTable(void* dst, size_t dstCapacity,
-                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
-                unsigned* count, U32 max,
-                const BYTE* codeTable, size_t nbSeq,
-                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
-                const FSE_CTable* prevCTable, size_t prevCTableSize,
-                void* workspace, size_t workspaceSize)
-{
-    BYTE* op = (BYTE*)dst;
-    const BYTE* const oend = op + dstCapacity;
-    DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
-
-    switch (type) {
-    case set_rle:
-        FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max));
-        RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall);
-        *op = codeTable[0];
-        return 1;
-    case set_repeat:
-        memcpy(nextCTable, prevCTable, prevCTableSize);
-        return 0;
-    case set_basic:
-        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize));  /* note : could be pre-calculated */
-        return 0;
-    case set_compressed: {
-        S16 norm[MaxSeq + 1];
-        size_t nbSeq_1 = nbSeq;
-        const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
-        if (count[codeTable[nbSeq-1]] > 1) {
-            count[codeTable[nbSeq-1]]--;
-            nbSeq_1--;
-        }
-        assert(nbSeq_1 > 1);
-        FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
-        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);   /* overflow protected */
-            FORWARD_IF_ERROR(NCountSize);
-            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
-            return NCountSize;
-        }
-    }
-    default: assert(0); RETURN_ERROR(GENERIC);
-    }
-}
-
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_encodeSequences_body(
-            void* dst, size_t dstCapacity,
-            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
-            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
-            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
-            seqDef const* sequences, size_t nbSeq, int longOffsets)
-{
-    BIT_CStream_t blockStream;
-    FSE_CState_t  stateMatchLength;
-    FSE_CState_t  stateOffsetBits;
-    FSE_CState_t  stateLitLength;
-
-    RETURN_ERROR_IF(
-        ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
-        dstSize_tooSmall, "not enough space remaining");
-    DEBUGLOG(6, "available space for bitstream : %i  (dstCapacity=%u)",
-                (int)(blockStream.endPtr - blockStream.startPtr),
-                (unsigned)dstCapacity);
-
-    /* first symbols */
-    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
-    FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
-    FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
-    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
-    if (MEM_32bits()) BIT_flushBits(&blockStream);
-    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
-    if (MEM_32bits()) BIT_flushBits(&blockStream);
-    if (longOffsets) {
-        U32 const ofBits = ofCodeTable[nbSeq-1];
-        int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
-        if (extraBits) {
-            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
-            BIT_flushBits(&blockStream);
-        }
-        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
-                    ofBits - extraBits);
-    } else {
-        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
-    }
-    BIT_flushBits(&blockStream);
-
-    {   size_t n;
-        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
-            BYTE const llCode = llCodeTable[n];
-            BYTE const ofCode = ofCodeTable[n];
-            BYTE const mlCode = mlCodeTable[n];
-            U32  const llBits = LL_bits[llCode];
-            U32  const ofBits = ofCode;
-            U32  const mlBits = ML_bits[mlCode];
-            DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
-                        (unsigned)sequences[n].litLength,
-                        (unsigned)sequences[n].matchLength + MINMATCH,
-                        (unsigned)sequences[n].offset);
-                                                                            /* 32b*/  /* 64b*/
-                                                                            /* (7)*/  /* (7)*/
-            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
-            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
-            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
-            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
-            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
-                BIT_flushBits(&blockStream);                                /* (7)*/
-            BIT_addBits(&blockStream, sequences[n].litLength, llBits);
-            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
-            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
-            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
-            if (longOffsets) {
-                int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
-                if (extraBits) {
-                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);
-                    BIT_flushBits(&blockStream);                            /* (7)*/
-                }
-                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
-                            ofBits - extraBits);                            /* 31 */
-            } else {
-                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
-            }
-            BIT_flushBits(&blockStream);                                    /* (7)*/
-            DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
-    }   }
-
-    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
-    FSE_flushCState(&blockStream, &stateMatchLength);
-    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
-    FSE_flushCState(&blockStream, &stateOffsetBits);
-    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
-    FSE_flushCState(&blockStream, &stateLitLength);
-
-    {   size_t const streamSize = BIT_closeCStream(&blockStream);
-        RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
-        return streamSize;
-    }
-}
-
-static size_t
-ZSTD_encodeSequences_default(
-            void* dst, size_t dstCapacity,
-            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
-            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
-            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
-            seqDef const* sequences, size_t nbSeq, int longOffsets)
-{
-    return ZSTD_encodeSequences_body(dst, dstCapacity,
-                                    CTable_MatchLength, mlCodeTable,
-                                    CTable_OffsetBits, ofCodeTable,
-                                    CTable_LitLength, llCodeTable,
-                                    sequences, nbSeq, longOffsets);
-}
-
-
-#if DYNAMIC_BMI2
-
-static TARGET_ATTRIBUTE("bmi2") size_t
-ZSTD_encodeSequences_bmi2(
-            void* dst, size_t dstCapacity,
-            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
-            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
-            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
-            seqDef const* sequences, size_t nbSeq, int longOffsets)
-{
-    return ZSTD_encodeSequences_body(dst, dstCapacity,
-                                    CTable_MatchLength, mlCodeTable,
-                                    CTable_OffsetBits, ofCodeTable,
-                                    CTable_LitLength, llCodeTable,
-                                    sequences, nbSeq, longOffsets);
-}
-
-#endif
-
-static size_t ZSTD_encodeSequences(
-            void* dst, size_t dstCapacity,
-            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
-            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
-            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
-            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
-{
-    DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
-#if DYNAMIC_BMI2
-    if (bmi2) {
-        return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
-                                         CTable_MatchLength, mlCodeTable,
-                                         CTable_OffsetBits, ofCodeTable,
-                                         CTable_LitLength, llCodeTable,
-                                         sequences, nbSeq, longOffsets);
-    }
-#endif
-    (void)bmi2;
-    return ZSTD_encodeSequences_default(dst, dstCapacity,
-                                        CTable_MatchLength, mlCodeTable,
-                                        CTable_OffsetBits, ofCodeTable,
-                                        CTable_LitLength, llCodeTable,
-                                        sequences, nbSeq, longOffsets);
-}
-
-static int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
-{
-    switch (cctxParams->literalCompressionMode) {
-    case ZSTD_lcm_huffman:
-        return 0;
-    case ZSTD_lcm_uncompressed:
-        return 1;
-    default:
-        assert(0 /* impossible: pre-validated */);
-        /* fall-through */
-    case ZSTD_lcm_auto:
-        return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
-    }
-}
-
-/* ZSTD_compressSequences_internal():
- * actually compresses both literals and sequences */
-MEM_STATIC size_t
-ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
-                          const ZSTD_entropyCTables_t* prevEntropy,
-                                ZSTD_entropyCTables_t* nextEntropy,
-                          const ZSTD_CCtx_params* cctxParams,
-                                void* dst, size_t dstCapacity,
-                                void* workspace, size_t wkspSize,
-                          const int bmi2)
-{
-    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
-    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
-    unsigned count[MaxSeq+1];
-    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
-    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
-    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
-    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
-    const seqDef* const sequences = seqStorePtr->sequencesStart;
-    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
-    const BYTE* const llCodeTable = seqStorePtr->llCode;
-    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
-    BYTE* const ostart = (BYTE*)dst;
-    BYTE* const oend = ostart + dstCapacity;
-    BYTE* op = ostart;
-    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
-    BYTE* seqHead;
-    BYTE* lastNCount = NULL;
-
-    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
-    DEBUGLOG(5, "ZSTD_compressSequences_internal");
-
-    /* Compress literals */
-    {   const BYTE* const literals = seqStorePtr->litStart;
-        size_t const litSize = seqStorePtr->lit - literals;
-        size_t const cSize = ZSTD_compressLiterals(
-                                    &prevEntropy->huf, &nextEntropy->huf,
-                                    cctxParams->cParams.strategy,
-                                    ZSTD_disableLiteralsCompression(cctxParams),
-                                    op, dstCapacity,
-                                    literals, litSize,
-                                    workspace, wkspSize,
-                                    bmi2);
-        FORWARD_IF_ERROR(cSize);
-        assert(cSize <= dstCapacity);
-        op += cSize;
-    }
-
-    /* Sequences Header */
-    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
-                    dstSize_tooSmall);
-    if (nbSeq < 0x7F)
-        *op++ = (BYTE)nbSeq;
-    else if (nbSeq < LONGNBSEQ)
-        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
-    else
-        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
-    assert(op <= oend);
-    if (nbSeq==0) {
-        /* Copy the old tables over as if we repeated them */
-        memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
-        return op - ostart;
-    }
-
-    /* seqHead : flags for FSE encoding type */
-    seqHead = op++;
-    assert(op <= oend);
-
-    /* convert length/distances into codes */
-    ZSTD_seqToCodes(seqStorePtr);
-    /* build CTable for Literal Lengths */
-    {   unsigned max = MaxLL;
-        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */
-        DEBUGLOG(5, "Building LL table");
-        nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
-        LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
-                                        count, max, mostFrequent, nbSeq,
-                                        LLFSELog, prevEntropy->fse.litlengthCTable,
-                                        LL_defaultNorm, LL_defaultNormLog,
-                                        ZSTD_defaultAllowed, strategy);
-        assert(set_basic < set_compressed && set_rle < set_compressed);
-        assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
-        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
-                                                    count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
-                                                    prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
-                                                    workspace, wkspSize);
-            FORWARD_IF_ERROR(countSize);
-            if (LLtype == set_compressed)
-                lastNCount = op;
-            op += countSize;
-            assert(op <= oend);
-    }   }
-    /* build CTable for Offsets */
-    {   unsigned max = MaxOff;
-        size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
-        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
-        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
-        DEBUGLOG(5, "Building OF table");
-        nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
-        Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
-                                        count, max, mostFrequent, nbSeq,
-                                        OffFSELog, prevEntropy->fse.offcodeCTable,
-                                        OF_defaultNorm, OF_defaultNormLog,
-                                        defaultPolicy, strategy);
-        assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
-        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
-                                                    count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
-                                                    prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
-                                                    workspace, wkspSize);
-            FORWARD_IF_ERROR(countSize);
-            if (Offtype == set_compressed)
-                lastNCount = op;
-            op += countSize;
-            assert(op <= oend);
-    }   }
-    /* build CTable for MatchLengths */
-    {   unsigned max = MaxML;
-        size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */
-        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
-        nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
-        MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
-                                        count, max, mostFrequent, nbSeq,
-                                        MLFSELog, prevEntropy->fse.matchlengthCTable,
-                                        ML_defaultNorm, ML_defaultNormLog,
-                                        ZSTD_defaultAllowed, strategy);
-        assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
-        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
-                                                    count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
-                                                    prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
-                                                    workspace, wkspSize);
-            FORWARD_IF_ERROR(countSize);
-            if (MLtype == set_compressed)
-                lastNCount = op;
-            op += countSize;
-            assert(op <= oend);
-    }   }
-
-    *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
-
-    {   size_t const bitstreamSize = ZSTD_encodeSequences(
-                                        op, oend - op,
-                                        CTable_MatchLength, mlCodeTable,
-                                        CTable_OffsetBits, ofCodeTable,
-                                        CTable_LitLength, llCodeTable,
-                                        sequences, nbSeq,
-                                        longOffsets, bmi2);
-        FORWARD_IF_ERROR(bitstreamSize);
-        op += bitstreamSize;
-        assert(op <= oend);
-        /* zstd versions <= 1.3.4 mistakenly report corruption when
-         * FSE_readNCount() receives a buffer < 4 bytes.
-         * Fixed by https://github.com/facebook/zstd/pull/1146.
-         * This can happen when the last set_compressed table present is 2
-         * bytes and the bitstream is only one byte.
-         * In this exceedingly rare case, we will simply emit an uncompressed
-         * block, since it isn't worth optimizing.
-         */
-        if (lastNCount && (op - lastNCount) < 4) {
-            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
-            assert(op - lastNCount == 3);
-            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
-                        "emitting an uncompressed block.");
-            return 0;
-        }
-    }
-
-    DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
-    return op - ostart;
-}
-
-MEM_STATIC size_t
-ZSTD_compressSequences(seqStore_t* seqStorePtr,
-                       const ZSTD_entropyCTables_t* prevEntropy,
-                             ZSTD_entropyCTables_t* nextEntropy,
-                       const ZSTD_CCtx_params* cctxParams,
-                             void* dst, size_t dstCapacity,
-                             size_t srcSize,
-                             void* workspace, size_t wkspSize,
-                             int bmi2)
-{
-    size_t const cSize = ZSTD_compressSequences_internal(
-                            seqStorePtr, prevEntropy, nextEntropy, cctxParams,
-                            dst, dstCapacity,
-                            workspace, wkspSize, bmi2);
-    if (cSize == 0) return 0;
-    /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
-     * Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
-     */
-    if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
-        return 0;  /* block not compressed */
-    FORWARD_IF_ERROR(cSize);
-
-    /* Check compressibility */
-    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
-        if (cSize >= maxCSize) return 0;  /* block not compressed */
-    }
-
-    return cSize;
-}
-
-/* ZSTD_selectBlockCompressor() :
- * Not static, but internal use only (used by long distance matcher)
- * assumption : strat is a valid strategy */
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
-{
-    static const ZSTD_blockCompressor blockCompressor[3][ZSTD_STRATEGY_MAX+1] = {
-        { ZSTD_compressBlock_fast  /* default for 0 */,
-          ZSTD_compressBlock_fast,
-          ZSTD_compressBlock_doubleFast,
-          ZSTD_compressBlock_greedy,
-          ZSTD_compressBlock_lazy,
-          ZSTD_compressBlock_lazy2,
-          ZSTD_compressBlock_btlazy2,
-          ZSTD_compressBlock_btopt,
-          ZSTD_compressBlock_btultra,
-          ZSTD_compressBlock_btultra2 },
-        { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
-          ZSTD_compressBlock_fast_extDict,
-          ZSTD_compressBlock_doubleFast_extDict,
-          ZSTD_compressBlock_greedy_extDict,
-          ZSTD_compressBlock_lazy_extDict,
-          ZSTD_compressBlock_lazy2_extDict,
-          ZSTD_compressBlock_btlazy2_extDict,
-          ZSTD_compressBlock_btopt_extDict,
-          ZSTD_compressBlock_btultra_extDict,
-          ZSTD_compressBlock_btultra_extDict },
-        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
-          ZSTD_compressBlock_fast_dictMatchState,
-          ZSTD_compressBlock_doubleFast_dictMatchState,
-          ZSTD_compressBlock_greedy_dictMatchState,
-          ZSTD_compressBlock_lazy_dictMatchState,
-          ZSTD_compressBlock_lazy2_dictMatchState,
-          ZSTD_compressBlock_btlazy2_dictMatchState,
-          ZSTD_compressBlock_btopt_dictMatchState,
-          ZSTD_compressBlock_btultra_dictMatchState,
-          ZSTD_compressBlock_btultra_dictMatchState }
-    };
-    ZSTD_blockCompressor selectedCompressor;
-    ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
-
-    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
-    selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
-    assert(selectedCompressor != NULL);
-    return selectedCompressor;
-}
-
-static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
-                                   const BYTE* anchor, size_t lastLLSize)
-{
-    memcpy(seqStorePtr->lit, anchor, lastLLSize);
-    seqStorePtr->lit += lastLLSize;
-}
-
-void ZSTD_resetSeqStore(seqStore_t* ssPtr)
-{
-    ssPtr->lit = ssPtr->litStart;
-    ssPtr->sequences = ssPtr->sequencesStart;
-    ssPtr->longLengthID = 0;
-}
-
-typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
-
-static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
-{
-    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
-    DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
-    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
-    /* Assert that we have correctly flushed the ctx params into the ms's copy */
-    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
-    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
-        ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
-        return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
-    }
-    ZSTD_resetSeqStore(&(zc->seqStore));
-    /* required for optimal parser to read stats from dictionary */
-    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
-    /* tell the optimal parser how we expect to compress literals */
-    ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
-    /* a gap between an attached dict and the current window is not safe,
-     * they must remain adjacent,
-     * and when that stops being the case, the dict must be unset */
-    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
-
-    /* limited update after a very long match */
-    {   const BYTE* const base = ms->window.base;
-        const BYTE* const istart = (const BYTE*)src;
-        const U32 current = (U32)(istart-base);
-        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
-        if (current > ms->nextToUpdate + 384)
-            ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384));
-    }
-
-    /* select and store sequences */
-    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
-        size_t lastLLSize;
-        {   int i;
-            for (i = 0; i < ZSTD_REP_NUM; ++i)
-                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
-        }
-        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
-            assert(!zc->appliedParams.ldmParams.enableLdm);
-            /* Updates ldmSeqStore.pos */
-            lastLLSize =
-                ZSTD_ldm_blockCompress(&zc->externSeqStore,
-                                       ms, &zc->seqStore,
-                                       zc->blockState.nextCBlock->rep,
-                                       src, srcSize);
-            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
-        } else if (zc->appliedParams.ldmParams.enableLdm) {
-            rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0};
-
-            ldmSeqStore.seq = zc->ldmSequences;
-            ldmSeqStore.capacity = zc->maxNbLdmSequences;
-            /* Updates ldmSeqStore.size */
-            FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
-                                               &zc->appliedParams.ldmParams,
-                                               src, srcSize));
-            /* Updates ldmSeqStore.pos */
-            lastLLSize =
-                ZSTD_ldm_blockCompress(&ldmSeqStore,
-                                       ms, &zc->seqStore,
-                                       zc->blockState.nextCBlock->rep,
-                                       src, srcSize);
-            assert(ldmSeqStore.pos == ldmSeqStore.size);
-        } else {   /* not long range mode */
-            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
-            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
-        }
-        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
-            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
-    }   }
-    return ZSTDbss_compress;
-}
-
-static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
-                                        void* dst, size_t dstCapacity,
-                                        const void* src, size_t srcSize)
-{
-    size_t cSize;
-    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
-                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate);
-
-    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
-        FORWARD_IF_ERROR(bss);
-        if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
-    }
-
-    /* encode sequences and literals */
-    cSize = ZSTD_compressSequences(&zc->seqStore,
-            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
-            &zc->appliedParams,
-            dst, dstCapacity,
-            srcSize,
-            zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
-            zc->bmi2);
-
-out:
-    if (!ZSTD_isError(cSize) && cSize != 0) {
-        /* confirm repcodes and entropy tables when emitting a compressed block */
-        ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
-        zc->blockState.prevCBlock = zc->blockState.nextCBlock;
-        zc->blockState.nextCBlock = tmp;
-    }
-    /* We check that dictionaries have offset codes available for the first
-     * block. After the first block, the offcode table might not have large
-     * enough codes to represent the offsets in the data.
-     */
-    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
-        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
-
-    return cSize;
-}
-
-
-static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, void const* ip, void const* iend)
-{
-    if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
-        U32 const maxDist = (U32)1 << params->cParams.windowLog;
-        U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
-        U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
-        ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
-        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
-        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
-        ZSTD_reduceIndex(ms, params, correction);
-        if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
-        else ms->nextToUpdate -= correction;
-        /* invalidate dictionaries on overflow correction */
-        ms->loadedDictEnd = 0;
-        ms->dictMatchState = NULL;
-    }
-}
-
-
-/*! ZSTD_compress_frameChunk() :
-*   Compress a chunk of data into one or multiple blocks.
-*   All blocks will be terminated, all input will be consumed.
-*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
-*   Frame is assumed to be already started (header already produced)
-*   @return : compressed size, or an error code
-*/
-static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
-                                     void* dst, size_t dstCapacity,
-                               const void* src, size_t srcSize,
-                                     U32 lastFrameChunk)
-{
-    size_t blockSize = cctx->blockSize;
-    size_t remaining = srcSize;
-    const BYTE* ip = (const BYTE*)src;
-    BYTE* const ostart = (BYTE*)dst;
-    BYTE* op = ostart;
-    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
-    assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
-
-    DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
-    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
-        XXH64_update(&cctx->xxhState, src, srcSize);
-
-    while (remaining) {
-        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
-        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
-
-        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
-                        dstSize_tooSmall,
-                        "not enough space to store compressed block");
-        if (remaining < blockSize) blockSize = remaining;
-
-        ZSTD_overflowCorrectIfNeeded(ms, &cctx->appliedParams, ip, ip + blockSize);
-        ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
-
-        /* Ensure hash/chain table insertion resumes no sooner than lowlimit */
-        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
-
-        {   size_t cSize = ZSTD_compressBlock_internal(cctx,
-                                op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
-                                ip, blockSize);
-            FORWARD_IF_ERROR(cSize);
-
-            if (cSize == 0) {  /* block is not compressible */
-                cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
-                FORWARD_IF_ERROR(cSize);
-            } else {
-                U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
-                MEM_writeLE24(op, cBlockHeader24);
-                cSize += ZSTD_blockHeaderSize;
-            }
-
-            ip += blockSize;
-            assert(remaining >= blockSize);
-            remaining -= blockSize;
-            op += cSize;
-            assert(dstCapacity >= cSize);
-            dstCapacity -= cSize;
-            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
-                        (unsigned)cSize);
-    }   }
-
-    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
-    return (size_t)(op-ostart);
-}
-
-
-static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
-                                    ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID)
-{   BYTE* const op = (BYTE*)dst;
-    U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
-    U32   const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
-    U32   const checksumFlag = params.fParams.checksumFlag>0;
-    U32   const windowSize = (U32)1 << params.cParams.windowLog;
-    U32   const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
-    BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
-    U32   const fcsCode = params.fParams.contentSizeFlag ?
-                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
-    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
-    size_t pos=0;
-
-    assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
-    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall);
-    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
-                !params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
-
-    if (params.format == ZSTD_f_zstd1) {
-        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
-        pos = 4;
-    }
-    op[pos++] = frameHeaderDescriptionByte;
-    if (!singleSegment) op[pos++] = windowLogByte;
-    switch(dictIDSizeCode)
-    {
-        default:  assert(0); /* impossible */
-        case 0 : break;
-        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
-        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
-        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
-    }
-    switch(fcsCode)
-    {
-        default:  assert(0); /* impossible */
-        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
-        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
-        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
-        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
-    }
-    return pos;
-}
-
-/* ZSTD_writeLastEmptyBlock() :
- * output an empty Block with end-of-frame mark to complete a frame
- * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
- *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
- */
-size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
-{
-    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall);
-    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
-        MEM_writeLE24(dst, cBlockHeader24);
-        return ZSTD_blockHeaderSize;
-    }
-}
-
-size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
-{
-    RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong);
-    RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
-                    parameter_unsupported);
-    cctx->externSeqStore.seq = seq;
-    cctx->externSeqStore.size = nbSeq;
-    cctx->externSeqStore.capacity = nbSeq;
-    cctx->externSeqStore.pos = 0;
-    return 0;
-}
-
-
-static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
-                              void* dst, size_t dstCapacity,
-                        const void* src, size_t srcSize,
-                               U32 frame, U32 lastFrameChunk)
-{
-    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
-    size_t fhSize = 0;
-
-    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
-                cctx->stage, (unsigned)srcSize);
-    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
-                    "missing init (ZSTD_compressBegin)");
-
-    if (frame && (cctx->stage==ZSTDcs_init)) {
-        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
-                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
-        FORWARD_IF_ERROR(fhSize);
-        assert(fhSize <= dstCapacity);
-        dstCapacity -= fhSize;
-        dst = (char*)dst + fhSize;
-        cctx->stage = ZSTDcs_ongoing;
-    }
-
-    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */
-
-    if (!ZSTD_window_update(&ms->window, src, srcSize)) {
-        ms->nextToUpdate = ms->window.dictLimit;
-    }
-    if (cctx->appliedParams.ldmParams.enableLdm) {
-        ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
-    }
-
-    if (!frame) {
-        /* overflow check and correction for block mode */
-        ZSTD_overflowCorrectIfNeeded(ms, &cctx->appliedParams, src, (BYTE const*)src + srcSize);
-    }
-
-    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
-    {   size_t const cSize = frame ?
-                             ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
-                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
-        FORWARD_IF_ERROR(cSize);
-        cctx->consumedSrcSize += srcSize;
-        cctx->producedCSize += (cSize + fhSize);
-        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
-        if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
-            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
-            RETURN_ERROR_IF(
-                cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
-                srcSize_wrong,
-                "error : pledgedSrcSize = %u, while realSrcSize >= %u",
-                (unsigned)cctx->pledgedSrcSizePlusOne-1,
-                (unsigned)cctx->consumedSrcSize);
-        }
-        return cSize + fhSize;
-    }
-}
-
-size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
-                              void* dst, size_t dstCapacity,
-                        const void* src, size_t srcSize)
-{
-    DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
-    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
-}
-
-
-size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
-{
-    ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
-    assert(!ZSTD_checkCParams(cParams));
-    return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
-}
-
-size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
-    RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong);
-
-    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
-}
-
-/*! ZSTD_loadDictionaryContent() :
- *  @return : 0, or an error code
- */
-static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
-                                         ZSTD_CCtx_params const* params,
-                                         const void* src, size_t srcSize,
-                                         ZSTD_dictTableLoadMethod_e dtlm)
-{
-    const BYTE* ip = (const BYTE*) src;
-    const BYTE* const iend = ip + srcSize;
-
-    ZSTD_window_update(&ms->window, src, srcSize);
-    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
-
-    /* Assert that the ms params match the params we're being given */
-    ZSTD_assertEqualCParams(params->cParams, ms->cParams);
-
-    if (srcSize <= HASH_READ_SIZE) return 0;
-
-    while (iend - ip > HASH_READ_SIZE) {
-        size_t const remaining = iend - ip;
-        size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
-        const BYTE* const ichunk = ip + chunk;
-
-        ZSTD_overflowCorrectIfNeeded(ms, params, ip, ichunk);
-
-        switch(params->cParams.strategy)
-        {
-        case ZSTD_fast:
-            ZSTD_fillHashTable(ms, ichunk, dtlm);
-            break;
-        case ZSTD_dfast:
-            ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
-            break;
-
-        case ZSTD_greedy:
-        case ZSTD_lazy:
-        case ZSTD_lazy2:
-            if (chunk >= HASH_READ_SIZE)
-                ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
-            break;
-
-        case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
-        case ZSTD_btopt:
-        case ZSTD_btultra:
-        case ZSTD_btultra2:
-            if (chunk >= HASH_READ_SIZE)
-                ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
-            break;
-
-        default:
-            assert(0);  /* not possible : not a valid strategy id */
-        }
-
-        ip = ichunk;
-    }
-
-    ms->nextToUpdate = (U32)(iend - ms->window.base);
-    return 0;
-}
-
-
-/* Dictionaries that assign zero probability to symbols that show up cause problems
-   during FSE encoding.  Refuse dictionaries that assign zero probability to symbols
-   that we may encounter during compression.
-   NOTE: This behavior is not standard and could be improved in the future. */
-static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
-    U32 s;
-    RETURN_ERROR_IF(dictMaxSymbolValue < maxSymbolValue, dictionary_corrupted);
-    for (s = 0; s <= maxSymbolValue; ++s) {
-        RETURN_ERROR_IF(normalizedCounter[s] == 0, dictionary_corrupted);
-    }
-    return 0;
-}
-
-
-/* Dictionary format :
- * See :
- * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
- */
-/*! ZSTD_loadZstdDictionary() :
- * @return : dictID, or an error code
- *  assumptions : magic number assumed to be already checked
- *                dictSize assumed > 8
- */
-static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
-                                      ZSTD_matchState_t* ms,
-                                      ZSTD_CCtx_params const* params,
-                                      const void* dict, size_t dictSize,
-                                      ZSTD_dictTableLoadMethod_e dtlm,
-                                      void* workspace)
-{
-    const BYTE* dictPtr = (const BYTE*)dict;
-    const BYTE* const dictEnd = dictPtr + dictSize;
-    short offcodeNCount[MaxOff+1];
-    unsigned offcodeMaxValue = MaxOff;
-    size_t dictID;
-
-    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
-    assert(dictSize > 8);
-    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
-
-    dictPtr += 4;   /* skip magic number */
-    dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr);
-    dictPtr += 4;
-
-    {   unsigned maxSymbolValue = 255;
-        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
-        RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted);
-        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted);
-        dictPtr += hufHeaderSize;
-    }
-
-    {   unsigned offcodeLog;
-        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
-        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
-        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
-        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
-        /* fill all offset symbols to avoid garbage at end of table */
-        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
-                bs->entropy.fse.offcodeCTable,
-                offcodeNCount, MaxOff, offcodeLog,
-                workspace, HUF_WORKSPACE_SIZE)),
-            dictionary_corrupted);
-        dictPtr += offcodeHeaderSize;
-    }
-
-    {   short matchlengthNCount[MaxML+1];
-        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
-        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
-        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
-        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
-        /* Every match length code must have non-zero probability */
-        FORWARD_IF_ERROR( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
-        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
-                bs->entropy.fse.matchlengthCTable,
-                matchlengthNCount, matchlengthMaxValue, matchlengthLog,
-                workspace, HUF_WORKSPACE_SIZE)),
-            dictionary_corrupted);
-        dictPtr += matchlengthHeaderSize;
-    }
-
-    {   short litlengthNCount[MaxLL+1];
-        unsigned litlengthMaxValue = MaxLL, litlengthLog;
-        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
-        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
-        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
-        /* Every literal length code must have non-zero probability */
-        FORWARD_IF_ERROR( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
-        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
-                bs->entropy.fse.litlengthCTable,
-                litlengthNCount, litlengthMaxValue, litlengthLog,
-                workspace, HUF_WORKSPACE_SIZE)),
-            dictionary_corrupted);
-        dictPtr += litlengthHeaderSize;
-    }
-
-    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
-    bs->rep[0] = MEM_readLE32(dictPtr+0);
-    bs->rep[1] = MEM_readLE32(dictPtr+4);
-    bs->rep[2] = MEM_readLE32(dictPtr+8);
-    dictPtr += 12;
-
-    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
-        U32 offcodeMax = MaxOff;
-        if (dictContentSize <= ((U32)-1) - 128 KB) {
-            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
-            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
-        }
-        /* All offset values <= dictContentSize + 128 KB must be representable */
-        FORWARD_IF_ERROR(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
-        /* All repCodes must be <= dictContentSize and != 0 */
-        {   U32 u;
-            for (u=0; u<3; u++) {
-                RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted);
-                RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted);
-        }   }
-
-        bs->entropy.huf.repeatMode = HUF_repeat_valid;
-        bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
-        bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
-        bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
-        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
-        return dictID;
-    }
-}
-
-/** ZSTD_compress_insertDictionary() :
-*   @return : dictID, or an error code */
-static size_t
-ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
-                               ZSTD_matchState_t* ms,
-                         const ZSTD_CCtx_params* params,
-                         const void* dict, size_t dictSize,
-                               ZSTD_dictContentType_e dictContentType,
-                               ZSTD_dictTableLoadMethod_e dtlm,
-                               void* workspace)
-{
-    DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
-    if ((dict==NULL) || (dictSize<=8)) return 0;
-
-    ZSTD_reset_compressedBlockState(bs);
-
-    /* dict restricted modes */
-    if (dictContentType == ZSTD_dct_rawContent)
-        return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
-
-    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
-        if (dictContentType == ZSTD_dct_auto) {
-            DEBUGLOG(4, "raw content dictionary detected");
-            return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
-        }
-        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong);
-        assert(0);   /* impossible */
-    }
-
-    /* dict as full zstd dictionary */
-    return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, dtlm, workspace);
-}
-
-/*! ZSTD_compressBegin_internal() :
- * @return : 0, or an error code */
-static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
-                                    const void* dict, size_t dictSize,
-                                    ZSTD_dictContentType_e dictContentType,
-                                    ZSTD_dictTableLoadMethod_e dtlm,
-                                    const ZSTD_CDict* cdict,
-                                    ZSTD_CCtx_params params, U64 pledgedSrcSize,
-                                    ZSTD_buffered_policy_e zbuff)
-{
-    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog);
-    /* params are supposed to be fully validated at this point */
-    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
-    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
-
-    if (cdict && cdict->dictContentSize>0) {
-        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
-    }
-
-    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
-                                     ZSTDcrp_continue, zbuff) );
-    {   size_t const dictID = ZSTD_compress_insertDictionary(
-                cctx->blockState.prevCBlock, &cctx->blockState.matchState,
-                &params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
-        FORWARD_IF_ERROR(dictID);
-        assert(dictID <= UINT_MAX);
-        cctx->dictID = (U32)dictID;
-    }
-    return 0;
-}
-
-size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
-                                    const void* dict, size_t dictSize,
-                                    ZSTD_dictContentType_e dictContentType,
-                                    ZSTD_dictTableLoadMethod_e dtlm,
-                                    const ZSTD_CDict* cdict,
-                                    ZSTD_CCtx_params params,
-                                    unsigned long long pledgedSrcSize)
-{
-    DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
-    /* compression parameters verification and optimization */
-    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
-    return ZSTD_compressBegin_internal(cctx,
-                                       dict, dictSize, dictContentType, dtlm,
-                                       cdict,
-                                       params, pledgedSrcSize,
-                                       ZSTDb_not_buffered);
-}
-
-/*! ZSTD_compressBegin_advanced() :
-*   @return : 0, or an error code */
-size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
-                             const void* dict, size_t dictSize,
-                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
-{
-    ZSTD_CCtx_params const cctxParams =
-            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
-    return ZSTD_compressBegin_advanced_internal(cctx,
-                                            dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
-                                            NULL /*cdict*/,
-                                            cctxParams, pledgedSrcSize);
-}
-
-size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
-{
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
-    ZSTD_CCtx_params const cctxParams =
-            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
-    DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
-    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
-                                       cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
-}
-
-size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
-{
-    return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
-}
-
-
-/*! ZSTD_writeEpilogue() :
-*   Ends a frame.
-*   @return : nb of bytes written into dst (or an error code) */
-static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
-{
-    BYTE* const ostart = (BYTE*)dst;
-    BYTE* op = ostart;
-    size_t fhSize = 0;
-
-    DEBUGLOG(4, "ZSTD_writeEpilogue");
-    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
-
-    /* special case : empty frame */
-    if (cctx->stage == ZSTDcs_init) {
-        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
-        FORWARD_IF_ERROR(fhSize);
-        dstCapacity -= fhSize;
-        op += fhSize;
-        cctx->stage = ZSTDcs_ongoing;
-    }
-
-    if (cctx->stage != ZSTDcs_ending) {
-        /* write one last empty block, make it the "last" block */
-        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
-        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
-        MEM_writeLE32(op, cBlockHeader24);
-        op += ZSTD_blockHeaderSize;
-        dstCapacity -= ZSTD_blockHeaderSize;
-    }
-
-    if (cctx->appliedParams.fParams.checksumFlag) {
-        U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
-        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
-        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
-        MEM_writeLE32(op, checksum);
-        op += 4;
-    }
-
-    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
-    return op-ostart;
-}
-
-size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
-                         void* dst, size_t dstCapacity,
-                   const void* src, size_t srcSize)
-{
-    size_t endResult;
-    size_t const cSize = ZSTD_compressContinue_internal(cctx,
-                                dst, dstCapacity, src, srcSize,
-                                1 /* frame mode */, 1 /* last chunk */);
-    FORWARD_IF_ERROR(cSize);
-    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
-    FORWARD_IF_ERROR(endResult);
-    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
-    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
-        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
-        DEBUGLOG(4, "end of frame : controlling src size");
-        RETURN_ERROR_IF(
-            cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
-            srcSize_wrong,
-             "error : pledgedSrcSize = %u, while realSrcSize = %u",
-            (unsigned)cctx->pledgedSrcSizePlusOne-1,
-            (unsigned)cctx->consumedSrcSize);
-    }
-    return cSize + endResult;
-}
-
-
-static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
-                                      void* dst, size_t dstCapacity,
-                                const void* src, size_t srcSize,
-                                const void* dict,size_t dictSize,
-                                      ZSTD_parameters params)
-{
-    ZSTD_CCtx_params const cctxParams =
-            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
-    DEBUGLOG(4, "ZSTD_compress_internal");
-    return ZSTD_compress_advanced_internal(cctx,
-                                           dst, dstCapacity,
-                                           src, srcSize,
-                                           dict, dictSize,
-                                           cctxParams);
-}
-
-size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
-                               void* dst, size_t dstCapacity,
-                         const void* src, size_t srcSize,
-                         const void* dict,size_t dictSize,
-                               ZSTD_parameters params)
-{
-    DEBUGLOG(4, "ZSTD_compress_advanced");
-    FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams));
-    return ZSTD_compress_internal(cctx,
-                                  dst, dstCapacity,
-                                  src, srcSize,
-                                  dict, dictSize,
-                                  params);
-}
-
-/* Internal */
-size_t ZSTD_compress_advanced_internal(
-        ZSTD_CCtx* cctx,
-        void* dst, size_t dstCapacity,
-        const void* src, size_t srcSize,
-        const void* dict,size_t dictSize,
-        ZSTD_CCtx_params params)
-{
-    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
-    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
-                         dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
-                         params, srcSize, ZSTDb_not_buffered) );
-    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
-}
-
-size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
-                               void* dst, size_t dstCapacity,
-                         const void* src, size_t srcSize,
-                         const void* dict, size_t dictSize,
-                               int compressionLevel)
-{
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize + (!srcSize), dict ? dictSize : 0);
-    ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
-    assert(params.fParams.contentSizeFlag == 1);
-    return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams);
-}
-
-size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
-                         void* dst, size_t dstCapacity,
-                   const void* src, size_t srcSize,
-                         int compressionLevel)
-{
-    DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
-    assert(cctx != NULL);
-    return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
-}
-
-size_t ZSTD_compress(void* dst, size_t dstCapacity,
-               const void* src, size_t srcSize,
-                     int compressionLevel)
-{
-    size_t result;
-    ZSTD_CCtx ctxBody;
-    ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
-    result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
-    ZSTD_freeCCtxContent(&ctxBody);   /* can't free ctxBody itself, as it's on stack; free only heap content */
-    return result;
-}
-
-
-/* =====  Dictionary API  ===== */
-
-/*! ZSTD_estimateCDictSize_advanced() :
- *  Estimate the amount of memory that will be needed to create a dictionary with the following arguments */
-size_t ZSTD_estimateCDictSize_advanced(
-        size_t dictSize, ZSTD_compressionParameters cParams,
-        ZSTD_dictLoadMethod_e dictLoadMethod)
-{
-    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
-    return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
-           + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
-}
-
-size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
-{
-    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
-    return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
-}
-
-size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
-{
-    if (cdict==NULL) return 0;   /* support sizeof on NULL */
-    DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
-    return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
-}
-
-static size_t ZSTD_initCDict_internal(
-                    ZSTD_CDict* cdict,
-              const void* dictBuffer, size_t dictSize,
-                    ZSTD_dictLoadMethod_e dictLoadMethod,
-                    ZSTD_dictContentType_e dictContentType,
-                    ZSTD_compressionParameters cParams)
-{
-    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
-    assert(!ZSTD_checkCParams(cParams));
-    cdict->matchState.cParams = cParams;
-    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
-        cdict->dictBuffer = NULL;
-        cdict->dictContent = dictBuffer;
-    } else {
-        void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
-        cdict->dictBuffer = internalBuffer;
-        cdict->dictContent = internalBuffer;
-        RETURN_ERROR_IF(!internalBuffer, memory_allocation);
-        memcpy(internalBuffer, dictBuffer, dictSize);
-    }
-    cdict->dictContentSize = dictSize;
-
-    /* Reset the state to no dictionary */
-    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
-    {   void* const end = ZSTD_reset_matchState(&cdict->matchState,
-                            (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32,
-                            &cParams,
-                             ZSTDcrp_continue, ZSTD_resetTarget_CDict);
-        assert(end == (char*)cdict->workspace + cdict->workspaceSize);
-        (void)end;
-    }
-    /* (Maybe) load the dictionary
-     * Skips loading the dictionary if it is <= 8 bytes.
-     */
-    {   ZSTD_CCtx_params params;
-        memset(&params, 0, sizeof(params));
-        params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
-        params.fParams.contentSizeFlag = 1;
-        params.cParams = cParams;
-        {   size_t const dictID = ZSTD_compress_insertDictionary(
-                    &cdict->cBlockState, &cdict->matchState, &params,
-                    cdict->dictContent, cdict->dictContentSize,
-                    dictContentType, ZSTD_dtlm_full, cdict->workspace);
-            FORWARD_IF_ERROR(dictID);
-            assert(dictID <= (size_t)(U32)-1);
-            cdict->dictID = (U32)dictID;
-        }
-    }
-
-    return 0;
-}
-
-ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
-                                      ZSTD_dictLoadMethod_e dictLoadMethod,
-                                      ZSTD_dictContentType_e dictContentType,
-                                      ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
-{
-    DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (unsigned)dictContentType);
-    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
-
-    {   ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
-        size_t const workspaceSize = HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
-        void* const workspace = ZSTD_malloc(workspaceSize, customMem);
-
-        if (!cdict || !workspace) {
-            ZSTD_free(cdict, customMem);
-            ZSTD_free(workspace, customMem);
-            return NULL;
-        }
-        cdict->customMem = customMem;
-        cdict->workspace = workspace;
-        cdict->workspaceSize = workspaceSize;
-        if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
-                                        dictBuffer, dictSize,
-                                        dictLoadMethod, dictContentType,
-                                        cParams) )) {
-            ZSTD_freeCDict(cdict);
-            return NULL;
-        }
-
-        return cdict;
-    }
-}
-
-ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
-{
-    ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
-    return ZSTD_createCDict_advanced(dict, dictSize,
-                                     ZSTD_dlm_byCopy, ZSTD_dct_auto,
-                                     cParams, ZSTD_defaultCMem);
-}
-
-ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
-{
-    ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
-    return ZSTD_createCDict_advanced(dict, dictSize,
-                                     ZSTD_dlm_byRef, ZSTD_dct_auto,
-                                     cParams, ZSTD_defaultCMem);
-}
-
-size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
-{
-    if (cdict==NULL) return 0;   /* support free on NULL */
-    {   ZSTD_customMem const cMem = cdict->customMem;
-        ZSTD_free(cdict->workspace, cMem);
-        ZSTD_free(cdict->dictBuffer, cMem);
-        ZSTD_free(cdict, cMem);
-        return 0;
-    }
-}
-
-/*! ZSTD_initStaticCDict_advanced() :
- *  Generate a digested dictionary in the provided memory area.
- *  workspace: The memory area to emplace the dictionary into.
- *             Provided pointer must be 8-byte aligned.
- *             It must outlive dictionary usage.
- *  workspaceSize: Use ZSTD_estimateCDictSize()
- *                 to determine how large workspace must be.
- *  cParams : use ZSTD_getCParams() to transform a compression level
- *            into its relevant cParams.
- * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
- *  Note : there is no corresponding "free" function.
- *         Since workspace was allocated externally, it must be freed externally.
- */
-const ZSTD_CDict* ZSTD_initStaticCDict(
-                                 void* workspace, size_t workspaceSize,
-                           const void* dict, size_t dictSize,
-                                 ZSTD_dictLoadMethod_e dictLoadMethod,
-                                 ZSTD_dictContentType_e dictContentType,
-                                 ZSTD_compressionParameters cParams)
-{
-    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
-    size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize)
-                            + HUF_WORKSPACE_SIZE + matchStateSize;
-    ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace;
-    void* ptr;
-    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
-    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
-        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
-    if (workspaceSize < neededSize) return NULL;
-
-    if (dictLoadMethod == ZSTD_dlm_byCopy) {
-        memcpy(cdict+1, dict, dictSize);
-        dict = cdict+1;
-        ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize;
-    } else {
-        ptr = cdict+1;
-    }
-    cdict->workspace = ptr;
-    cdict->workspaceSize = HUF_WORKSPACE_SIZE + matchStateSize;
-
-    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
-                                              dict, dictSize,
-                                              ZSTD_dlm_byRef, dictContentType,
-                                              cParams) ))
-        return NULL;
-
-    return cdict;
-}
-
-ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
-{
-    assert(cdict != NULL);
-    return cdict->matchState.cParams;
-}
-
-/* ZSTD_compressBegin_usingCDict_advanced() :
- * cdict must be != NULL */
-size_t ZSTD_compressBegin_usingCDict_advanced(
-    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
-    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
-{
-    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
-    RETURN_ERROR_IF(cdict==NULL, dictionary_wrong);
-    {   ZSTD_CCtx_params params = cctx->requestedParams;
-        params.cParams = ZSTD_getCParamsFromCDict(cdict);
-        /* Increase window log to fit the entire dictionary and source if the
-         * source size is known. Limit the increase to 19, which is the
-         * window log for compression level 1 with the largest source size.
-         */
-        if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
-            U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
-            U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
-            params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog);
-        }
-        params.fParams = fParams;
-        return ZSTD_compressBegin_internal(cctx,
-                                           NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
-                                           cdict,
-                                           params, pledgedSrcSize,
-                                           ZSTDb_not_buffered);
-    }
-}
-
-/* ZSTD_compressBegin_usingCDict() :
- * pledgedSrcSize=0 means "unknown"
- * if pledgedSrcSize>0, it will enable contentSizeFlag */
-size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
-{
-    ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
-    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
-    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
-}
-
-size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
-                                void* dst, size_t dstCapacity,
-                                const void* src, size_t srcSize,
-                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
-{
-    FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */
-    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
-}
-
-/*! ZSTD_compress_usingCDict() :
- *  Compression using a digested Dictionary.
- *  Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
- *  Note that compression parameters are decided at CDict creation time
- *  while frame parameters are hardcoded */
-size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
-                                void* dst, size_t dstCapacity,
-                                const void* src, size_t srcSize,
-                                const ZSTD_CDict* cdict)
-{
-    ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
-    return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
-}
-
-
-
-/* ******************************************************************
-*  Streaming
-********************************************************************/
-
-ZSTD_CStream* ZSTD_createCStream(void)
-{
-    DEBUGLOG(3, "ZSTD_createCStream");
-    return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
-}
-
-ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
-{
-    return ZSTD_initStaticCCtx(workspace, workspaceSize);
-}
-
-ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
-{   /* CStream and CCtx are now same object */
-    return ZSTD_createCCtx_advanced(customMem);
-}
-
-size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
-{
-    return ZSTD_freeCCtx(zcs);   /* same object */
-}
-
-
-
-/*======   Initialization   ======*/
-
-size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }
-
-size_t ZSTD_CStreamOutSize(void)
-{
-    return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
-}
-
-static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
-                    const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType,
-                    const ZSTD_CDict* const cdict,
-                    ZSTD_CCtx_params params, unsigned long long const pledgedSrcSize)
-{
-    DEBUGLOG(4, "ZSTD_resetCStream_internal");
-    /* Finalize the compression parameters */
-    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
-    /* params are supposed to be fully validated at this point */
-    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
-    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
-
-    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
-                                         dict, dictSize, dictContentType, ZSTD_dtlm_fast,
-                                         cdict,
-                                         params, pledgedSrcSize,
-                                         ZSTDb_buffered) );
-
-    cctx->inToCompress = 0;
-    cctx->inBuffPos = 0;
-    cctx->inBuffTarget = cctx->blockSize
-                      + (cctx->blockSize == pledgedSrcSize);   /* for small input: avoid automatic flush on reaching end of block, since it would require to add a 3-bytes null block to end frame */
-    cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
-    cctx->streamStage = zcss_load;
-    cctx->frameEnded = 0;
-    return 0;   /* ready to go */
-}
-
-/* ZSTD_resetCStream():
- * pledgedSrcSize == 0 means "unknown" */
-size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
-{
-    /* temporary : 0 interpreted as "unknown" during transition period.
-     * Users wishing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
-     * 0 will be interpreted as "empty" in the future.
-     */
-    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
-    DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
-    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
-    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
-    return 0;
-}
-
-/*! ZSTD_initCStream_internal() :
- *  Note : for lib/compress only. Used by zstdmt_compress.c.
- *  Assumption 1 : params are valid
- *  Assumption 2 : either dict, or cdict, is defined, not both */
-size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
-                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
-                    ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
-{
-    DEBUGLOG(4, "ZSTD_initCStream_internal");
-    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
-    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
-    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
-    zcs->requestedParams = params;
-    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
-    if (dict) {
-        FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
-    } else {
-        /* Dictionary is cleared if !cdict */
-        FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
-    }
-    return 0;
-}
-
-/* ZSTD_initCStream_usingCDict_advanced() :
- * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
-size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
-                                            const ZSTD_CDict* cdict,
-                                            ZSTD_frameParameters fParams,
-                                            unsigned long long pledgedSrcSize)
-{
-    DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
-    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
-    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
-    zcs->requestedParams.fParams = fParams;
-    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
-    return 0;
-}
-
-/* note : cdict must outlive compression session */
-size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
-{
-    DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
-    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
-    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
-    return 0;
-}
-
-
-/* ZSTD_initCStream_advanced() :
- * pledgedSrcSize must be exact.
- * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
- * dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. */
-size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
-                                 const void* dict, size_t dictSize,
-                                 ZSTD_parameters params, unsigned long long pss)
-{
-    /* for compatibility with older programs relying on this behavior.
-     * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
-     * This line will be removed in the future.
-     */
-    U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
-    DEBUGLOG(4, "ZSTD_initCStream_advanced");
-    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
-    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
-    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
-    zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
-    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
-    return 0;
-}
-
-size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
-{
-    DEBUGLOG(4, "ZSTD_initCStream_usingDict");
-    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
-    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
-    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
-    return 0;
-}
-
-size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
-{
-    /* temporary : 0 interpreted as "unknown" during transition period.
-     * Users wishing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
-     * 0 will be interpreted as "empty" in the future.
-     */
-    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
-    DEBUGLOG(4, "ZSTD_initCStream_srcSize");
-    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
-    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );
-    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
-    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
-    return 0;
-}
-
-size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
-{
-    DEBUGLOG(4, "ZSTD_initCStream");
-    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
-    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );
-    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
-    return 0;
-}
-
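The `ZSTD_initCStream*()` wrappers deleted above all reduce to the newer parameter-setting API (`ZSTD_CCtx_reset()` + `ZSTD_CCtx_setParameter()` + `ZSTD_CCtx_setPledgedSrcSize()`), stable in zstd since v1.4.0. A minimal sketch, not part of the vendored file (the helper name and the `CHECK_Z` macro are illustrative), mirroring what `ZSTD_initCStream_srcSize()` above does, apart from the temporary `pss==0` translation:

```c
#include <zstd.h>

#define CHECK_Z(e) { size_t const _r = (e); if (ZSTD_isError(_r)) return _r; }

/* Mirrors the body of ZSTD_initCStream_srcSize(zcs, level, srcSize) above,
 * built only from the public advanced-parameter calls it forwards to. */
static size_t init_cstream_like_srcSize(ZSTD_CStream* zcs, int level,
                                        unsigned long long srcSize)
{
    CHECK_Z( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );   /* keep params, drop session state */
    CHECK_Z( ZSTD_CCtx_refCDict(zcs, NULL) );                   /* clear any referenced dictionary */
    CHECK_Z( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, level) );
    CHECK_Z( ZSTD_CCtx_setPledgedSrcSize(zcs, srcSize) );       /* use ZSTD_CONTENTSIZE_UNKNOWN if not known */
    return 0;
}
```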
-/*======   Compression   ======*/
-
-static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
-{
-    size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
-    if (hintInSize==0) hintInSize = cctx->blockSize;
-    return hintInSize;
-}
-
-static size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
-                       const void* src, size_t srcSize)
-{
-    size_t const length = MIN(dstCapacity, srcSize);
-    if (length) memcpy(dst, src, length);
-    return length;
-}
-
-/** ZSTD_compressStream_generic():
- *  internal function for all *compressStream*() variants
- *  non-static, because can be called from zstdmt_compress.c
- * @return : hint size for next input */
-static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
-                                          ZSTD_outBuffer* output,
-                                          ZSTD_inBuffer* input,
-                                          ZSTD_EndDirective const flushMode)
-{
-    const char* const istart = (const char*)input->src;
-    const char* const iend = istart + input->size;
-    const char* ip = istart + input->pos;
-    char* const ostart = (char*)output->dst;
-    char* const oend = ostart + output->size;
-    char* op = ostart + output->pos;
-    U32 someMoreWork = 1;
-
-    /* check expectations */
-    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
-    assert(zcs->inBuff != NULL);
-    assert(zcs->inBuffSize > 0);
-    assert(zcs->outBuff !=  NULL);
-    assert(zcs->outBuffSize > 0);
-    assert(output->pos <= output->size);
-    assert(input->pos <= input->size);
-
-    while (someMoreWork) {
-        switch(zcs->streamStage)
-        {
-        case zcss_init:
-            RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
-
-        case zcss_load:
-            if ( (flushMode == ZSTD_e_end)
-              && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip))  /* enough dstCapacity */
-              && (zcs->inBuffPos == 0) ) {
-                /* shortcut to compression pass directly into output buffer */
-                size_t const cSize = ZSTD_compressEnd(zcs,
-                                                op, oend-op, ip, iend-ip);
-                DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
-                FORWARD_IF_ERROR(cSize);
-                ip = iend;
-                op += cSize;
-                zcs->frameEnded = 1;
-                ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
-                someMoreWork = 0; break;
-            }
-            /* complete loading into inBuffer */
-            {   size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
-                size_t const loaded = ZSTD_limitCopy(
-                                        zcs->inBuff + zcs->inBuffPos, toLoad,
-                                        ip, iend-ip);
-                zcs->inBuffPos += loaded;
-                ip += loaded;
-                if ( (flushMode == ZSTD_e_continue)
-                  && (zcs->inBuffPos < zcs->inBuffTarget) ) {
-                    /* not enough input to fill full block : stop here */
-                    someMoreWork = 0; break;
-                }
-                if ( (flushMode == ZSTD_e_flush)
-                  && (zcs->inBuffPos == zcs->inToCompress) ) {
-                    /* empty */
-                    someMoreWork = 0; break;
-                }
-            }
-            /* compress current block (note : this stage cannot be stopped in the middle) */
-            DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
-            {   void* cDst;
-                size_t cSize;
-                size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
-                size_t oSize = oend-op;
-                unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
-                if (oSize >= ZSTD_compressBound(iSize))
-                    cDst = op;   /* compress into output buffer, to skip flush stage */
-                else
-                    cDst = zcs->outBuff, oSize = zcs->outBuffSize;
-                cSize = lastBlock ?
-                        ZSTD_compressEnd(zcs, cDst, oSize,
-                                    zcs->inBuff + zcs->inToCompress, iSize) :
-                        ZSTD_compressContinue(zcs, cDst, oSize,
-                                    zcs->inBuff + zcs->inToCompress, iSize);
-                FORWARD_IF_ERROR(cSize);
-                zcs->frameEnded = lastBlock;
-                /* prepare next block */
-                zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
-                if (zcs->inBuffTarget > zcs->inBuffSize)
-                    zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
-                DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
-                         (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
-                if (!lastBlock)
-                    assert(zcs->inBuffTarget <= zcs->inBuffSize);
-                zcs->inToCompress = zcs->inBuffPos;
-                if (cDst == op) {  /* no need to flush */
-                    op += cSize;
-                    if (zcs->frameEnded) {
-                        DEBUGLOG(5, "Frame completed directly in outBuffer");
-                        someMoreWork = 0;
-                        ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
-                    }
-                    break;
-                }
-                zcs->outBuffContentSize = cSize;
-                zcs->outBuffFlushedSize = 0;
-                zcs->streamStage = zcss_flush; /* pass-through to flush stage */
-            }
-	    /* fall-through */
-        case zcss_flush:
-            DEBUGLOG(5, "flush stage");
-            {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
-                size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
-                            zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
-                DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
-                            (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
-                op += flushed;
-                zcs->outBuffFlushedSize += flushed;
-                if (toFlush!=flushed) {
-                    /* flush not fully completed, presumably because dst is too small */
-                    assert(op==oend);
-                    someMoreWork = 0;
-                    break;
-                }
-                zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
-                if (zcs->frameEnded) {
-                    DEBUGLOG(5, "Frame completed on flush");
-                    someMoreWork = 0;
-                    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
-                    break;
-                }
-                zcs->streamStage = zcss_load;
-                break;
-            }
-
-        default: /* impossible */
-            assert(0);
-        }
-    }
-
-    input->pos = ip - istart;
-    output->pos = op - ostart;
-    if (zcs->frameEnded) return 0;
-    return ZSTD_nextInputSizeHint(zcs);
-}
-
-static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
-{
-#ifdef ZSTD_MULTITHREAD
-    if (cctx->appliedParams.nbWorkers >= 1) {
-        assert(cctx->mtctx != NULL);
-        return ZSTDMT_nextInputSizeHint(cctx->mtctx);
-    }
-#endif
-    return ZSTD_nextInputSizeHint(cctx);
-
-}
-
-size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
-{
-    FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );
-    return ZSTD_nextInputSizeHint_MTorST(zcs);
-}
-
-
-size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
-                             ZSTD_outBuffer* output,
-                             ZSTD_inBuffer* input,
-                             ZSTD_EndDirective endOp)
-{
-    DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
-    /* check conditions */
-    RETURN_ERROR_IF(output->pos > output->size, GENERIC);
-    RETURN_ERROR_IF(input->pos  > input->size, GENERIC);
-    assert(cctx!=NULL);
-
-    /* transparent initialization stage */
-    if (cctx->streamStage == zcss_init) {
-        ZSTD_CCtx_params params = cctx->requestedParams;
-        ZSTD_prefixDict const prefixDict = cctx->prefixDict;
-        FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) ); /* Init the local dict if present. */
-        memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));   /* single usage */
-        assert(prefixDict.dict==NULL || cctx->cdict==NULL);    /* only one can be set */
-        DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
-        if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1;  /* auto-fix pledgedSrcSize */
-        params.cParams = ZSTD_getCParamsFromCCtxParams(
-                &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);
-
-
-#ifdef ZSTD_MULTITHREAD
-        if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
-            params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
-        }
-        if (params.nbWorkers > 0) {
-            /* mt context creation */
-            if (cctx->mtctx == NULL) {
-                DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
-                            params.nbWorkers);
-                cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
-                RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation);
-            }
-            /* mt compression */
-            DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
-            FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
-                        cctx->mtctx,
-                        prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent,
-                        cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
-            cctx->streamStage = zcss_load;
-            cctx->appliedParams.nbWorkers = params.nbWorkers;
-        } else
-#endif
-        {   FORWARD_IF_ERROR( ZSTD_resetCStream_internal(cctx,
-                            prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
-                            cctx->cdict,
-                            params, cctx->pledgedSrcSizePlusOne-1) );
-            assert(cctx->streamStage == zcss_load);
-            assert(cctx->appliedParams.nbWorkers == 0);
-    }   }
-    /* end of transparent initialization stage */
-
-    /* compression stage */
-#ifdef ZSTD_MULTITHREAD
-    if (cctx->appliedParams.nbWorkers > 0) {
-        int const forceMaxProgress = (endOp == ZSTD_e_flush || endOp == ZSTD_e_end);
-        size_t flushMin;
-        assert(forceMaxProgress || endOp == ZSTD_e_continue /* Protection for a new flush type */);
-        if (cctx->cParamsChanged) {
-            ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
-            cctx->cParamsChanged = 0;
-        }
-        do {
-            flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
-            if ( ZSTD_isError(flushMin)
-              || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
-                ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
-            }
-            FORWARD_IF_ERROR(flushMin);
-        } while (forceMaxProgress && flushMin != 0 && output->pos < output->size);
-        DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
-        /* Either we don't require maximum forward progress, we've finished the
-         * flush, or we are out of output space.
-         */
-        assert(!forceMaxProgress || flushMin == 0 || output->pos == output->size);
-        return flushMin;
-    }
-#endif
-    FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) );
-    DEBUGLOG(5, "completed ZSTD_compressStream2");
-    return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
-}
-
-size_t ZSTD_compressStream2_simpleArgs (
-                            ZSTD_CCtx* cctx,
-                            void* dst, size_t dstCapacity, size_t* dstPos,
-                      const void* src, size_t srcSize, size_t* srcPos,
-                            ZSTD_EndDirective endOp)
-{
-    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
-    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
-    /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
-    size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
-    *dstPos = output.pos;
-    *srcPos = input.pos;
-    return cErr;
-}
-
-size_t ZSTD_compress2(ZSTD_CCtx* cctx,
-                      void* dst, size_t dstCapacity,
-                      const void* src, size_t srcSize)
-{
-    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
-    {   size_t oPos = 0;
-        size_t iPos = 0;
-        size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
-                                        dst, dstCapacity, &oPos,
-                                        src, srcSize, &iPos,
-                                        ZSTD_e_end);
-        FORWARD_IF_ERROR(result);
-        if (result != 0) {  /* compression not completed, due to lack of output space */
-            assert(oPos == dstCapacity);
-            RETURN_ERROR(dstSize_tooSmall);
-        }
-        assert(iPos == srcSize);   /* all input is expected to be consumed */
-        return oPos;
-    }
-}
-
-/*======   Finalize   ======*/
-
-/*! ZSTD_flushStream() :
- * @return : amount of data remaining to flush */
-size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
-{
-    ZSTD_inBuffer input = { NULL, 0, 0 };
-    return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
-}
-
-
-size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
-{
-    ZSTD_inBuffer input = { NULL, 0, 0 };
-    size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
-    FORWARD_IF_ERROR( remainingToFlush );
-    if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush;   /* minimal estimation */
-    /* single thread mode : attempt to calculate remaining to flush more precisely */
-    {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
-        size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
-        size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
-        DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
-        return toFlush;
-    }
-}
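A minimal usage sketch, not part of the vendored sources, showing how the streaming entry points above are driven. The helper name is illustrative, and it assumes `dstCapacity >= ZSTD_compressBound(srcSize)` so a single `ZSTD_e_end` pass always completes:

```c
#include <assert.h>
#include <zstd.h>

/* Compress src into dst as one frame, using the streaming API.
 * Assumes dstCapacity >= ZSTD_compressBound(srcSize). */
static size_t compress_one_frame(ZSTD_CCtx* cctx,
                                 void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize, int level)
{
    ZSTD_inBuffer  input  = { src, srcSize, 0 };
    ZSTD_outBuffer output = { dst, dstCapacity, 0 };
    size_t remaining;
    assert(dstCapacity >= ZSTD_compressBound(srcSize));
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
    do {
        remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;
    } while (remaining != 0);   /* 0 => frame fully written and flushed */
    return output.pos;          /* compressed size */
}
```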
-
-
-/*-=====  Pre-defined compression levels  =====-*/
-
-#define ZSTD_MAX_CLEVEL     22
-int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
-int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
-
-static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
-{   /* "default" - for any srcSize > 256 KB */
-    /* W,  C,  H,  S,  L, TL, strat */
-    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
-    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
-    { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
-    { 21, 16, 17,  1,  5,  1, ZSTD_dfast   },  /* level  3 */
-    { 21, 18, 18,  1,  5,  1, ZSTD_dfast   },  /* level  4 */
-    { 21, 18, 19,  2,  5,  2, ZSTD_greedy  },  /* level  5 */
-    { 21, 19, 19,  3,  5,  4, ZSTD_greedy  },  /* level  6 */
-    { 21, 19, 19,  3,  5,  8, ZSTD_lazy    },  /* level  7 */
-    { 21, 19, 19,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
-    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
-    { 22, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
-    { 22, 21, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
-    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
-    { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 13 */
-    { 22, 22, 23,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
-    { 22, 23, 23,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
-    { 22, 22, 22,  5,  5, 48, ZSTD_btopt   },  /* level 16 */
-    { 23, 23, 22,  5,  4, 64, ZSTD_btopt   },  /* level 17 */
-    { 23, 23, 22,  6,  3, 64, ZSTD_btultra },  /* level 18 */
-    { 23, 24, 22,  7,  3,256, ZSTD_btultra2},  /* level 19 */
-    { 25, 25, 23,  7,  3,256, ZSTD_btultra2},  /* level 20 */
-    { 26, 26, 24,  7,  3,512, ZSTD_btultra2},  /* level 21 */
-    { 27, 27, 25,  9,  3,999, ZSTD_btultra2},  /* level 22 */
-},
-{   /* for srcSize <= 256 KB */
-    /* W,  C,  H,  S,  L,  T, strat */
-    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
-    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
-    { 18, 14, 14,  1,  5,  1, ZSTD_dfast   },  /* level  2 */
-    { 18, 16, 16,  1,  4,  1, ZSTD_dfast   },  /* level  3 */
-    { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/
-    { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/
-    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
-    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
-    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
-    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
-    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
-    { 18, 18, 19,  5,  4, 12, ZSTD_btlazy2 },  /* level 11.*/
-    { 18, 19, 19,  7,  4, 12, ZSTD_btlazy2 },  /* level 12.*/
-    { 18, 18, 19,  4,  4, 16, ZSTD_btopt   },  /* level 13 */
-    { 18, 18, 19,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
-    { 18, 18, 19,  6,  3,128, ZSTD_btopt   },  /* level 15.*/
-    { 18, 19, 19,  6,  3,128, ZSTD_btultra },  /* level 16.*/
-    { 18, 19, 19,  8,  3,256, ZSTD_btultra },  /* level 17.*/
-    { 18, 19, 19,  6,  3,128, ZSTD_btultra2},  /* level 18.*/
-    { 18, 19, 19,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
-    { 18, 19, 19, 10,  3,512, ZSTD_btultra2},  /* level 20.*/
-    { 18, 19, 19, 12,  3,512, ZSTD_btultra2},  /* level 21.*/
-    { 18, 19, 19, 13,  3,999, ZSTD_btultra2},  /* level 22.*/
-},
-{   /* for srcSize <= 128 KB */
-    /* W,  C,  H,  S,  L,  T, strat */
-    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
-    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
-    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
-    { 17, 15, 16,  2,  5,  1, ZSTD_dfast   },  /* level  3 */
-    { 17, 17, 17,  2,  4,  1, ZSTD_dfast   },  /* level  4 */
-    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
-    { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
-    { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
-    { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
-    { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
-    { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
-    { 17, 17, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 11 */
-    { 17, 18, 17,  7,  4, 12, ZSTD_btlazy2 },  /* level 12 */
-    { 17, 18, 17,  3,  4, 12, ZSTD_btopt   },  /* level 13.*/
-    { 17, 18, 17,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
-    { 17, 18, 17,  6,  3,256, ZSTD_btopt   },  /* level 15.*/
-    { 17, 18, 17,  6,  3,128, ZSTD_btultra },  /* level 16.*/
-    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 17.*/
-    { 17, 18, 17, 10,  3,512, ZSTD_btultra },  /* level 18.*/
-    { 17, 18, 17,  5,  3,256, ZSTD_btultra2},  /* level 19.*/
-    { 17, 18, 17,  7,  3,512, ZSTD_btultra2},  /* level 20.*/
-    { 17, 18, 17,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
-    { 17, 18, 17, 11,  3,999, ZSTD_btultra2},  /* level 22.*/
-},
-{   /* for srcSize <= 16 KB */
-    /* W,  C,  H,  S,  L,  T, strat */
-    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
-    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
-    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
-    { 14, 14, 15,  2,  4,  1, ZSTD_dfast   },  /* level  3 */
-    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */
-    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
-    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
-    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
-    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
-    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
-    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
-    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
-    { 14, 15, 14,  4,  3, 24, ZSTD_btopt   },  /* level 12.*/
-    { 14, 15, 14,  5,  3, 32, ZSTD_btultra },  /* level 13.*/
-    { 14, 15, 15,  6,  3, 64, ZSTD_btultra },  /* level 14.*/
-    { 14, 15, 15,  7,  3,256, ZSTD_btultra },  /* level 15.*/
-    { 14, 15, 15,  5,  3, 48, ZSTD_btultra2},  /* level 16.*/
-    { 14, 15, 15,  6,  3,128, ZSTD_btultra2},  /* level 17.*/
-    { 14, 15, 15,  7,  3,256, ZSTD_btultra2},  /* level 18.*/
-    { 14, 15, 15,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
-    { 14, 15, 15,  8,  3,512, ZSTD_btultra2},  /* level 20.*/
-    { 14, 15, 15,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
-    { 14, 15, 15, 10,  3,999, ZSTD_btultra2},  /* level 22.*/
-},
-};
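For readability of the tables above, the column legend maps onto `ZSTD_compressionParameters` fields as in the sketch below. This is not part of the vendored file; the constant name is illustrative, and the values are the "default" table's level 3 row:

```c
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_compressionParameters lives in the advanced section */
#include <zstd.h>

/* W, C, H, S, L, TL, strat  ==  windowLog, chainLog, hashLog, searchLog,
 * minMatch, targetLength, strategy. */
static const ZSTD_compressionParameters level3_default_row = {
    21,         /* W  : windowLog    */
    16,         /* C  : chainLog     */
    17,         /* H  : hashLog      */
    1,          /* S  : searchLog    */
    5,          /* L  : minMatch     */
    1,          /* TL : targetLength */
    ZSTD_dfast, /* strat : strategy  */
};
```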
-
-/*! ZSTD_getCParams() :
- * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
- *  Size values are optional, provide 0 if not known or unused */
-ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
-{
-    size_t const addedSize = srcSizeHint ? 0 : 500;
-    U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : ZSTD_CONTENTSIZE_UNKNOWN;  /* intentional overflow for srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN */
-    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
-    int row = compressionLevel;
-    DEBUGLOG(5, "ZSTD_getCParams (cLevel=%i)", compressionLevel);
-    if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
-    if (compressionLevel < 0) row = 0;   /* entry 0 is baseline for fast mode */
-    if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
-    {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
-        if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel);   /* acceleration factor */
-        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize);               /* refine parameters based on srcSize & dictSize */
-    }
-}
-
-/*! ZSTD_getParams() :
- *  same idea as ZSTD_getCParams()
- * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
- *  Fields of `ZSTD_frameParameters` are set to default values */
-ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
-    ZSTD_parameters params;
-    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize);
-    DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
-    memset(&params, 0, sizeof(params));
-    params.cParams = cParams;
-    params.fParams.contentSizeFlag = 1;
-    return params;
-}
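A small worked sketch, not part of the vendored file, of how `tableID` in `ZSTD_getCParams()` above selects one of the four tables from the combined `srcSizeHint + dictSize` estimate (`KB` is the same `1 << 10` multiplier used in the code):

```c
#include <assert.h>

static unsigned tableID_for(unsigned long long rSize)
{
    /* same expression as in ZSTD_getCParams(): each satisfied bound adds 1 */
    return (rSize <= (256u << 10)) + (rSize <= (128u << 10)) + (rSize <= (16u << 10));
}

static void tableID_examples(void)
{
    assert(tableID_for(1u << 20)   == 0);  /* 1 MB   -> "default" table           */
    assert(tableID_for(200u << 10) == 1);  /* 200 KB -> "srcSize <= 256 KB" table */
    assert(tableID_for(100u << 10) == 2);  /* 100 KB -> "srcSize <= 128 KB" table */
    assert(tableID_for(4u << 10)   == 3);  /* 4 KB   -> "srcSize <= 16 KB" table  */
}
```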
diff --git a/vendor/github.com/DataDog/zstd/zstd_compress_internal.h b/vendor/github.com/DataDog/zstd/zstd_compress_internal.h
deleted file mode 100644
index 5495899..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_compress_internal.h
+++ /dev/null
@@ -1,907 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-/* This header contains definitions
- * that shall **only** be used by modules within lib/compress.
- */
-
-#ifndef ZSTD_COMPRESS_H
-#define ZSTD_COMPRESS_H
-
-/*-*************************************
-*  Dependencies
-***************************************/
-#include "zstd_internal.h"
-#ifdef ZSTD_MULTITHREAD
-#  include "zstdmt_compress.h"
-#endif
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/*-*************************************
-*  Constants
-***************************************/
-#define kSearchStrength      8
-#define HASH_READ_SIZE       8
-#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
-                                       It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
-                                       It's not a big deal though : candidate will just be sorted again.
-                                       Additionally, candidate position 1 will be lost.
-                                       But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
-                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
-                                       This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
-
-
-/*-*************************************
-*  Context memory management
-***************************************/
-typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
-typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
-
-typedef struct ZSTD_prefixDict_s {
-    const void* dict;
-    size_t dictSize;
-    ZSTD_dictContentType_e dictContentType;
-} ZSTD_prefixDict;
-
-typedef struct {
-    void* dictBuffer;
-    void const* dict;
-    size_t dictSize;
-    ZSTD_dictContentType_e dictContentType;
-    ZSTD_CDict* cdict;
-} ZSTD_localDict;
-
-typedef struct {
-    U32 CTable[HUF_CTABLE_SIZE_U32(255)];
-    HUF_repeat repeatMode;
-} ZSTD_hufCTables_t;
-
-typedef struct {
-    FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
-    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
-    FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
-    FSE_repeat offcode_repeatMode;
-    FSE_repeat matchlength_repeatMode;
-    FSE_repeat litlength_repeatMode;
-} ZSTD_fseCTables_t;
-
-typedef struct {
-    ZSTD_hufCTables_t huf;
-    ZSTD_fseCTables_t fse;
-} ZSTD_entropyCTables_t;
-
-typedef struct {
-    U32 off;
-    U32 len;
-} ZSTD_match_t;
-
-typedef struct {
-    int price;
-    U32 off;
-    U32 mlen;
-    U32 litlen;
-    U32 rep[ZSTD_REP_NUM];
-} ZSTD_optimal_t;
-
-typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
-
-typedef struct {
-    /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
-    unsigned* litFreq;           /* table of literals statistics, of size 256 */
-    unsigned* litLengthFreq;     /* table of litLength statistics, of size (MaxLL+1) */
-    unsigned* matchLengthFreq;   /* table of matchLength statistics, of size (MaxML+1) */
-    unsigned* offCodeFreq;       /* table of offCode statistics, of size (MaxOff+1) */
-    ZSTD_match_t* matchTable;    /* list of found matches, of size ZSTD_OPT_NUM+1 */
-    ZSTD_optimal_t* priceTable;  /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
-
-    U32  litSum;                 /* nb of literals */
-    U32  litLengthSum;           /* nb of litLength codes */
-    U32  matchLengthSum;         /* nb of matchLength codes */
-    U32  offCodeSum;             /* nb of offset codes */
-    U32  litSumBasePrice;        /* to compare to log2(litfreq) */
-    U32  litLengthSumBasePrice;  /* to compare to log2(llfreq)  */
-    U32  matchLengthSumBasePrice;/* to compare to log2(mlfreq)  */
-    U32  offCodeSumBasePrice;    /* to compare to log2(offreq)  */
-    ZSTD_OptPrice_e priceType;   /* prices can be determined dynamically, or follow a pre-defined cost structure */
-    const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */
-    ZSTD_literalCompressionMode_e literalCompressionMode;
-} optState_t;
-
-typedef struct {
-  ZSTD_entropyCTables_t entropy;
-  U32 rep[ZSTD_REP_NUM];
-} ZSTD_compressedBlockState_t;
-
-typedef struct {
-    BYTE const* nextSrc;    /* next block here to continue on current prefix */
-    BYTE const* base;       /* All regular indexes relative to this position */
-    BYTE const* dictBase;   /* extDict indexes relative to this position */
-    U32 dictLimit;          /* below that point, need extDict */
-    U32 lowLimit;           /* below that point, no more valid data */
-} ZSTD_window_t;
-
-typedef struct ZSTD_matchState_t ZSTD_matchState_t;
-struct ZSTD_matchState_t {
-    ZSTD_window_t window;   /* State for window round buffer management */
-    U32 loadedDictEnd;      /* index of end of dictionary, within context's referential. When dict referential is copied into active context (i.e. not attached), effectively same value as dictSize, since referential starts from zero */
-    U32 nextToUpdate;       /* index from which to continue table update */
-    U32 hashLog3;           /* dispatch table : larger == faster, more memory */
-    U32* hashTable;
-    U32* hashTable3;
-    U32* chainTable;
-    optState_t opt;         /* optimal parser state */
-    const ZSTD_matchState_t* dictMatchState;
-    ZSTD_compressionParameters cParams;
-};
-
-typedef struct {
-    ZSTD_compressedBlockState_t* prevCBlock;
-    ZSTD_compressedBlockState_t* nextCBlock;
-    ZSTD_matchState_t matchState;
-} ZSTD_blockState_t;
-
-typedef struct {
-    U32 offset;
-    U32 checksum;
-} ldmEntry_t;
-
-typedef struct {
-    ZSTD_window_t window;   /* State for the window round buffer management */
-    ldmEntry_t* hashTable;
-    BYTE* bucketOffsets;    /* Next position in bucket to insert entry */
-    U64 hashPower;          /* Used to compute the rolling hash.
-                             * Depends on ldmParams.minMatchLength */
-} ldmState_t;
-
-typedef struct {
-    U32 enableLdm;          /* 1 if enable long distance matching */
-    U32 hashLog;            /* Log size of hashTable */
-    U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
-    U32 minMatchLength;     /* Minimum match length */
-    U32 hashRateLog;       /* Log number of entries to skip */
-    U32 windowLog;          /* Window log for the LDM */
-} ldmParams_t;
-
-typedef struct {
-    U32 offset;
-    U32 litLength;
-    U32 matchLength;
-} rawSeq;
-
-typedef struct {
-  rawSeq* seq;     /* The start of the sequences */
-  size_t pos;      /* The position where reading stopped. <= size. */
-  size_t size;     /* The number of sequences. <= capacity. */
-  size_t capacity; /* The capacity starting from `seq` pointer */
-} rawSeqStore_t;
-
-struct ZSTD_CCtx_params_s {
-    ZSTD_format_e format;
-    ZSTD_compressionParameters cParams;
-    ZSTD_frameParameters fParams;
-
-    int compressionLevel;
-    int forceWindow;           /* force back-references to respect limit of
-                                * 1<<wLog, even for dictionary */
-    size_t targetCBlockSize;   /* Tries to fit compressed block size to be around targetCBlockSize.
-                                * No target when targetCBlockSize == 0.
-                                * There is no guarantee on compressed block size */
-
-    ZSTD_dictAttachPref_e attachDictPref;
-    ZSTD_literalCompressionMode_e literalCompressionMode;
-
-    /* Multithreading: used to pass parameters to mtctx */
-    int nbWorkers;
-    size_t jobSize;
-    int overlapLog;
-    int rsyncable;
-
-    /* Long distance matching parameters */
-    ldmParams_t ldmParams;
-
-    /* Internal use, for createCCtxParams() and freeCCtxParams() only */
-    ZSTD_customMem customMem;
-};  /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
-
-struct ZSTD_CCtx_s {
-    ZSTD_compressionStage_e stage;
-    int cParamsChanged;                  /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
-    int bmi2;                            /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
-    ZSTD_CCtx_params requestedParams;
-    ZSTD_CCtx_params appliedParams;
-    U32   dictID;
-
-    int workSpaceOversizedDuration;
-    void* workSpace;
-    size_t workSpaceSize;
-    size_t blockSize;
-    unsigned long long pledgedSrcSizePlusOne;  /* this way, 0 (default) == unknown */
-    unsigned long long consumedSrcSize;
-    unsigned long long producedCSize;
-    XXH64_state_t xxhState;
-    ZSTD_customMem customMem;
-    size_t staticSize;
-
-    seqStore_t seqStore;      /* sequences storage ptrs */
-    ldmState_t ldmState;      /* long distance matching state */
-    rawSeq* ldmSequences;     /* Storage for the ldm output sequences */
-    size_t maxNbLdmSequences;
-    rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
-    ZSTD_blockState_t blockState;
-    U32* entropyWorkspace;  /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
-
-    /* streaming */
-    char*  inBuff;
-    size_t inBuffSize;
-    size_t inToCompress;
-    size_t inBuffPos;
-    size_t inBuffTarget;
-    char*  outBuff;
-    size_t outBuffSize;
-    size_t outBuffContentSize;
-    size_t outBuffFlushedSize;
-    ZSTD_cStreamStage streamStage;
-    U32    frameEnded;
-
-    /* Dictionary */
-    ZSTD_localDict localDict;
-    const ZSTD_CDict* cdict;
-    ZSTD_prefixDict prefixDict;   /* single-usage dictionary */
-
-    /* Multi-threading */
-#ifdef ZSTD_MULTITHREAD
-    ZSTDMT_CCtx* mtctx;
-#endif
-};
-
-typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
-
-typedef enum { ZSTD_noDict = 0, ZSTD_extDict = 1, ZSTD_dictMatchState = 2 } ZSTD_dictMode_e;
-
-
-typedef size_t (*ZSTD_blockCompressor) (
-        ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);
-
-
-MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
-{
-    static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
-                                       8,  9, 10, 11, 12, 13, 14, 15,
-                                      16, 16, 17, 17, 18, 18, 19, 19,
-                                      20, 20, 20, 20, 21, 21, 21, 21,
-                                      22, 22, 22, 22, 22, 22, 22, 22,
-                                      23, 23, 23, 23, 23, 23, 23, 23,
-                                      24, 24, 24, 24, 24, 24, 24, 24,
-                                      24, 24, 24, 24, 24, 24, 24, 24 };
-    static const U32 LL_deltaCode = 19;
-    return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
-}
-
-/* ZSTD_MLcode() :
- * note : mlBase = matchLength - MINMATCH;
- *        because it's the format it's stored in seqStore->sequences */
-MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
-{
-    static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
-                                      16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-                                      32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
-                                      38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
-                                      40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
-                                      41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
-                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
-                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
-    static const U32 ML_deltaCode = 36;
-    return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
-}
-
-/*! ZSTD_storeSeq() :
- *  Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
- *  `offsetCode` : distance to match + 3 (values 1-3 are repCodes).
- *  `mlBase` : matchLength - MINMATCH
-*/
-MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase)
-{
-#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
-    static const BYTE* g_start = NULL;
-    if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
-    {   U32 const pos = (U32)((const BYTE*)literals - g_start);
-        DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
-               pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode);
-    }
-#endif
-    assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
-    /* copy Literals */
-    assert(seqStorePtr->maxNbLit <= 128 KB);
-    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
-    ZSTD_wildcopy(seqStorePtr->lit, literals, litLength, ZSTD_no_overlap);
-    seqStorePtr->lit += litLength;
-
-    /* literal Length */
-    if (litLength>0xFFFF) {
-        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
-        seqStorePtr->longLengthID = 1;
-        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
-    }
-    seqStorePtr->sequences[0].litLength = (U16)litLength;
-
-    /* match offset */
-    seqStorePtr->sequences[0].offset = offsetCode + 1;
-
-    /* match Length */
-    if (mlBase>0xFFFF) {
-        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
-        seqStorePtr->longLengthID = 2;
-        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
-    }
-    seqStorePtr->sequences[0].matchLength = (U16)mlBase;
-
-    seqStorePtr->sequences++;
-}
-
-
-/*-*************************************
-*  Match length counter
-***************************************/
-static unsigned ZSTD_NbCommonBytes (size_t val)
-{
-    if (MEM_isLittleEndian()) {
-        if (MEM_64bits()) {
-#       if defined(_MSC_VER) && defined(_WIN64)
-            unsigned long r = 0;
-            _BitScanForward64( &r, (U64)val );
-            return (unsigned)(r>>3);
-#       elif defined(__GNUC__) && (__GNUC__ >= 4)
-            return (__builtin_ctzll((U64)val) >> 3);
-#       else
-            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
-                                                     0, 3, 1, 3, 1, 4, 2, 7,
-                                                     0, 2, 3, 6, 1, 5, 3, 5,
-                                                     1, 3, 4, 4, 2, 5, 6, 7,
-                                                     7, 0, 1, 2, 3, 3, 4, 6,
-                                                     2, 6, 5, 5, 3, 4, 5, 6,
-                                                     7, 1, 2, 4, 6, 4, 4, 5,
-                                                     7, 2, 6, 5, 7, 6, 7, 7 };
-            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
-#       endif
-        } else { /* 32 bits */
-#       if defined(_MSC_VER)
-            unsigned long r=0;
-            _BitScanForward( &r, (U32)val );
-            return (unsigned)(r>>3);
-#       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return (__builtin_ctz((U32)val) >> 3);
-#       else
-            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
-                                                     3, 2, 2, 1, 3, 2, 0, 1,
-                                                     3, 3, 1, 2, 2, 2, 2, 0,
-                                                     3, 1, 2, 0, 1, 0, 1, 1 };
-            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
-#       endif
-        }
-    } else {  /* Big Endian CPU */
-        if (MEM_64bits()) {
-#       if defined(_MSC_VER) && defined(_WIN64)
-            unsigned long r = 0;
-            _BitScanReverse64( &r, val );
-            return (unsigned)(r>>3);
-#       elif defined(__GNUC__) && (__GNUC__ >= 4)
-            return (__builtin_clzll(val) >> 3);
-#       else
-            unsigned r;
-            const unsigned n32 = sizeof(size_t)*4;   /* calculated this way to avoid a compiler warning in 32-bit mode */
-            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
-            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
-            r += (!val);
-            return r;
-#       endif
-        } else { /* 32 bits */
-#       if defined(_MSC_VER)
-            unsigned long r = 0;
-            _BitScanReverse( &r, (unsigned long)val );
-            return (unsigned)(r>>3);
-#       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return (__builtin_clz((U32)val) >> 3);
-#       else
-            unsigned r;
-            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
-            r += (!val);
-            return r;
-#       endif
-    }   }
-}
-
-
-MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
-{
-    const BYTE* const pStart = pIn;
-    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
-
-    if (pIn < pInLoopLimit) {
-        { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
-          if (diff) return ZSTD_NbCommonBytes(diff); }
-        pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
-        while (pIn < pInLoopLimit) {
-            size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
-            if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
-            pIn += ZSTD_NbCommonBytes(diff);
-            return (size_t)(pIn - pStart);
-    }   }
-    if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
-    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
-    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
-    return (size_t)(pIn - pStart);
-}
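A tiny sketch, not part of the vendored header, of the contract of `ZSTD_count()` above: it returns the length of the common prefix of `pIn` and `pMatch`, reading `pIn` no further than `pInLimit`. It assumes the definitions above are in scope:

```c
#include <assert.h>

static void ZSTD_count_example(void)
{
    static const BYTE a[] = "abcdefgh";   /* 8 data bytes plus terminating NUL */
    static const BYTE b[] = "abcdXYZ!";
    size_t const common = ZSTD_count(a, b, a + 8);
    assert(common == 4);   /* "abcd" is the shared prefix */
    (void)common;
}
```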
-
-/** ZSTD_count_2segments() :
- *  can count match length with `ip` & `match` in 2 different segments.
- *  convention : on reaching mEnd, the match count continues starting from iStart
- */
-MEM_STATIC size_t
-ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
-                     const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
-{
-    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
-    size_t const matchLength = ZSTD_count(ip, match, vEnd);
-    if (match + matchLength != mEnd) return matchLength;
-    DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
-    DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
-    DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
-    DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
-    DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
-    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
-}
-
-
-/*-*************************************
- *  Hashes
- ***************************************/
-static const U32 prime3bytes = 506832829U;
-static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
-MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
-
-static const U32 prime4bytes = 2654435761U;
-static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
-static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
-
-static const U64 prime5bytes = 889523592379ULL;
-static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }
-static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
-
-static const U64 prime6bytes = 227718039650203ULL;
-static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
-static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
-
-static const U64 prime7bytes = 58295818150454627ULL;
-static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }
-static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
-
-static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
-static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
-static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
-
-MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
-{
-    switch(mls)
-    {
-    default:
-    case 4: return ZSTD_hash4Ptr(p, hBits);
-    case 5: return ZSTD_hash5Ptr(p, hBits);
-    case 6: return ZSTD_hash6Ptr(p, hBits);
-    case 7: return ZSTD_hash7Ptr(p, hBits);
-    case 8: return ZSTD_hash8Ptr(p, hBits);
-    }
-}
-
-/** ZSTD_ipow() :
- * Return base^exponent.
- */
-static U64 ZSTD_ipow(U64 base, U64 exponent)
-{
-    U64 power = 1;
-    while (exponent) {
-      if (exponent & 1) power *= base;
-      exponent >>= 1;
-      base *= base;
-    }
-    return power;
-}
-
-#define ZSTD_ROLL_HASH_CHAR_OFFSET 10
-
-/** ZSTD_rollingHash_append() :
- * Add the buffer to the hash value.
- */
-static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
-{
-    BYTE const* istart = (BYTE const*)buf;
-    size_t pos;
-    for (pos = 0; pos < size; ++pos) {
-        hash *= prime8bytes;
-        hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
-    }
-    return hash;
-}
-
-/** ZSTD_rollingHash_compute() :
- * Compute the rolling hash value of the buffer.
- */
-MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
-{
-    return ZSTD_rollingHash_append(0, buf, size);
-}
-
-/** ZSTD_rollingHash_primePower() :
- * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
- * over a window of length bytes.
- */
-MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
-{
-    return ZSTD_ipow(prime8bytes, length - 1);
-}
-
-/** ZSTD_rollingHash_rotate() :
- * Rotate the rolling hash by one byte.
- */
-MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
-{
-    hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
-    hash *= prime8bytes;
-    hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
-    return hash;
-}
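A short sketch, not part of the vendored header, of the invariant the rolling-hash helpers above maintain: rotating one byte out and one byte in gives the same value as recomputing the hash over the shifted window. It assumes `winLen >= 1` and at least `winLen + 1` readable bytes at `buf`:

```c
#include <assert.h>

static void rollingHash_invariant_example(const BYTE* buf, size_t winLen)
{
    U64 const pp    = ZSTD_rollingHash_primePower((U32)winLen);
    U64 const h0    = ZSTD_rollingHash_compute(buf, winLen);        /* hash of buf[0 .. winLen-1] */
    U64 const h1    = ZSTD_rollingHash_rotate(h0, buf[0], buf[winLen], pp);
    U64 const h1ref = ZSTD_rollingHash_compute(buf + 1, winLen);    /* hash of buf[1 .. winLen]   */
    assert(h1 == h1ref);
    (void)h1; (void)h1ref;
}
```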
-
-/*-*************************************
-*  Round buffer management
-***************************************/
-#if (ZSTD_WINDOWLOG_MAX_64 > 31)
-# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
-#endif
-/* Max current allowed */
-#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
-/* Maximum chunk size before overflow correction needs to be called again */
-#define ZSTD_CHUNKSIZE_MAX                                                     \
-    ( ((U32)-1)                  /* Maximum ending current index */            \
-    - ZSTD_CURRENT_MAX)          /* Maximum beginning lowLimit */
-
-/**
- * ZSTD_window_clear():
- * Clears the window containing the history by simply setting it to empty.
- */
-MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
-{
-    size_t const endT = (size_t)(window->nextSrc - window->base);
-    U32 const end = (U32)endT;
-
-    window->lowLimit = end;
-    window->dictLimit = end;
-}
-
-/**
- * ZSTD_window_hasExtDict():
- * Returns non-zero if the window has a non-empty extDict.
- */
-MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
-{
-    return window.lowLimit < window.dictLimit;
-}
-
-/**
- * ZSTD_matchState_dictMode():
- * Inspects the provided matchState and figures out what dictMode should be
- * passed to the compressor.
- */
-MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
-{
-    return ZSTD_window_hasExtDict(ms->window) ?
-        ZSTD_extDict :
-        ms->dictMatchState != NULL ?
-            ZSTD_dictMatchState :
-            ZSTD_noDict;
-}
-
-/**
- * ZSTD_window_needOverflowCorrection():
- * Returns non-zero if the indices are getting too large and need overflow
- * protection.
- */
-MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
-                                                  void const* srcEnd)
-{
-    U32 const current = (U32)((BYTE const*)srcEnd - window.base);
-    return current > ZSTD_CURRENT_MAX;
-}
-
-/**
- * ZSTD_window_correctOverflow():
- * Reduces the indices to protect from index overflow.
- * Returns the correction made to the indices, which must be applied to every
- * stored index.
- *
- * The least significant cycleLog bits of the indices must remain the same,
- * which may be 0. Every index up to maxDist in the past must be valid.
- * NOTE: (maxDist & cycleMask) must be zero.
- */
-MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
-                                           U32 maxDist, void const* src)
-{
-    /* preemptive overflow correction:
-     * 1. correction is large enough:
-     *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
-     *    1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
-     *
-     *    current - newCurrent
-     *    > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
-     *    > (3<<29) - (1<<chainLog)
-     *    > (3<<29) - (1<<30)             (NOTE: chainLog <= 30)
-     *    > 1<<29
-     *
-     * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
-     *    After correction, current is less than (1<<chainLog + 1<<windowLog).
-     *    In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
-     *    In 32-bit mode we are safe, because (chainLog <= 29), so
-     *    ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
-     * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
-     *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
-     */
-    U32 const cycleMask = (1U << cycleLog) - 1;
-    U32 const current = (U32)((BYTE const*)src - window->base);
-    U32 const newCurrent = (current & cycleMask) + maxDist;
-    U32 const correction = current - newCurrent;
-    assert((maxDist & cycleMask) == 0);
-    assert(current > newCurrent);
-    /* Loose bound, should be around 1<<29 (see above) */
-    assert(correction > 1<<28);
-
-    window->base += correction;
-    window->dictBase += correction;
-    window->lowLimit -= correction;
-    window->dictLimit -= correction;
-
-    DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
-             window->lowLimit);
-    return correction;
-}
-
-/**
- * ZSTD_window_enforceMaxDist():
- * Updates lowLimit so that:
- *    (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
- *
- * It ensures index is valid as long as index >= lowLimit.
- * This must be called before a block compression call.
- *
- * loadedDictEnd is only defined if a dictionary is in use for current compression.
- * As the name implies, loadedDictEnd represents the index at end of dictionary.
- * The value lies within the context's referential; it can be directly compared to blockEndIdx.
- *
- * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0.
- * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit.
- * This is because dictionaries are allowed to be referenced fully
- * as long as the last byte of the dictionary is in the window.
- * Once input has progressed beyond window size, dictionary cannot be referenced anymore.
- *
- * In normal dict mode, the dictionary lies between lowLimit and dictLimit.
- * In dictMatchState mode, lowLimit and dictLimit are the same,
- * and the dictionary is below them.
- * forceWindow and dictMatchState are therefore incompatible.
- */
-MEM_STATIC void
-ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
-                     const void* blockEnd,
-                           U32   maxDist,
-                           U32*  loadedDictEndPtr,
-                     const ZSTD_matchState_t** dictMatchStatePtr)
-{
-    U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
-    U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
-    DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
-                (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
-
-    /* - When there is no dictionary : loadedDictEnd == 0.
-         In which case, the test (blockEndIdx > maxDist) is merely to avoid
-         overflowing next operation `newLowLimit = blockEndIdx - maxDist`.
-       - When there is a standard dictionary :
-         Index referential is copied from the dictionary,
-         which means it starts from 0.
-         In which case, loadedDictEnd == dictSize,
-         and it makes sense to compare `blockEndIdx > maxDist + dictSize`
-         since `blockEndIdx` also starts from zero.
-       - When there is an attached dictionary :
-         loadedDictEnd is expressed within the referential of the context,
-         so it can be directly compared against blockEndIdx.
-    */
-    if (blockEndIdx > maxDist + loadedDictEnd) {
-        U32 const newLowLimit = blockEndIdx - maxDist;
-        if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
-        if (window->dictLimit < window->lowLimit) {
-            DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
-                        (unsigned)window->dictLimit, (unsigned)window->lowLimit);
-            window->dictLimit = window->lowLimit;
-        }
-        /* On reaching window size, dictionaries are invalidated */
-        if (loadedDictEndPtr) *loadedDictEndPtr = 0;
-        if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
-    }
-}
-
-/* Similar to ZSTD_window_enforceMaxDist(),
- * but only invalidates dictionary
- * when input progresses beyond window size. */
-MEM_STATIC void
-ZSTD_checkDictValidity(ZSTD_window_t* window,
-                       const void* blockEnd,
-                             U32   maxDist,
-                             U32*  loadedDictEndPtr,
-                       const ZSTD_matchState_t** dictMatchStatePtr)
-{
-    U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
-    U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
-    DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
-                (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
-
-    if (loadedDictEnd && (blockEndIdx > maxDist + loadedDictEnd)) {
-        /* On reaching window size, dictionaries are invalidated */
-        if (loadedDictEndPtr) *loadedDictEndPtr = 0;
-        if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
-    }
-}
-
-/**
- * ZSTD_window_update():
- * Updates the window by appending [src, src + srcSize) to the window.
- * If it is not contiguous, the current prefix becomes the extDict, and we
- * forget about the extDict. Handles overlap of the prefix and extDict.
- * Returns non-zero if the segment is contiguous.
- */
-MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
-                                  void const* src, size_t srcSize)
-{
-    BYTE const* const ip = (BYTE const*)src;
-    U32 contiguous = 1;
-    DEBUGLOG(5, "ZSTD_window_update");
-    /* Check if blocks follow each other */
-    if (src != window->nextSrc) {
-        /* not contiguous */
-        size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
-        DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
-        window->lowLimit = window->dictLimit;
-        assert(distanceFromBase == (size_t)(U32)distanceFromBase);  /* should never overflow */
-        window->dictLimit = (U32)distanceFromBase;
-        window->dictBase = window->base;
-        window->base = ip - distanceFromBase;
-        // ms->nextToUpdate = window->dictLimit;
-        if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit;   /* too small extDict */
-        contiguous = 0;
-    }
-    window->nextSrc = ip + srcSize;
-    /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
-    if ( (ip+srcSize > window->dictBase + window->lowLimit)
-       & (ip < window->dictBase + window->dictLimit)) {
-        ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
-        U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
-        window->lowLimit = lowLimitMax;
-        DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
-    }
-    return contiguous;
-}
-
-
-/* debug functions */
-#if (DEBUGLEVEL>=2)
-
-MEM_STATIC double ZSTD_fWeight(U32 rawStat)
-{
-    U32 const fp_accuracy = 8;
-    U32 const fp_multiplier = (1 << fp_accuracy);
-    U32 const newStat = rawStat + 1;
-    U32 const hb = ZSTD_highbit32(newStat);
-    U32 const BWeight = hb * fp_multiplier;
-    U32 const FWeight = (newStat << fp_accuracy) >> hb;
-    U32 const weight = BWeight + FWeight;
-    assert(hb + fp_accuracy < 31);
-    return (double)weight / fp_multiplier;
-}
-
-/* display a table content,
- * listing each element, its frequency, and its predicted bit cost */
-MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
-{
-    unsigned u, sum;
-    for (u=0, sum=0; u<=max; u++) sum += table[u];
-    DEBUGLOG(2, "total nb elts: %u", sum);
-    for (u=0; u<=max; u++) {
-        DEBUGLOG(2, "%2u: %5u  (%.2f)",
-                u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
-    }
-}
-
-#endif
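As a quick sanity check of the fixed-point approximation in ZSTD_fWeight(), the following standalone sketch re-derives ZSTD_fWeight(15) by hand; it assumes a GCC/Clang builtin as a stand-in for ZSTD_highbit32(), which is an assumption of the sketch rather than the library's own helper.

```c
#include <stdio.h>
#include <stdint.h>

/* Re-computation of ZSTD_fWeight(15): the result approximates log2(rawStat+1) + 1. */
int main(void)
{
    uint32_t const rawStat = 15;
    uint32_t const newStat = rawStat + 1;                  /* 16 */
    uint32_t const hb      = 31 - __builtin_clz(newStat);  /* highest set bit: 4 */
    uint32_t const BWeight = hb * 256;                     /* 4 * 2^8 = 1024 */
    uint32_t const FWeight = (newStat << 8) >> hb;         /* (16 << 8) >> 4 = 256 */
    printf("%.2f\n", (double)(BWeight + FWeight) / 256);   /* 5.00 ~ log2(16) + 1 */
    return 0;
}
```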
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-
-/* ==============================================================
- * Private declarations
- * These prototypes shall only be called from within lib/compress
- * ============================================================== */
-
-/* ZSTD_getCParamsFromCCtxParams() :
- * cParams are built depending on compressionLevel, src size hints,
- * LDM and manually set compression parameters.
- */
-ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
-        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize);
-
-/*! ZSTD_initCStream_internal() :
- *  Private use only. Init streaming operation.
- *  expects params to be valid.
- *  must receive dict, or cdict, or none, but not both.
- *  @return : 0, or an error code */
-size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
-                     const void* dict, size_t dictSize,
-                     const ZSTD_CDict* cdict,
-                     ZSTD_CCtx_params  params, unsigned long long pledgedSrcSize);
-
-void ZSTD_resetSeqStore(seqStore_t* ssPtr);
-
-/*! ZSTD_getCParamsFromCDict() :
- *  as the name implies */
-ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
-
-/* ZSTD_compressBegin_advanced_internal() :
- * Private use only. To be called from zstdmt_compress.c. */
-size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
-                                    const void* dict, size_t dictSize,
-                                    ZSTD_dictContentType_e dictContentType,
-                                    ZSTD_dictTableLoadMethod_e dtlm,
-                                    const ZSTD_CDict* cdict,
-                                    ZSTD_CCtx_params params,
-                                    unsigned long long pledgedSrcSize);
-
-/* ZSTD_compress_advanced_internal() :
- * Private use only. To be called from zstdmt_compress.c. */
-size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
-                                       void* dst, size_t dstCapacity,
-                                 const void* src, size_t srcSize,
-                                 const void* dict,size_t dictSize,
-                                 ZSTD_CCtx_params params);
-
-
-/* ZSTD_writeLastEmptyBlock() :
- * output an empty Block with end-of-frame mark to complete a frame
- * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
- *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
- */
-size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
-
-
-/* ZSTD_referenceExternalSequences() :
- * Must be called before starting a compression operation.
- * seqs must parse a prefix of the source.
- * This cannot be used when long range matching is enabled.
- * Zstd will use these sequences, and pass the literals to a secondary block
- * compressor.
- * @return : An error code on failure.
- * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
- * access and data corruption.
- */
-size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
-
-
-#endif /* ZSTD_COMPRESS_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_ddict.c b/vendor/github.com/DataDog/zstd/zstd_ddict.c
deleted file mode 100644
index 0af3d23..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_ddict.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-/* zstd_ddict.c :
- * concentrates all logic that needs to know the internals of ZSTD_DDict object */
-
-/*-*******************************************************
-*  Dependencies
-*********************************************************/
-#include <string.h>      /* memcpy, memmove, memset */
-#include "cpu.h"         /* bmi2 */
-#include "mem.h"         /* low level memory routines */
-#define FSE_STATIC_LINKING_ONLY
-#include "fse.h"
-#define HUF_STATIC_LINKING_ONLY
-#include "huf.h"
-#include "zstd_decompress_internal.h"
-#include "zstd_ddict.h"
-
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
-#  include "zstd_legacy.h"
-#endif
-
-
-
-/*-*******************************************************
-*  Types
-*********************************************************/
-struct ZSTD_DDict_s {
-    void* dictBuffer;
-    const void* dictContent;
-    size_t dictSize;
-    ZSTD_entropyDTables_t entropy;
-    U32 dictID;
-    U32 entropyPresent;
-    ZSTD_customMem cMem;
-};  /* typedef'd to ZSTD_DDict within "zstd.h" */
-
-const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)
-{
-    assert(ddict != NULL);
-    return ddict->dictContent;
-}
-
-size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)
-{
-    assert(ddict != NULL);
-    return ddict->dictSize;
-}
-
-void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
-{
-    DEBUGLOG(4, "ZSTD_copyDDictParameters");
-    assert(dctx != NULL);
-    assert(ddict != NULL);
-    dctx->dictID = ddict->dictID;
-    dctx->prefixStart = ddict->dictContent;
-    dctx->virtualStart = ddict->dictContent;
-    dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
-    dctx->previousDstEnd = dctx->dictEnd;
-    if (ddict->entropyPresent) {
-        dctx->litEntropy = 1;
-        dctx->fseEntropy = 1;
-        dctx->LLTptr = ddict->entropy.LLTable;
-        dctx->MLTptr = ddict->entropy.MLTable;
-        dctx->OFTptr = ddict->entropy.OFTable;
-        dctx->HUFptr = ddict->entropy.hufTable;
-        dctx->entropy.rep[0] = ddict->entropy.rep[0];
-        dctx->entropy.rep[1] = ddict->entropy.rep[1];
-        dctx->entropy.rep[2] = ddict->entropy.rep[2];
-    } else {
-        dctx->litEntropy = 0;
-        dctx->fseEntropy = 0;
-    }
-}
-
-
-static size_t
-ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
-                           ZSTD_dictContentType_e dictContentType)
-{
-    ddict->dictID = 0;
-    ddict->entropyPresent = 0;
-    if (dictContentType == ZSTD_dct_rawContent) return 0;
-
-    if (ddict->dictSize < 8) {
-        if (dictContentType == ZSTD_dct_fullDict)
-            return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
-        return 0;   /* pure content mode */
-    }
-    {   U32 const magic = MEM_readLE32(ddict->dictContent);
-        if (magic != ZSTD_MAGIC_DICTIONARY) {
-            if (dictContentType == ZSTD_dct_fullDict)
-                return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
-            return 0;   /* pure content mode */
-        }
-    }
-    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
-
-    /* load entropy tables */
-    RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
-            &ddict->entropy, ddict->dictContent, ddict->dictSize)),
-        dictionary_corrupted);
-    ddict->entropyPresent = 1;
-    return 0;
-}
-
-
-static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
-                                      const void* dict, size_t dictSize,
-                                      ZSTD_dictLoadMethod_e dictLoadMethod,
-                                      ZSTD_dictContentType_e dictContentType)
-{
-    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
-        ddict->dictBuffer = NULL;
-        ddict->dictContent = dict;
-        if (!dict) dictSize = 0;
-    } else {
-        void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem);
-        ddict->dictBuffer = internalBuffer;
-        ddict->dictContent = internalBuffer;
-        if (!internalBuffer) return ERROR(memory_allocation);
-        memcpy(internalBuffer, dict, dictSize);
-    }
-    ddict->dictSize = dictSize;
-    ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
-
-    /* parse dictionary content */
-    FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) );
-
-    return 0;
-}
-
-ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
-                                      ZSTD_dictLoadMethod_e dictLoadMethod,
-                                      ZSTD_dictContentType_e dictContentType,
-                                      ZSTD_customMem customMem)
-{
-    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
-
-    {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
-        if (ddict == NULL) return NULL;
-        ddict->cMem = customMem;
-        {   size_t const initResult = ZSTD_initDDict_internal(ddict,
-                                            dict, dictSize,
-                                            dictLoadMethod, dictContentType);
-            if (ZSTD_isError(initResult)) {
-                ZSTD_freeDDict(ddict);
-                return NULL;
-        }   }
-        return ddict;
-    }
-}
-
-/*! ZSTD_createDDict() :
-*   Create a digested dictionary, to start decompression without startup delay.
-*   `dict` content is copied inside DDict.
-*   Consequently, `dict` can be released after `ZSTD_DDict` creation */
-ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
-{
-    ZSTD_customMem const allocator = { NULL, NULL, NULL };
-    return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
-}
-
-/*! ZSTD_createDDict_byReference() :
- *  Create a digested dictionary, to start decompression without startup delay.
- *  Dictionary content is simply referenced, it will be accessed during decompression.
- *  Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
-ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
-{
-    ZSTD_customMem const allocator = { NULL, NULL, NULL };
-    return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
-}
-
-
-const ZSTD_DDict* ZSTD_initStaticDDict(
-                                void* sBuffer, size_t sBufferSize,
-                                const void* dict, size_t dictSize,
-                                ZSTD_dictLoadMethod_e dictLoadMethod,
-                                ZSTD_dictContentType_e dictContentType)
-{
-    size_t const neededSpace = sizeof(ZSTD_DDict)
-                             + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
-    ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
-    assert(sBuffer != NULL);
-    assert(dict != NULL);
-    if ((size_t)sBuffer & 7) return NULL;   /* 8-aligned */
-    if (sBufferSize < neededSpace) return NULL;
-    if (dictLoadMethod == ZSTD_dlm_byCopy) {
-        memcpy(ddict+1, dict, dictSize);  /* local copy */
-        dict = ddict+1;
-    }
-    if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
-                                              dict, dictSize,
-                                              ZSTD_dlm_byRef, dictContentType) ))
-        return NULL;
-    return ddict;
-}
-
-
-size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
-{
-    if (ddict==NULL) return 0;   /* support free on NULL */
-    {   ZSTD_customMem const cMem = ddict->cMem;
-        ZSTD_free(ddict->dictBuffer, cMem);
-        ZSTD_free(ddict, cMem);
-        return 0;
-    }
-}
-
-/*! ZSTD_estimateDDictSize() :
- *  Estimate amount of memory that will be needed to create a dictionary for decompression.
- *  Note : dictionaries created by reference using ZSTD_dlm_byRef are smaller */
-size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
-{
-    return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
-}
-
-size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
-{
-    if (ddict==NULL) return 0;   /* support sizeof on NULL */
-    return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
-}
-
-/*! ZSTD_getDictID_fromDDict() :
- *  Provides the dictID of the dictionary loaded into `ddict`.
- *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
- *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
-unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
-{
-    if (ddict==NULL) return 0;
-    return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
-}
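Taken together, the functions above implement the digested-dictionary workflow. A minimal usage sketch of the public API follows; the caller-supplied buffers and the failure value returned on allocation error are assumptions of this sketch, not part of the library.

```c
#include <stddef.h>
#include <zstd.h>   /* ZSTD_createDDict, ZSTD_createDCtx, ZSTD_decompress_usingDDict */

/* Minimal sketch: digest the dictionary once, then reuse it to decompress a frame.
 * Returns the regenerated size, or a value for which ZSTD_isError() is non-zero. */
size_t decompress_with_ddict(void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize,
                             const void* dictBuf, size_t dictSize)
{
    size_t ret = (size_t)-1;   /* sentinel chosen so ZSTD_isError(ret) is non-zero */
    ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);  /* copies dictBuf */
    ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
    if (ddict != NULL && dctx != NULL)
        ret = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);
    ZSTD_freeDCtx(dctx);     /* both free functions accept NULL */
    ZSTD_freeDDict(ddict);
    return ret;
}
```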
diff --git a/vendor/github.com/DataDog/zstd/zstd_ddict.h b/vendor/github.com/DataDog/zstd/zstd_ddict.h
deleted file mode 100644
index 0479d11..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_ddict.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-#ifndef ZSTD_DDICT_H
-#define ZSTD_DDICT_H
-
-/*-*******************************************************
- *  Dependencies
- *********************************************************/
-#include <stddef.h>   /* size_t */
-#include "zstd.h"     /* ZSTD_DDict, and several public functions */
-
-
-/*-*******************************************************
- *  Interface
- *********************************************************/
-
-/* note: several prototypes are already published in `zstd.h` :
- * ZSTD_createDDict()
- * ZSTD_createDDict_byReference()
- * ZSTD_createDDict_advanced()
- * ZSTD_freeDDict()
- * ZSTD_initStaticDDict()
- * ZSTD_sizeof_DDict()
- * ZSTD_estimateDDictSize()
- * ZSTD_getDictID_fromDict()
- */
-
-const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict);
-size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict);
-
-void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
-
-
-
-#endif /* ZSTD_DDICT_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_decompress.c b/vendor/github.com/DataDog/zstd/zstd_decompress.c
deleted file mode 100644
index e42872a..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_decompress.c
+++ /dev/null
@@ -1,1768 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-/* ***************************************************************
-*  Tuning parameters
-*****************************************************************/
-/*!
- * HEAPMODE :
- * Select how default decompression function ZSTD_decompress() allocates its context,
- * on stack (0), or into heap (1, default; requires malloc()).
- * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
- */
-#ifndef ZSTD_HEAPMODE
-#  define ZSTD_HEAPMODE 1
-#endif
-
-/*!
-*  LEGACY_SUPPORT :
-*  if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
-*/
-#ifndef ZSTD_LEGACY_SUPPORT
-#  define ZSTD_LEGACY_SUPPORT 0
-#endif
-
-/*!
- *  MAXWINDOWSIZE_DEFAULT :
- *  maximum window size accepted by DStream __by default__.
- *  Frames requiring more memory will be rejected.
- *  It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
- */
-#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
-#  define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
-#endif
-
-/*!
- *  NO_FORWARD_PROGRESS_MAX :
- *  maximum allowed nb of calls to ZSTD_decompressStream()
- *  without any forward progress
- *  (defined as: no byte read from input, and no byte flushed to output)
- *  before triggering an error.
- */
-#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX
-#  define ZSTD_NO_FORWARD_PROGRESS_MAX 16
-#endif
-
-
-/*-*******************************************************
-*  Dependencies
-*********************************************************/
-#include <string.h>      /* memcpy, memmove, memset */
-#include "cpu.h"         /* bmi2 */
-#include "mem.h"         /* low level memory routines */
-#define FSE_STATIC_LINKING_ONLY
-#include "fse.h"
-#define HUF_STATIC_LINKING_ONLY
-#include "huf.h"
-#include "zstd_internal.h"  /* blockProperties_t */
-#include "zstd_decompress_internal.h"   /* ZSTD_DCtx */
-#include "zstd_ddict.h"  /* ZSTD_DDictDictContent */
-#include "zstd_decompress_block.h"   /* ZSTD_decompressBlock_internal */
-
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
-#  include "zstd_legacy.h"
-#endif
-
-
-/*-*************************************************************
-*   Context management
-***************************************************************/
-size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
-{
-    if (dctx==NULL) return 0;   /* support sizeof NULL */
-    return sizeof(*dctx)
-           + ZSTD_sizeof_DDict(dctx->ddictLocal)
-           + dctx->inBuffSize + dctx->outBuffSize;
-}
-
-size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
-
-
-static size_t ZSTD_startingInputLength(ZSTD_format_e format)
-{
-    size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ?
-                    ZSTD_FRAMEHEADERSIZE_PREFIX - ZSTD_FRAMEIDSIZE :
-                    ZSTD_FRAMEHEADERSIZE_PREFIX;
-    ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE);
-    /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
-    assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
-    return startingInputLength;
-}
-
-static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
-{
-    dctx->format = ZSTD_f_zstd1;  /* ZSTD_decompressBegin() invokes ZSTD_startingInputLength() with argument dctx->format */
-    dctx->staticSize  = 0;
-    dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
-    dctx->ddict       = NULL;
-    dctx->ddictLocal  = NULL;
-    dctx->dictEnd     = NULL;
-    dctx->ddictIsCold = 0;
-    dctx->dictUses = ZSTD_dont_use;
-    dctx->inBuff      = NULL;
-    dctx->inBuffSize  = 0;
-    dctx->outBuffSize = 0;
-    dctx->streamStage = zdss_init;
-    dctx->legacyContext = NULL;
-    dctx->previousLegacyVersion = 0;
-    dctx->noForwardProgress = 0;
-    dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
-}
-
-ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
-{
-    ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
-
-    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
-    if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL;  /* minimum size */
-
-    ZSTD_initDCtx_internal(dctx);
-    dctx->staticSize = workspaceSize;
-    dctx->inBuff = (char*)(dctx+1);
-    return dctx;
-}
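A sketch of how a caller is expected to use the static-DCtx entry point above. ZSTD_initStaticDCtx() and ZSTD_estimateDCtxSize() live behind ZSTD_STATIC_LINKING_ONLY, and the malloc-backed workspace here is only an assumption for illustration; any 8-byte-aligned buffer of sufficient size works.

```c
#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY   /* static-allocation entry points are advanced API */
#include <zstd.h>

/* Sketch: place a DCtx inside a caller-owned workspace instead of letting zstd malloc it.
 * The workspace must outlive the returned DCtx and is released by the caller;
 * ZSTD_freeDCtx() refuses a static DCtx (see above), so free the workspace instead. */
ZSTD_DCtx* make_static_dctx(void** workspaceOut)
{
    size_t const need = ZSTD_estimateDCtxSize();
    void* const workspace = malloc(need);               /* malloc memory is suitably aligned */
    ZSTD_DCtx* const dctx = workspace ? ZSTD_initStaticDCtx(workspace, need) : NULL;
    *workspaceOut = workspace;
    return dctx;
}
```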
-
-ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
-{
-    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
-
-    {   ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem);
-        if (!dctx) return NULL;
-        dctx->customMem = customMem;
-        ZSTD_initDCtx_internal(dctx);
-        return dctx;
-    }
-}
-
-ZSTD_DCtx* ZSTD_createDCtx(void)
-{
-    DEBUGLOG(3, "ZSTD_createDCtx");
-    return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
-}
-
-static void ZSTD_clearDict(ZSTD_DCtx* dctx)
-{
-    ZSTD_freeDDict(dctx->ddictLocal);
-    dctx->ddictLocal = NULL;
-    dctx->ddict = NULL;
-    dctx->dictUses = ZSTD_dont_use;
-}
-
-size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
-{
-    if (dctx==NULL) return 0;   /* support free on NULL */
-    RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
-    {   ZSTD_customMem const cMem = dctx->customMem;
-        ZSTD_clearDict(dctx);
-        ZSTD_free(dctx->inBuff, cMem);
-        dctx->inBuff = NULL;
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
-        if (dctx->legacyContext)
-            ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion);
-#endif
-        ZSTD_free(dctx, cMem);
-        return 0;
-    }
-}
-
-/* no longer useful */
-void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
-{
-    size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
-    memcpy(dstDCtx, srcDCtx, toCopy);  /* no need to copy workspace */
-}
-
-
-/*-*************************************************************
- *   Frame header decoding
- ***************************************************************/
-
-/*! ZSTD_isFrame() :
- *  Tells if the content of `buffer` starts with a valid Frame Identifier.
- *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
- *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
- *  Note 3 : Skippable Frame Identifiers are considered valid. */
-unsigned ZSTD_isFrame(const void* buffer, size_t size)
-{
-    if (size < ZSTD_FRAMEIDSIZE) return 0;
-    {   U32 const magic = MEM_readLE32(buffer);
-        if (magic == ZSTD_MAGICNUMBER) return 1;
-        if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
-    }
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
-    if (ZSTD_isLegacy(buffer, size)) return 1;
-#endif
-    return 0;
-}
-
-/** ZSTD_frameHeaderSize_internal() :
- *  srcSize must be large enough to reach header size fields.
- *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
- * @return : size of the Frame Header
- *           or an error code, which can be tested with ZSTD_isError() */
-static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
-{
-    size_t const minInputSize = ZSTD_startingInputLength(format);
-    RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong);
-
-    {   BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
-        U32 const dictID= fhd & 3;
-        U32 const singleSegment = (fhd >> 5) & 1;
-        U32 const fcsId = fhd >> 6;
-        return minInputSize + !singleSegment
-             + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
-             + (singleSegment && !fcsId);
-    }
-}
-
-/** ZSTD_frameHeaderSize() :
- *  srcSize must be >= ZSTD_frameHeaderSize_prefix.
- * @return : size of the Frame Header,
- *           or an error code (if srcSize is too small) */
-size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
-{
-    return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
-}
-
-
-/** ZSTD_getFrameHeader_advanced() :
- *  decode Frame Header, or require larger `srcSize`.
- *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
- * @return : 0, `zfhPtr` is correctly filled,
- *          >0, `srcSize` is too small, value is the wanted `srcSize` amount,
 *          >0, `srcSize` is too small, value is the wanted `srcSize` amount,
- *           or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
-{
-    const BYTE* ip = (const BYTE*)src;
-    size_t const minInputSize = ZSTD_startingInputLength(format);
-
-    memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzers do not understand that zfhPtr will only be read if the return value is zero, since these are 2 different signals */
-    if (srcSize < minInputSize) return minInputSize;
-    RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
-
-    if ( (format != ZSTD_f_zstd1_magicless)
-      && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
-        if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
-            /* skippable frame */
-            if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
-                return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
-            memset(zfhPtr, 0, sizeof(*zfhPtr));
-            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
-            zfhPtr->frameType = ZSTD_skippableFrame;
-            return 0;
-        }
-        RETURN_ERROR(prefix_unknown);
-    }
-
-    /* ensure there is enough `srcSize` to fully read/decode frame header */
-    {   size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
-        if (srcSize < fhsize) return fhsize;
-        zfhPtr->headerSize = (U32)fhsize;
-    }
-
-    {   BYTE const fhdByte = ip[minInputSize-1];
-        size_t pos = minInputSize;
-        U32 const dictIDSizeCode = fhdByte&3;
-        U32 const checksumFlag = (fhdByte>>2)&1;
-        U32 const singleSegment = (fhdByte>>5)&1;
-        U32 const fcsID = fhdByte>>6;
-        U64 windowSize = 0;
-        U32 dictID = 0;
-        U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
-        RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
-                        "reserved bits, must be zero");
-
-        if (!singleSegment) {
-            BYTE const wlByte = ip[pos++];
-            U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
-            RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge);
-            windowSize = (1ULL << windowLog);
-            windowSize += (windowSize >> 3) * (wlByte&7);
-        }
-        switch(dictIDSizeCode)
-        {
-            default: assert(0);  /* impossible */
-            case 0 : break;
-            case 1 : dictID = ip[pos]; pos++; break;
-            case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
-            case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
-        }
-        switch(fcsID)
-        {
-            default: assert(0);  /* impossible */
-            case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
-            case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
-            case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
-            case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
-        }
-        if (singleSegment) windowSize = frameContentSize;
-
-        zfhPtr->frameType = ZSTD_frame;
-        zfhPtr->frameContentSize = frameContentSize;
-        zfhPtr->windowSize = windowSize;
-        zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
-        zfhPtr->dictID = dictID;
-        zfhPtr->checksumFlag = checksumFlag;
-    }
-    return 0;
-}
-
-/** ZSTD_getFrameHeader() :
- *  decode Frame Header, or require larger `srcSize`.
- *  note : this function does not consume input, it only reads it.
- * @return : 0, `zfhPtr` is correctly filled,
- *          >0, `srcSize` is too small, value is the wanted `srcSize` amount,
- *           or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
-{
-    return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
-}
-
-
-/** ZSTD_getFrameContentSize() :
- *  compatible with legacy mode
- * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
- *         - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
- *         - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
-unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
-{
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
-    if (ZSTD_isLegacy(src, srcSize)) {
-        unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize);
-        return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret;
-    }
-#endif
-    {   ZSTD_frameHeader zfh;
-        if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
-            return ZSTD_CONTENTSIZE_ERROR;
-        if (zfh.frameType == ZSTD_skippableFrame) {
-            return 0;
-        } else {
-            return zfh.frameContentSize;
-    }   }
-}
-
-static size_t readSkippableFrameSize(void const* src, size_t srcSize)
-{
-    size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
-    U32 sizeU32;
-
-    RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong);
-
-    sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
-    RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
-                    frameParameter_unsupported);
-    {
-        size_t const skippableSize = skippableHeaderSize + sizeU32;
-        RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong);
-        return skippableSize;
-    }
-}
-
-/** ZSTD_findDecompressedSize() :
- *  compatible with legacy mode
- *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
- *      skippable frames
- *  @return : decompressed size of the frames contained */
-unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
-{
-    unsigned long long totalDstSize = 0;
-
-    while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) {
-        U32 const magicNumber = MEM_readLE32(src);
-
-        if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
-            size_t const skippableSize = readSkippableFrameSize(src, srcSize);
-            if (ZSTD_isError(skippableSize)) {
-                return ZSTD_CONTENTSIZE_ERROR;
-            }
-            assert(skippableSize <= srcSize);
-
-            src = (const BYTE *)src + skippableSize;
-            srcSize -= skippableSize;
-            continue;
-        }
-
-        {   unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
-            if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
-
-            /* check for overflow */
-            if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
-            totalDstSize += ret;
-        }
-        {   size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
-            if (ZSTD_isError(frameSrcSize)) {
-                return ZSTD_CONTENTSIZE_ERROR;
-            }
-
-            src = (const BYTE *)src + frameSrcSize;
-            srcSize -= frameSrcSize;
-        }
-    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
-
-    if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
-
-    return totalDstSize;
-}
-
-/** ZSTD_getDecompressedSize() :
- *  compatible with legacy mode
- * @return : decompressed size if known, 0 otherwise
-             note : 0 can mean any of the following :
-                   - frame content is empty
-                   - decompressed size field is not present in frame header
-                   - frame header unknown / not supported
-                   - frame header not complete (`srcSize` too small) */
-unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
-{
-    unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
-    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
-    return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
-}
-
-
-/** ZSTD_decodeFrameHeader() :
- * `headerSize` must be the size provided by ZSTD_frameHeaderSize().
- * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
-static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
-{
-    size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
-    if (ZSTD_isError(result)) return result;    /* invalid header */
-    RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
-#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
-    /* Skip the dictID check in fuzzing mode, because it makes the search
-     * harder.
-     */
-    RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
-                    dictionary_wrong);
-#endif
-    if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
-    return 0;
-}
-
-static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
-{
-    ZSTD_frameSizeInfo frameSizeInfo;
-    frameSizeInfo.compressedSize = ret;
-    frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
-    return frameSizeInfo;
-}
-
-static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
-{
-    ZSTD_frameSizeInfo frameSizeInfo;
-    memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
-
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
-    if (ZSTD_isLegacy(src, srcSize))
-        return ZSTD_findFrameSizeInfoLegacy(src, srcSize);
-#endif
-
-    if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
-        && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
-        frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
-        assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
-               frameSizeInfo.compressedSize <= srcSize);
-        return frameSizeInfo;
-    } else {
-        const BYTE* ip = (const BYTE*)src;
-        const BYTE* const ipstart = ip;
-        size_t remainingSize = srcSize;
-        size_t nbBlocks = 0;
-        ZSTD_frameHeader zfh;
-
-        /* Extract Frame Header */
-        {   size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
-            if (ZSTD_isError(ret))
-                return ZSTD_errorFrameSizeInfo(ret);
-            if (ret > 0)
-                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
-        }
-
-        ip += zfh.headerSize;
-        remainingSize -= zfh.headerSize;
-
-        /* Iterate over each block */
-        while (1) {
-            blockProperties_t blockProperties;
-            size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
-            if (ZSTD_isError(cBlockSize))
-                return ZSTD_errorFrameSizeInfo(cBlockSize);
-
-            if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
-                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
-
-            ip += ZSTD_blockHeaderSize + cBlockSize;
-            remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
-            nbBlocks++;
-
-            if (blockProperties.lastBlock) break;
-        }
-
-        /* Final frame content checksum */
-        if (zfh.checksumFlag) {
-            if (remainingSize < 4)
-                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
-            ip += 4;
-        }
-
-        frameSizeInfo.compressedSize = ip - ipstart;
-        frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
-                                        ? zfh.frameContentSize
-                                        : nbBlocks * zfh.blockSizeMax;
-        return frameSizeInfo;
-    }
-}
-
-/** ZSTD_findFrameCompressedSize() :
- *  compatible with legacy mode
- *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
- *  `srcSize` must be at least as large as the frame contained
- *  @return : the compressed size of the frame starting at `src` */
-size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
-{
-    ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
-    return frameSizeInfo.compressedSize;
-}
-
-/** ZSTD_decompressBound() :
- *  compatible with legacy mode
- *  `src` must point to the start of a ZSTD frame or a skippable frame
- *  `srcSize` must be at least as large as the frame contained
- *  @return : the maximum decompressed size of the compressed source
- */
-unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
-{
-    unsigned long long bound = 0;
-    /* Iterate over each frame */
-    while (srcSize > 0) {
-        ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
-        size_t const compressedSize = frameSizeInfo.compressedSize;
-        unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
-        if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
-            return ZSTD_CONTENTSIZE_ERROR;
-        assert(srcSize >= compressedSize);
-        src = (const BYTE*)src + compressedSize;
-        srcSize -= compressedSize;
-        bound += decompressedBound;
-    }
-    return bound;
-}
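A sketch of the allocation pattern this bound is meant for, useful when the frame header carries no content size. ZSTD_decompressBound() is advanced API (ZSTD_STATIC_LINKING_ONLY), and the error convention of the helper below is an assumption of the sketch.

```c
#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_decompressBound is advanced API */
#include <zstd.h>

/* Sketch: size the destination from the worst-case bound, then decompress in one shot.
 * On success returns the regenerated size and leaves the malloc'd buffer in *dstOut;
 * any failure returns a value for which ZSTD_isError() is non-zero. */
size_t decompress_with_bound(void** dstOut, const void* src, size_t srcSize)
{
    unsigned long long const bound = ZSTD_decompressBound(src, srcSize);
    *dstOut = NULL;
    if (bound == ZSTD_CONTENTSIZE_ERROR) return (size_t)-1;   /* invalid or truncated frame(s) */
    *dstOut = malloc(bound ? (size_t)bound : 1);              /* assumes bound fits in size_t */
    if (*dstOut == NULL) return (size_t)-1;
    return ZSTD_decompress(*dstOut, (size_t)bound, src, srcSize);
}
```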
-
-
-/*-*************************************************************
- *   Frame decoding
- ***************************************************************/
-
-
-void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
-{
-    if (dst != dctx->previousDstEnd) {   /* not contiguous */
-        dctx->dictEnd = dctx->previousDstEnd;
-        dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
-        dctx->prefixStart = dst;
-        dctx->previousDstEnd = dst;
-    }
-}
-
-/** ZSTD_insertBlock() :
-    insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
-size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
-{
-    ZSTD_checkContinuity(dctx, blockStart);
-    dctx->previousDstEnd = (const char*)blockStart + blockSize;
-    return blockSize;
-}
-
-
-static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
-                          const void* src, size_t srcSize)
-{
-    DEBUGLOG(5, "ZSTD_copyRawBlock");
-    if (dst == NULL) {
-        if (srcSize == 0) return 0;
-        RETURN_ERROR(dstBuffer_null);
-    }
-    RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall);
-    memcpy(dst, src, srcSize);
-    return srcSize;
-}
-
-static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
-                               BYTE b,
-                               size_t regenSize)
-{
-    if (dst == NULL) {
-        if (regenSize == 0) return 0;
-        RETURN_ERROR(dstBuffer_null);
-    }
-    RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall);
-    memset(dst, b, regenSize);
-    return regenSize;
-}
-
-
-/*! ZSTD_decompressFrame() :
- * @dctx must be properly initialized
- *  will update *srcPtr and *srcSizePtr,
- *  to make *srcPtr progress by one frame. */
-static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
-                                   void* dst, size_t dstCapacity,
-                             const void** srcPtr, size_t *srcSizePtr)
-{
-    const BYTE* ip = (const BYTE*)(*srcPtr);
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* const oend = ostart + dstCapacity;
-    BYTE* op = ostart;
-    size_t remainingSrcSize = *srcSizePtr;
-
-    DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
-
-    /* check */
-    RETURN_ERROR_IF(
-        remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize,
-        srcSize_wrong);
-
-    /* Frame Header */
-    {   size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_FRAMEHEADERSIZE_PREFIX);
-        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
-        RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
-                        srcSize_wrong);
-        FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
-        ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
-    }
-
-    /* Loop on each block */
-    while (1) {
-        size_t decodedSize;
-        blockProperties_t blockProperties;
-        size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
-        if (ZSTD_isError(cBlockSize)) return cBlockSize;
-
-        ip += ZSTD_blockHeaderSize;
-        remainingSrcSize -= ZSTD_blockHeaderSize;
-        RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong);
-
-        switch(blockProperties.blockType)
-        {
-        case bt_compressed:
-            decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize, /* frame */ 1);
-            break;
-        case bt_raw :
-            decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
-            break;
-        case bt_rle :
-            decodedSize = ZSTD_setRleBlock(op, oend-op, *ip, blockProperties.origSize);
-            break;
-        case bt_reserved :
-        default:
-            RETURN_ERROR(corruption_detected);
-        }
-
-        if (ZSTD_isError(decodedSize)) return decodedSize;
-        if (dctx->fParams.checksumFlag)
-            XXH64_update(&dctx->xxhState, op, decodedSize);
-        op += decodedSize;
-        ip += cBlockSize;
-        remainingSrcSize -= cBlockSize;
-        if (blockProperties.lastBlock) break;
-    }
-
-    if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
-        RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
-                        corruption_detected);
-    }
-    if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
-        U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
-        U32 checkRead;
-        RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong);
-        checkRead = MEM_readLE32(ip);
-        RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong);
-        ip += 4;
-        remainingSrcSize -= 4;
-    }
-
-    /* Allow caller to get size read */
-    *srcPtr = ip;
-    *srcSizePtr = remainingSrcSize;
-    return op-ostart;
-}
-
-static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
-                                        void* dst, size_t dstCapacity,
-                                  const void* src, size_t srcSize,
-                                  const void* dict, size_t dictSize,
-                                  const ZSTD_DDict* ddict)
-{
-    void* const dststart = dst;
-    int moreThan1Frame = 0;
-
-    DEBUGLOG(5, "ZSTD_decompressMultiFrame");
-    assert(dict==NULL || ddict==NULL);  /* either dict or ddict set, not both */
-
-    if (ddict) {
-        dict = ZSTD_DDict_dictContent(ddict);
-        dictSize = ZSTD_DDict_dictSize(ddict);
-    }
-
-    while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) {
-
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
-        if (ZSTD_isLegacy(src, srcSize)) {
-            size_t decodedSize;
-            size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
-            if (ZSTD_isError(frameSize)) return frameSize;
-            RETURN_ERROR_IF(dctx->staticSize, memory_allocation,
-                "legacy support is not compatible with static dctx");
-
-            decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
-            if (ZSTD_isError(decodedSize)) return decodedSize;
-
-            assert(decodedSize <= dstCapacity);
-            dst = (BYTE*)dst + decodedSize;
-            dstCapacity -= decodedSize;
-
-            src = (const BYTE*)src + frameSize;
-            srcSize -= frameSize;
-
-            continue;
-        }
-#endif
-
-        {   U32 const magicNumber = MEM_readLE32(src);
-            DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
-                        (unsigned)magicNumber, ZSTD_MAGICNUMBER);
-            if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
-                size_t const skippableSize = readSkippableFrameSize(src, srcSize);
-                FORWARD_IF_ERROR(skippableSize);
-                assert(skippableSize <= srcSize);
-
-                src = (const BYTE *)src + skippableSize;
-                srcSize -= skippableSize;
-                continue;
-        }   }
-
-        if (ddict) {
-            /* we were called from ZSTD_decompress_usingDDict */
-            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict));
-        } else {
-            /* this will initialize correctly with no dict if dict == NULL, so
-             * use this in all cases but ddict */
-            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
-        }
-        ZSTD_checkContinuity(dctx, dst);
-
-        {   const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
-                                                    &src, &srcSize);
-            RETURN_ERROR_IF(
-                (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
-             && (moreThan1Frame==1),
-                srcSize_wrong,
-                "at least one frame successfully completed, but following "
-                "bytes are garbage: it's more likely to be a srcSize error, "
-                "specifying more bytes than compressed size of frame(s). This "
-                "error message replaces ERROR(prefix_unknown), which would be "
-                "confusing, as the first header is actually correct. Note that "
-                "one could be unlucky, it might be a corruption error instead, "
-                "happening right at the place where we expect zstd magic "
-                "bytes. But this is _much_ less likely than a srcSize field "
-                "error.");
-            if (ZSTD_isError(res)) return res;
-            assert(res <= dstCapacity);
-            dst = (BYTE*)dst + res;
-            dstCapacity -= res;
-        }
-        moreThan1Frame = 1;
-    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
-
-    RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
-
-    return (BYTE*)dst - (BYTE*)dststart;
-}
-
-size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
-                                 void* dst, size_t dstCapacity,
-                           const void* src, size_t srcSize,
-                           const void* dict, size_t dictSize)
-{
-    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
-}
-
-
-static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
-{
-    switch (dctx->dictUses) {
-    default:
-        assert(0 /* Impossible */);
-        /* fall-through */
-    case ZSTD_dont_use:
-        ZSTD_clearDict(dctx);
-        return NULL;
-    case ZSTD_use_indefinitely:
-        return dctx->ddict;
-    case ZSTD_use_once:
-        dctx->dictUses = ZSTD_dont_use;
-        return dctx->ddict;
-    }
-}
-
-size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx));
-}
-
-
-size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
-    size_t regenSize;
-    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
-    RETURN_ERROR_IF(dctx==NULL, memory_allocation);
-    regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
-    ZSTD_freeDCtx(dctx);
-    return regenSize;
-#else   /* stack mode */
-    ZSTD_DCtx dctx;
-    ZSTD_initDCtx_internal(&dctx);
-    return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
-#endif
-}
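A minimal sketch of the one-shot path above, pairing ZSTD_getFrameContentSize() with ZSTD_decompress(); it assumes src/srcSize hold exactly one frame whose header announces its content size, and the helper name is purely illustrative.

```c
#include <stdlib.h>
#include <zstd.h>

/* Sketch: allocate the destination from the announced content size, decompress,
 * and hand the buffer back to the caller (who frees it). Returns NULL on any failure. */
void* simple_decompress(const void* src, size_t srcSize, size_t* dstSizeOut)
{
    unsigned long long const contentSize = ZSTD_getFrameContentSize(src, srcSize);
    if (contentSize == ZSTD_CONTENTSIZE_ERROR) return NULL;     /* not a zstd frame */
    if (contentSize == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;   /* no size in header: use streaming */

    {   void* const dst = malloc(contentSize ? (size_t)contentSize : 1);
        size_t const dSize = dst ? ZSTD_decompress(dst, (size_t)contentSize, src, srcSize) : 0;
        if (dst == NULL || ZSTD_isError(dSize)) { free(dst); return NULL; }
        *dstSizeOut = dSize;
        return dst;
    }
}
```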
-
-
-/*-**************************************
-*   Advanced Streaming Decompression API
-*   Bufferless and synchronous
-****************************************/
-size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
-
-ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
-    switch(dctx->stage)
-    {
-    default:   /* should not happen */
-        assert(0);
-    case ZSTDds_getFrameHeaderSize:
-    case ZSTDds_decodeFrameHeader:
-        return ZSTDnit_frameHeader;
-    case ZSTDds_decodeBlockHeader:
-        return ZSTDnit_blockHeader;
-    case ZSTDds_decompressBlock:
-        return ZSTDnit_block;
-    case ZSTDds_decompressLastBlock:
-        return ZSTDnit_lastBlock;
-    case ZSTDds_checkChecksum:
-        return ZSTDnit_checksum;
-    case ZSTDds_decodeSkippableHeader:
-    case ZSTDds_skipFrame:
-        return ZSTDnit_skippableFrame;
-    }
-}
-
-static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
-
-/** ZSTD_decompressContinue() :
- *  srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
- *  @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
- *            or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
-    /* Sanity check */
-    RETURN_ERROR_IF(srcSize != dctx->expected, srcSize_wrong, "not allowed");
-    if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
-
-    switch (dctx->stage)
-    {
-    case ZSTDds_getFrameHeaderSize :
-        assert(src != NULL);
-        if (dctx->format == ZSTD_f_zstd1) {  /* allows header */
-            assert(srcSize >= ZSTD_FRAMEIDSIZE);  /* to read skippable magic number */
-            if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {        /* skippable frame */
-                memcpy(dctx->headerBuffer, src, srcSize);
-                dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize;  /* remaining to load to get full skippable frame header */
-                dctx->stage = ZSTDds_decodeSkippableHeader;
-                return 0;
-        }   }
-        dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
-        if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
-        memcpy(dctx->headerBuffer, src, srcSize);
-        dctx->expected = dctx->headerSize - srcSize;
-        dctx->stage = ZSTDds_decodeFrameHeader;
-        return 0;
-
-    case ZSTDds_decodeFrameHeader:
-        assert(src != NULL);
-        memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
-        FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
-        dctx->expected = ZSTD_blockHeaderSize;
-        dctx->stage = ZSTDds_decodeBlockHeader;
-        return 0;
-
-    case ZSTDds_decodeBlockHeader:
-        {   blockProperties_t bp;
-            size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
-            if (ZSTD_isError(cBlockSize)) return cBlockSize;
-            dctx->expected = cBlockSize;
-            dctx->bType = bp.blockType;
-            dctx->rleSize = bp.origSize;
-            if (cBlockSize) {
-                dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
-                return 0;
-            }
-            /* empty block */
-            if (bp.lastBlock) {
-                if (dctx->fParams.checksumFlag) {
-                    dctx->expected = 4;
-                    dctx->stage = ZSTDds_checkChecksum;
-                } else {
-                    dctx->expected = 0; /* end of frame */
-                    dctx->stage = ZSTDds_getFrameHeaderSize;
-                }
-            } else {
-                dctx->expected = ZSTD_blockHeaderSize;  /* jump to next header */
-                dctx->stage = ZSTDds_decodeBlockHeader;
-            }
-            return 0;
-        }
-
-    case ZSTDds_decompressLastBlock:
-    case ZSTDds_decompressBlock:
-        DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
-        {   size_t rSize;
-            switch(dctx->bType)
-            {
-            case bt_compressed:
-                DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
-                rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
-                break;
-            case bt_raw :
-                rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
-                break;
-            case bt_rle :
-                rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize);
-                break;
-            case bt_reserved :   /* should never happen */
-            default:
-                RETURN_ERROR(corruption_detected);
-            }
-            if (ZSTD_isError(rSize)) return rSize;
-            DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
-            dctx->decodedSize += rSize;
-            if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
-
-            if (dctx->stage == ZSTDds_decompressLastBlock) {   /* end of frame */
-                DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
-                RETURN_ERROR_IF(
-                    dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
-                 && dctx->decodedSize != dctx->fParams.frameContentSize,
-                    corruption_detected);
-                if (dctx->fParams.checksumFlag) {  /* another round for frame checksum */
-                    dctx->expected = 4;
-                    dctx->stage = ZSTDds_checkChecksum;
-                } else {
-                    dctx->expected = 0;   /* ends here */
-                    dctx->stage = ZSTDds_getFrameHeaderSize;
-                }
-            } else {
-                dctx->stage = ZSTDds_decodeBlockHeader;
-                dctx->expected = ZSTD_blockHeaderSize;
-                dctx->previousDstEnd = (char*)dst + rSize;
-            }
-            return rSize;
-        }
-
-    case ZSTDds_checkChecksum:
-        assert(srcSize == 4);  /* guaranteed by dctx->expected */
-        {   U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
-            U32 const check32 = MEM_readLE32(src);
-            DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
-            RETURN_ERROR_IF(check32 != h32, checksum_wrong);
-            dctx->expected = 0;
-            dctx->stage = ZSTDds_getFrameHeaderSize;
-            return 0;
-        }
-
-    case ZSTDds_decodeSkippableHeader:
-        assert(src != NULL);
-        assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
-        memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize);   /* complete skippable header */
-        dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE);   /* note : dctx->expected can grow seriously large, beyond local buffer size */
-        dctx->stage = ZSTDds_skipFrame;
-        return 0;
-
-    case ZSTDds_skipFrame:
-        dctx->expected = 0;
-        dctx->stage = ZSTDds_getFrameHeaderSize;
-        return 0;
-
-    default:
-        assert(0);   /* impossible */
-        RETURN_ERROR(GENERIC);   /* some compilers require default to do something */
-    }
-}
-
-
-static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    dctx->dictEnd = dctx->previousDstEnd;
-    dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
-    dctx->prefixStart = dict;
-    dctx->previousDstEnd = (const char*)dict + dictSize;
-    return 0;
-}
-
-/*! ZSTD_loadDEntropy() :
- *  dict : must point at beginning of a valid zstd dictionary.
- * @return : size of entropy tables read */
-size_t
-ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
-                  const void* const dict, size_t const dictSize)
-{
-    const BYTE* dictPtr = (const BYTE*)dict;
-    const BYTE* const dictEnd = dictPtr + dictSize;
-
-    RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted);
-    assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY);   /* dict must be valid */
-    dictPtr += 8;   /* skip header = magic + dictID */
-
-    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
-    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
-    ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
-    {   void* const workspace = &entropy->LLTable;   /* use fse tables as temporary workspace; implies fse tables are grouped together */
-        size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
-#ifdef HUF_FORCE_DECOMPRESS_X1
-        /* in minimal huffman, we always use X1 variants */
-        size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
-                                                dictPtr, dictEnd - dictPtr,
-                                                workspace, workspaceSize);
-#else
-        size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
-                                                dictPtr, dictEnd - dictPtr,
-                                                workspace, workspaceSize);
-#endif
-        RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted);
-        dictPtr += hSize;
-    }
-
-    {   short offcodeNCount[MaxOff+1];
-        unsigned offcodeMaxValue = MaxOff, offcodeLog;
-        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
-        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
-        RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted);
-        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
-        ZSTD_buildFSETable( entropy->OFTable,
-                            offcodeNCount, offcodeMaxValue,
-                            OF_base, OF_bits,
-                            offcodeLog);
-        dictPtr += offcodeHeaderSize;
-    }
-
-    {   short matchlengthNCount[MaxML+1];
-        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
-        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
-        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
-        RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted);
-        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
-        ZSTD_buildFSETable( entropy->MLTable,
-                            matchlengthNCount, matchlengthMaxValue,
-                            ML_base, ML_bits,
-                            matchlengthLog);
-        dictPtr += matchlengthHeaderSize;
-    }
-
-    {   short litlengthNCount[MaxLL+1];
-        unsigned litlengthMaxValue = MaxLL, litlengthLog;
-        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
-        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
-        RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted);
-        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
-        ZSTD_buildFSETable( entropy->LLTable,
-                            litlengthNCount, litlengthMaxValue,
-                            LL_base, LL_bits,
-                            litlengthLog);
-        dictPtr += litlengthHeaderSize;
-    }
-
-    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
-    {   int i;
-        size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
-        for (i=0; i<3; i++) {
-            U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
-            RETURN_ERROR_IF(rep==0 || rep >= dictContentSize,
-                            dictionary_corrupted);
-            entropy->rep[i] = rep;
-    }   }
-
-    return dictPtr - (const BYTE*)dict;
-}
-
-static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
-    {   U32 const magic = MEM_readLE32(dict);
-        if (magic != ZSTD_MAGIC_DICTIONARY) {
-            return ZSTD_refDictContent(dctx, dict, dictSize);   /* pure content mode */
-    }   }
-    dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
-
-    /* load entropy tables */
-    {   size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
-        RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted);
-        dict = (const char*)dict + eSize;
-        dictSize -= eSize;
-    }
-    dctx->litEntropy = dctx->fseEntropy = 1;
-
-    /* reference dictionary content */
-    return ZSTD_refDictContent(dctx, dict, dictSize);
-}
-
-size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
-{
-    assert(dctx != NULL);
-    dctx->expected = ZSTD_startingInputLength(dctx->format);  /* dctx->format must be properly set */
-    dctx->stage = ZSTDds_getFrameHeaderSize;
-    dctx->decodedSize = 0;
-    dctx->previousDstEnd = NULL;
-    dctx->prefixStart = NULL;
-    dctx->virtualStart = NULL;
-    dctx->dictEnd = NULL;
-    dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
-    dctx->litEntropy = dctx->fseEntropy = 0;
-    dctx->dictID = 0;
-    ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
-    memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue));  /* initial repcodes */
-    dctx->LLTptr = dctx->entropy.LLTable;
-    dctx->MLTptr = dctx->entropy.MLTable;
-    dctx->OFTptr = dctx->entropy.OFTable;
-    dctx->HUFptr = dctx->entropy.hufTable;
-    return 0;
-}
-
-size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
-    if (dict && dictSize)
-        RETURN_ERROR_IF(
-            ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
-            dictionary_corrupted);
-    return 0;
-}
-
-
-/* ======   ZSTD_DDict   ====== */
-
-size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
-{
-    DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
-    assert(dctx != NULL);
-    if (ddict) {
-        const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict);
-        size_t const dictSize = ZSTD_DDict_dictSize(ddict);
-        const void* const dictEnd = dictStart + dictSize;
-        dctx->ddictIsCold = (dctx->dictEnd != dictEnd);
-        DEBUGLOG(4, "DDict is %s",
-                    dctx->ddictIsCold ? "~cold~" : "hot!");
-    }
-    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
-    if (ddict) {   /* NULL ddict is equivalent to no dictionary */
-        ZSTD_copyDDictParameters(dctx, ddict);
-    }
-    return 0;
-}
-
-/*! ZSTD_getDictID_fromDict() :
- *  Provides the dictID stored within dictionary.
- *  If @return == 0, the dictionary is not conformant with the Zstandard specification.
- *  It can still be loaded, but as a content-only dictionary. */
-unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
-{
-    if (dictSize < 8) return 0;
-    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
-    return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
-}
-
-/*! ZSTD_getDictID_fromFrame() :
- *  Provides the dictID required to decompress frame stored within `src`.
- *  If @return == 0, the dictID could not be decoded.
- *  This could be for one of the following reasons :
- *  - The frame does not require a dictionary (most common case).
- *  - The frame was built with dictID intentionally removed.
- *    The needed dictionary is hidden information.
- *    Note : this use case also happens when using a non-conformant dictionary.
- *  - `srcSize` is too small, and as a result, frame header could not be decoded.
- *    Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
- *  - This is not a Zstandard frame.
- *  When identifying the exact failure cause, it's possible to use
- *  ZSTD_getFrameHeader(), which will provide a more precise error code. */
-unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
-{
-    ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
-    size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
-    if (ZSTD_isError(hError)) return 0;
-    return zfp.dictID;
-}
-
-
-/*! ZSTD_decompress_usingDDict() :
-*   Decompression using a pre-digested Dictionary
-*   Use dictionary without significant overhead. */
-size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
-                                  void* dst, size_t dstCapacity,
-                            const void* src, size_t srcSize,
-                            const ZSTD_DDict* ddict)
-{
-    /* pass content and size in case legacy frames are encountered */
-    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
-                                     NULL, 0,
-                                     ddict);
-}
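The one-shot helper above is the natural entry point for pre-digested dictionaries. As a hedged illustration only (not part of the vendored file; the function and buffer names below are hypothetical), a caller built against the public zstd.h header could drive it like this:

#include <stdio.h>
#include <zstd.h>   /* public API header, assumed available to the caller */

/* Hypothetical sketch: decompress one frame with a pre-digested dictionary. */
static size_t decompress_with_ddict(void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize,
                                    const void* dictBuf, size_t dictSize)
{
    ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
    ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);  /* digest the dictionary once */
    size_t dSize = 0;
    if (dctx != NULL && ddict != NULL) {
        dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);
        if (ZSTD_isError(dSize)) {
            fprintf(stderr, "decompression failed: %s\n", ZSTD_getErrorName(dSize));
            dSize = 0;
        }
    }
    ZSTD_freeDDict(ddict);   /* both free functions accept NULL */
    ZSTD_freeDCtx(dctx);
    return dSize;
}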
-
-
-/*=====================================
-*   Streaming decompression
-*====================================*/
-
-ZSTD_DStream* ZSTD_createDStream(void)
-{
-    DEBUGLOG(3, "ZSTD_createDStream");
-    return ZSTD_createDStream_advanced(ZSTD_defaultCMem);
-}
-
-ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
-{
-    return ZSTD_initStaticDCtx(workspace, workspaceSize);
-}
-
-ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
-{
-    return ZSTD_createDCtx_advanced(customMem);
-}
-
-size_t ZSTD_freeDStream(ZSTD_DStream* zds)
-{
-    return ZSTD_freeDCtx(zds);
-}
-
-
-/* ***  Initialization  *** */
-
-size_t ZSTD_DStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
-size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
-
-size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
-                                   const void* dict, size_t dictSize,
-                                         ZSTD_dictLoadMethod_e dictLoadMethod,
-                                         ZSTD_dictContentType_e dictContentType)
-{
-    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
-    ZSTD_clearDict(dctx);
-    if (dict && dictSize >= 8) {
-        dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
-        RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation);
-        dctx->ddict = dctx->ddictLocal;
-        dctx->dictUses = ZSTD_use_indefinitely;
-    }
-    return 0;
-}
-
-size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
-}
-
-size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
-}
-
-size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
-{
-    FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType));
-    dctx->dictUses = ZSTD_use_once;
-    return 0;
-}
-
-size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
-{
-    return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
-}
-
-
-/* ZSTD_initDStream_usingDict() :
- * return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX.
- * this function cannot fail */
-size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
-{
-    DEBUGLOG(4, "ZSTD_initDStream_usingDict");
-    FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) );
-    FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
-    return ZSTD_FRAMEHEADERSIZE_PREFIX;
-}
-
-/* note : this variant can't fail */
-size_t ZSTD_initDStream(ZSTD_DStream* zds)
-{
-    DEBUGLOG(4, "ZSTD_initDStream");
-    return ZSTD_initDStream_usingDDict(zds, NULL);
-}
-
-/* ZSTD_initDStream_usingDDict() :
- * ddict will just be referenced, and must outlive decompression session
- * this function cannot fail */
-size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
-{
-    FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) );
-    FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) );
-    return ZSTD_FRAMEHEADERSIZE_PREFIX;
-}
-
-/* ZSTD_resetDStream() :
- * return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX.
- * this function cannot fail */
-size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
-{
-    FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only));
-    return ZSTD_FRAMEHEADERSIZE_PREFIX;
-}
-
-
-size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
-{
-    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
-    ZSTD_clearDict(dctx);
-    if (ddict) {
-        dctx->ddict = ddict;
-        dctx->dictUses = ZSTD_use_indefinitely;
-    }
-    return 0;
-}
-
-/* ZSTD_DCtx_setMaxWindowSize() :
- * note : no direct equivalence in ZSTD_DCtx_setParameter,
- * since this version sets windowSize, and the other sets windowLog */
-size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
-{
-    ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
-    size_t const min = (size_t)1 << bounds.lowerBound;
-    size_t const max = (size_t)1 << bounds.upperBound;
-    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
-    RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound);
-    RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound);
-    dctx->maxWindowSize = maxWindowSize;
-    return 0;
-}
-
-size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
-{
-    return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, format);
-}
-
-ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
-{
-    ZSTD_bounds bounds = { 0, 0, 0 };
-    switch(dParam) {
-        case ZSTD_d_windowLogMax:
-            bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN;
-            bounds.upperBound = ZSTD_WINDOWLOG_MAX;
-            return bounds;
-        case ZSTD_d_format:
-            bounds.lowerBound = (int)ZSTD_f_zstd1;
-            bounds.upperBound = (int)ZSTD_f_zstd1_magicless;
-            ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
-            return bounds;
-        default:;
-    }
-    bounds.error = ERROR(parameter_unsupported);
-    return bounds;
-}
-
-/* ZSTD_dParam_withinBounds:
- * @return 1 if value is within dParam bounds,
- * 0 otherwise */
-static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
-{
-    ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam);
-    if (ZSTD_isError(bounds.error)) return 0;
-    if (value < bounds.lowerBound) return 0;
-    if (value > bounds.upperBound) return 0;
-    return 1;
-}
-
-#define CHECK_DBOUNDS(p,v) {                \
-    RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound); \
-}
-
-size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
-{
-    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
-    switch(dParam) {
-        case ZSTD_d_windowLogMax:
-            if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
-            CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
-            dctx->maxWindowSize = ((size_t)1) << value;
-            return 0;
-        case ZSTD_d_format:
-            CHECK_DBOUNDS(ZSTD_d_format, value);
-            dctx->format = (ZSTD_format_e)value;
-            return 0;
-        default:;
-    }
-    RETURN_ERROR(parameter_unsupported);
-}
-
-size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
-{
-    if ( (reset == ZSTD_reset_session_only)
-      || (reset == ZSTD_reset_session_and_parameters) ) {
-        dctx->streamStage = zdss_init;
-        dctx->noForwardProgress = 0;
-    }
-    if ( (reset == ZSTD_reset_parameters)
-      || (reset == ZSTD_reset_session_and_parameters) ) {
-        RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
-        ZSTD_clearDict(dctx);
-        dctx->format = ZSTD_f_zstd1;
-        dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
-    }
-    return 0;
-}
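ZSTD_DCtx_setParameter() and ZSTD_DCtx_reset() are meant to be combined: tune the context once, then reset only the session between frames. A minimal sketch assuming the public zstd.h advanced API (the surrounding decode loop is elided as comments):

#include <zstd.h>

/* Hypothetical sketch: cap the decoder window to 2^27 bytes, then reuse the
 * same context across frames by resetting only the session state. */
static void decode_many_frames(void)
{
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    if (dctx != NULL) {
        size_t const err = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
        if (!ZSTD_isError(err)) {
            /* ... decompress one frame with ZSTD_decompressStream(dctx, ...) ... */
            ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only);  /* parameters and any referenced dictionary are kept */
            /* ... decompress the next frame with the same settings ... */
        }
    }
    ZSTD_freeDCtx(dctx);
}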
-
-
-size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
-{
-    return ZSTD_sizeof_DCtx(dctx);
-}
-
-size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
-{
-    size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
-    unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
-    unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
-    size_t const minRBSize = (size_t) neededSize;
-    RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
-                    frameParameter_windowTooLarge);
-    return minRBSize;
-}
-
-size_t ZSTD_estimateDStreamSize(size_t windowSize)
-{
-    size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
-    size_t const inBuffSize = blockSize;  /* no block can be larger */
-    size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
-    return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
-}
-
-size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
-{
-    U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;   /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
-    ZSTD_frameHeader zfh;
-    size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
-    if (ZSTD_isError(err)) return err;
-    RETURN_ERROR_IF(err>0, srcSize_wrong);
-    RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
-                    frameParameter_windowTooLarge);
-    return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
-}
-
-
-/* *****   Decompression   ***** */
-
-MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    size_t const length = MIN(dstCapacity, srcSize);
-    memcpy(dst, src, length);
-    return length;
-}
-
-
-size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
-{
-    const char* const istart = (const char*)(input->src) + input->pos;
-    const char* const iend = (const char*)(input->src) + input->size;
-    const char* ip = istart;
-    char* const ostart = (char*)(output->dst) + output->pos;
-    char* const oend = (char*)(output->dst) + output->size;
-    char* op = ostart;
-    U32 someMoreWork = 1;
-
-    DEBUGLOG(5, "ZSTD_decompressStream");
-    RETURN_ERROR_IF(
-        input->pos > input->size,
-        srcSize_wrong,
-        "forbidden. in: pos: %u   vs size: %u",
-        (U32)input->pos, (U32)input->size);
-    RETURN_ERROR_IF(
-        output->pos > output->size,
-        dstSize_tooSmall,
-        "forbidden. out: pos: %u   vs size: %u",
-        (U32)output->pos, (U32)output->size);
-    DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
-
-    while (someMoreWork) {
-        switch(zds->streamStage)
-        {
-        case zdss_init :
-            DEBUGLOG(5, "stage zdss_init => transparent reset ");
-            zds->streamStage = zdss_loadHeader;
-            zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
-            zds->legacyVersion = 0;
-            zds->hostageByte = 0;
-            /* fall-through */
-
-        case zdss_loadHeader :
-            DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
-            if (zds->legacyVersion) {
-                RETURN_ERROR_IF(zds->staticSize, memory_allocation,
-                    "legacy support is incompatible with static dctx");
-                {   size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
-                    if (hint==0) zds->streamStage = zdss_init;
-                    return hint;
-            }   }
-#endif
-            {   size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
-                DEBUGLOG(5, "header size : %u", (U32)hSize);
-                if (ZSTD_isError(hSize)) {
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
-                    U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
-                    if (legacyVersion) {
-                        ZSTD_DDict const* const ddict = ZSTD_getDDict(zds);
-                        const void* const dict = ddict ? ZSTD_DDict_dictContent(ddict) : NULL;
-                        size_t const dictSize = ddict ? ZSTD_DDict_dictSize(ddict) : 0;
-                        DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
-                        RETURN_ERROR_IF(zds->staticSize, memory_allocation,
-                            "legacy support is incompatible with static dctx");
-                        FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext,
-                                    zds->previousLegacyVersion, legacyVersion,
-                                    dict, dictSize));
-                        zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
-                        {   size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input);
-                            if (hint==0) zds->streamStage = zdss_init;   /* or stay in stage zdss_loadHeader */
-                            return hint;
-                    }   }
-#endif
-                    return hSize;   /* error */
-                }
-                if (hSize != 0) {   /* need more input */
-                    size_t const toLoad = hSize - zds->lhSize;   /* if hSize!=0, hSize > zds->lhSize */
-                    size_t const remainingInput = (size_t)(iend-ip);
-                    assert(iend >= ip);
-                    if (toLoad > remainingInput) {   /* not enough input to load full header */
-                        if (remainingInput > 0) {
-                            memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
-                            zds->lhSize += remainingInput;
-                        }
-                        input->pos = input->size;
-                        return (MAX(ZSTD_FRAMEHEADERSIZE_MIN, hSize) - zds->lhSize) + ZSTD_blockHeaderSize;   /* remaining header bytes + next block header */
-                    }
-                    assert(ip != NULL);
-                    memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
-                    break;
-            }   }
-
-            /* check for single-pass mode opportunity */
-            if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
-                && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
-                size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart);
-                if (cSize <= (size_t)(iend-istart)) {
-                    /* shortcut : using single-pass mode */
-                    size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, ZSTD_getDDict(zds));
-                    if (ZSTD_isError(decompressedSize)) return decompressedSize;
-                    DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
-                    ip = istart + cSize;
-                    op += decompressedSize;
-                    zds->expected = 0;
-                    zds->streamStage = zdss_init;
-                    someMoreWork = 0;
-                    break;
-            }   }
-
-            /* Consume header (see ZSTDds_decodeFrameHeader) */
-            DEBUGLOG(4, "Consume header");
-            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)));
-
-            if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */
-                zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
-                zds->stage = ZSTDds_skipFrame;
-            } else {
-                FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
-                zds->expected = ZSTD_blockHeaderSize;
-                zds->stage = ZSTDds_decodeBlockHeader;
-            }
-
-            /* control buffer memory usage */
-            DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
-                        (U32)(zds->fParams.windowSize >>10),
-                        (U32)(zds->maxWindowSize >> 10) );
-            zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
-            RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
-                            frameParameter_windowTooLarge);
-
-            /* Adapt buffer sizes to frame header instructions */
-            {   size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
-                size_t const neededOutBuffSize = ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize);
-                if ((zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize)) {
-                    size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
-                    DEBUGLOG(4, "inBuff  : from %u to %u",
-                                (U32)zds->inBuffSize, (U32)neededInBuffSize);
-                    DEBUGLOG(4, "outBuff : from %u to %u",
-                                (U32)zds->outBuffSize, (U32)neededOutBuffSize);
-                    if (zds->staticSize) {  /* static DCtx */
-                        DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
-                        assert(zds->staticSize >= sizeof(ZSTD_DCtx));  /* controlled at init */
-                        RETURN_ERROR_IF(
-                            bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
-                            memory_allocation);
-                    } else {
-                        ZSTD_free(zds->inBuff, zds->customMem);
-                        zds->inBuffSize = 0;
-                        zds->outBuffSize = 0;
-                        zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);
-                        RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation);
-                    }
-                    zds->inBuffSize = neededInBuffSize;
-                    zds->outBuff = zds->inBuff + zds->inBuffSize;
-                    zds->outBuffSize = neededOutBuffSize;
-            }   }
-            zds->streamStage = zdss_read;
-            /* fall-through */
-
-        case zdss_read:
-            DEBUGLOG(5, "stage zdss_read");
-            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
-                DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
-                if (neededInSize==0) {  /* end of frame */
-                    zds->streamStage = zdss_init;
-                    someMoreWork = 0;
-                    break;
-                }
-                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */
-                    int const isSkipFrame = ZSTD_isSkipFrame(zds);
-                    size_t const decodedSize = ZSTD_decompressContinue(zds,
-                        zds->outBuff + zds->outStart, (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart),
-                        ip, neededInSize);
-                    if (ZSTD_isError(decodedSize)) return decodedSize;
-                    ip += neededInSize;
-                    if (!decodedSize && !isSkipFrame) break;   /* this was just a header */
-                    zds->outEnd = zds->outStart + decodedSize;
-                    zds->streamStage = zdss_flush;
-                    break;
-            }   }
-            if (ip==iend) { someMoreWork = 0; break; }   /* no more input */
-            zds->streamStage = zdss_load;
-            /* fall-through */
-
-        case zdss_load:
-            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
-                size_t const toLoad = neededInSize - zds->inPos;
-                int const isSkipFrame = ZSTD_isSkipFrame(zds);
-                size_t loadedSize;
-                if (isSkipFrame) {
-                    loadedSize = MIN(toLoad, (size_t)(iend-ip));
-                } else {
-                    RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
-                                    corruption_detected,
-                                    "should never happen");
-                    loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip);
-                }
-                ip += loadedSize;
-                zds->inPos += loadedSize;
-                if (loadedSize < toLoad) { someMoreWork = 0; break; }   /* not enough input, wait for more */
-
-                /* decode loaded input */
-                {   size_t const decodedSize = ZSTD_decompressContinue(zds,
-                        zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
-                        zds->inBuff, neededInSize);
-                    if (ZSTD_isError(decodedSize)) return decodedSize;
-                    zds->inPos = 0;   /* input is consumed */
-                    if (!decodedSize && !isSkipFrame) { zds->streamStage = zdss_read; break; }   /* this was just a header */
-                    zds->outEnd = zds->outStart +  decodedSize;
-            }   }
-            zds->streamStage = zdss_flush;
-            /* fall-through */
-
-        case zdss_flush:
-            {   size_t const toFlushSize = zds->outEnd - zds->outStart;
-                size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize);
-                op += flushedSize;
-                zds->outStart += flushedSize;
-                if (flushedSize == toFlushSize) {  /* flush completed */
-                    zds->streamStage = zdss_read;
-                    if ( (zds->outBuffSize < zds->fParams.frameContentSize)
-                      && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
-                        DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
-                                (int)(zds->outBuffSize - zds->outStart),
-                                (U32)zds->fParams.blockSizeMax);
-                        zds->outStart = zds->outEnd = 0;
-                    }
-                    break;
-            }   }
-            /* cannot complete flush */
-            someMoreWork = 0;
-            break;
-
-        default:
-            assert(0);    /* impossible */
-            RETURN_ERROR(GENERIC);   /* some compilers require default to do something */
-    }   }
-
-    /* result */
-    input->pos = (size_t)(ip - (const char*)(input->src));
-    output->pos = (size_t)(op - (char*)(output->dst));
-    if ((ip==istart) && (op==ostart)) {  /* no forward progress */
-        zds->noForwardProgress ++;
-        if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
-            RETURN_ERROR_IF(op==oend, dstSize_tooSmall);
-            RETURN_ERROR_IF(ip==iend, srcSize_wrong);
-            assert(0);
-        }
-    } else {
-        zds->noForwardProgress = 0;
-    }
-    {   size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
-        if (!nextSrcSizeHint) {   /* frame fully decoded */
-            if (zds->outEnd == zds->outStart) {  /* output fully flushed */
-                if (zds->hostageByte) {
-                    if (input->pos >= input->size) {
-                        /* can't release hostage (not present) */
-                        zds->streamStage = zdss_read;
-                        return 1;
-                    }
-                    input->pos++;  /* release hostage */
-                }   /* zds->hostageByte */
-                return 0;
-            }  /* zds->outEnd == zds->outStart */
-            if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
-                input->pos--;   /* note : pos > 0, otherwise, impossible to finish reading last block */
-                zds->hostageByte=1;
-            }
-            return 1;
-        }  /* nextSrcSizeHint==0 */
-        nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block);   /* preload header of next block */
-        assert(zds->inPos <= nextSrcSizeHint);
-        nextSrcSizeHint -= zds->inPos;   /* part already loaded*/
-        return nextSrcSizeHint;
-    }
-}
-
-size_t ZSTD_decompressStream_simpleArgs (
-                            ZSTD_DCtx* dctx,
-                            void* dst, size_t dstCapacity, size_t* dstPos,
-                      const void* src, size_t srcSize, size_t* srcPos)
-{
-    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
-    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
-    /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
-    size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
-    *dstPos = output.pos;
-    *srcPos = input.pos;
-    return cErr;
-}
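The streaming implementation deleted above is driven through ZSTD_createDStream(), ZSTD_initDStream() and repeated ZSTD_decompressStream() calls. A hedged sketch of a typical C caller using only public zstd.h entry points (the file handling is illustrative, not taken from this repository):

#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

/* Hypothetical sketch: stream-decompress from `fin` to `fout`. */
static int stream_decompress(FILE* fin, FILE* fout)
{
    size_t const inSize  = ZSTD_DStreamInSize();   /* recommended input chunk size */
    size_t const outSize = ZSTD_DStreamOutSize();  /* large enough to flush at least one block */
    void* const inBuf  = malloc(inSize);
    void* const outBuf = malloc(outSize);
    ZSTD_DStream* const zds = ZSTD_createDStream();
    int ok = -1;
    size_t read;

    if (inBuf == NULL || outBuf == NULL || zds == NULL) goto done;
    ZSTD_initDStream(zds);

    while ((read = fread(inBuf, 1, inSize, fin)) > 0) {
        ZSTD_inBuffer input = { inBuf, read, 0 };
        while (input.pos < input.size) {
            ZSTD_outBuffer output = { outBuf, outSize, 0 };
            size_t const ret = ZSTD_decompressStream(zds, &output, &input);  /* 0 => frame fully decoded and flushed */
            if (ZSTD_isError(ret)) {
                fprintf(stderr, "zstd error: %s\n", ZSTD_getErrorName(ret));
                goto done;
            }
            fwrite(outBuf, 1, output.pos, fout);
        }
    }
    ok = 0;

done:
    ZSTD_freeDStream(zds);
    free(outBuf);
    free(inBuf);
    return ok;
}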
diff --git a/vendor/github.com/DataDog/zstd/zstd_decompress_block.c b/vendor/github.com/DataDog/zstd/zstd_decompress_block.c
deleted file mode 100644
index 24f4859..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_decompress_block.c
+++ /dev/null
@@ -1,1322 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-/* zstd_decompress_block :
- * this module takes care of decompressing _compressed_ blocks */
-
-/*-*******************************************************
-*  Dependencies
-*********************************************************/
-#include <string.h>      /* memcpy, memmove, memset */
-#include "compiler.h"    /* prefetch */
-#include "cpu.h"         /* bmi2 */
-#include "mem.h"         /* low level memory routines */
-#define FSE_STATIC_LINKING_ONLY
-#include "fse.h"
-#define HUF_STATIC_LINKING_ONLY
-#include "huf.h"
-#include "zstd_internal.h"
-#include "zstd_decompress_internal.h"   /* ZSTD_DCtx */
-#include "zstd_ddict.h"  /* ZSTD_DDictDictContent */
-#include "zstd_decompress_block.h"
-
-/*_*******************************************************
-*  Macros
-**********************************************************/
-
-/* These two optional macros force the use of one or the other of the two
- * ZSTD_decompressSequences implementations. You can't force both directions
- * at the same time.
- */
-#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
-    defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
-#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!"
-#endif
-
-
-/*_*******************************************************
-*  Memory operations
-**********************************************************/
-static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
-
-
-/*-*************************************************************
- *   Block decoding
- ***************************************************************/
-
-/*! ZSTD_getcBlockSize() :
- *  Provides the size of compressed block from block header `src` */
-size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
-                          blockProperties_t* bpPtr)
-{
-    RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong);
-
-    {   U32 const cBlockHeader = MEM_readLE24(src);
-        U32 const cSize = cBlockHeader >> 3;
-        bpPtr->lastBlock = cBlockHeader & 1;
-        bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
-        bpPtr->origSize = cSize;   /* only useful for RLE */
-        if (bpPtr->blockType == bt_rle) return 1;
-        RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected);
-        return cSize;
-    }
-}
-
-
-/* Hidden declaration for fullbench */
-size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
-                          const void* src, size_t srcSize);
-/*! ZSTD_decodeLiteralsBlock() :
- * @return : nb of bytes read from src (< srcSize )
- *  note : symbol not declared but exposed for fullbench */
-size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
-                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
-{
-    RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected);
-
-    {   const BYTE* const istart = (const BYTE*) src;
-        symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
-
-        switch(litEncType)
-        {
-        case set_repeat:
-            RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted);
-            /* fall-through */
-
-        case set_compressed:
-            RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
-            {   size_t lhSize, litSize, litCSize;
-                U32 singleStream=0;
-                U32 const lhlCode = (istart[0] >> 2) & 3;
-                U32 const lhc = MEM_readLE32(istart);
-                size_t hufSuccess;
-                switch(lhlCode)
-                {
-                case 0: case 1: default:   /* note : default is impossible, since lhlCode is in [0..3] */
-                    /* 2 - 2 - 10 - 10 */
-                    singleStream = !lhlCode;
-                    lhSize = 3;
-                    litSize  = (lhc >> 4) & 0x3FF;
-                    litCSize = (lhc >> 14) & 0x3FF;
-                    break;
-                case 2:
-                    /* 2 - 2 - 14 - 14 */
-                    lhSize = 4;
-                    litSize  = (lhc >> 4) & 0x3FFF;
-                    litCSize = lhc >> 18;
-                    break;
-                case 3:
-                    /* 2 - 2 - 18 - 18 */
-                    lhSize = 5;
-                    litSize  = (lhc >> 4) & 0x3FFFF;
-                    litCSize = (lhc >> 22) + (istart[4] << 10);
-                    break;
-                }
-                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
-                RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected);
-
-                /* prefetch huffman table if cold */
-                if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
-                    PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
-                }
-
-                if (litEncType==set_repeat) {
-                    if (singleStream) {
-                        hufSuccess = HUF_decompress1X_usingDTable_bmi2(
-                            dctx->litBuffer, litSize, istart+lhSize, litCSize,
-                            dctx->HUFptr, dctx->bmi2);
-                    } else {
-                        hufSuccess = HUF_decompress4X_usingDTable_bmi2(
-                            dctx->litBuffer, litSize, istart+lhSize, litCSize,
-                            dctx->HUFptr, dctx->bmi2);
-                    }
-                } else {
-                    if (singleStream) {
-#if defined(HUF_FORCE_DECOMPRESS_X2)
-                        hufSuccess = HUF_decompress1X_DCtx_wksp(
-                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
-                            istart+lhSize, litCSize, dctx->workspace,
-                            sizeof(dctx->workspace));
-#else
-                        hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
-                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
-                            istart+lhSize, litCSize, dctx->workspace,
-                            sizeof(dctx->workspace), dctx->bmi2);
-#endif
-                    } else {
-                        hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
-                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
-                            istart+lhSize, litCSize, dctx->workspace,
-                            sizeof(dctx->workspace), dctx->bmi2);
-                    }
-                }
-
-                RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected);
-
-                dctx->litPtr = dctx->litBuffer;
-                dctx->litSize = litSize;
-                dctx->litEntropy = 1;
-                if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
-                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
-                return litCSize + lhSize;
-            }
-
-        case set_basic:
-            {   size_t litSize, lhSize;
-                U32 const lhlCode = ((istart[0]) >> 2) & 3;
-                switch(lhlCode)
-                {
-                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
-                    lhSize = 1;
-                    litSize = istart[0] >> 3;
-                    break;
-                case 1:
-                    lhSize = 2;
-                    litSize = MEM_readLE16(istart) >> 4;
-                    break;
-                case 3:
-                    lhSize = 3;
-                    litSize = MEM_readLE24(istart) >> 4;
-                    break;
-                }
-
-                if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
-                    RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected);
-                    memcpy(dctx->litBuffer, istart+lhSize, litSize);
-                    dctx->litPtr = dctx->litBuffer;
-                    dctx->litSize = litSize;
-                    memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
-                    return lhSize+litSize;
-                }
-                /* direct reference into compressed stream */
-                dctx->litPtr = istart+lhSize;
-                dctx->litSize = litSize;
-                return lhSize+litSize;
-            }
-
-        case set_rle:
-            {   U32 const lhlCode = ((istart[0]) >> 2) & 3;
-                size_t litSize, lhSize;
-                switch(lhlCode)
-                {
-                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
-                    lhSize = 1;
-                    litSize = istart[0] >> 3;
-                    break;
-                case 1:
-                    lhSize = 2;
-                    litSize = MEM_readLE16(istart) >> 4;
-                    break;
-                case 3:
-                    lhSize = 3;
-                    litSize = MEM_readLE24(istart) >> 4;
-                    RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
-                    break;
-                }
-                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
-                memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
-                dctx->litPtr = dctx->litBuffer;
-                dctx->litSize = litSize;
-                return lhSize+1;
-            }
-        default:
-            RETURN_ERROR(corruption_detected, "impossible");
-        }
-    }
-}
-
-/* Default FSE distribution tables.
- * These are pre-calculated FSE decoding tables using default distributions as defined in specification :
- * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions
- * They were generated programmatically with following method :
- * - start from default distributions, present in /lib/common/zstd_internal.h
- * - generate tables normally, using ZSTD_buildFSETable()
- * - printout the content of tables
- * - prettify output (reported below), test with a fuzzer to ensure it's correct */
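As a rough, hypothetical illustration of the generation method just described (it assumes the internal symbols ZSTD_seqSymbol, LL_defaultNorm, LL_DEFAULTNORMLOG, MaxLL, LL_base and LL_bits from zstd's internal headers, plus the ZSTD_buildFSETable() defined later in this file; it is not library code):

#include <stdio.h>

/* Hypothetical sketch: rebuild the literal-length default table from the
 * default distribution and print it in the initializer format used below. */
static void print_LL_defaultDTable(void)
{
    ZSTD_seqSymbol dt[(1 << LL_DEFAULTNORMLOG) + 1];
    unsigned u;
    ZSTD_buildFSETable(dt, LL_defaultNorm, MaxLL, LL_base, LL_bits, LL_DEFAULTNORMLOG);
    /* dt[0] holds the table header; the decoding cells start at dt[1] */
    for (u = 1; u < (1u << LL_DEFAULTNORMLOG) + 1; u++) {
        printf("{ %3u, %2u, %2u, %5u },\n",
               (unsigned)dt[u].nextState, (unsigned)dt[u].nbAdditionalBits,
               (unsigned)dt[u].nbBits, (unsigned)dt[u].baseValue);
    }
}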
-
-/* Default FSE distribution table for Literal Lengths */
-static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
-     {  1,  1,  1, LL_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
-     /* nextState, nbAddBits, nbBits, baseVal */
-     {  0,  0,  4,    0},  { 16,  0,  4,    0},
-     { 32,  0,  5,    1},  {  0,  0,  5,    3},
-     {  0,  0,  5,    4},  {  0,  0,  5,    6},
-     {  0,  0,  5,    7},  {  0,  0,  5,    9},
-     {  0,  0,  5,   10},  {  0,  0,  5,   12},
-     {  0,  0,  6,   14},  {  0,  1,  5,   16},
-     {  0,  1,  5,   20},  {  0,  1,  5,   22},
-     {  0,  2,  5,   28},  {  0,  3,  5,   32},
-     {  0,  4,  5,   48},  { 32,  6,  5,   64},
-     {  0,  7,  5,  128},  {  0,  8,  6,  256},
-     {  0, 10,  6, 1024},  {  0, 12,  6, 4096},
-     { 32,  0,  4,    0},  {  0,  0,  4,    1},
-     {  0,  0,  5,    2},  { 32,  0,  5,    4},
-     {  0,  0,  5,    5},  { 32,  0,  5,    7},
-     {  0,  0,  5,    8},  { 32,  0,  5,   10},
-     {  0,  0,  5,   11},  {  0,  0,  6,   13},
-     { 32,  1,  5,   16},  {  0,  1,  5,   18},
-     { 32,  1,  5,   22},  {  0,  2,  5,   24},
-     { 32,  3,  5,   32},  {  0,  3,  5,   40},
-     {  0,  6,  4,   64},  { 16,  6,  4,   64},
-     { 32,  7,  5,  128},  {  0,  9,  6,  512},
-     {  0, 11,  6, 2048},  { 48,  0,  4,    0},
-     { 16,  0,  4,    1},  { 32,  0,  5,    2},
-     { 32,  0,  5,    3},  { 32,  0,  5,    5},
-     { 32,  0,  5,    6},  { 32,  0,  5,    8},
-     { 32,  0,  5,    9},  { 32,  0,  5,   11},
-     { 32,  0,  5,   12},  {  0,  0,  6,   15},
-     { 32,  1,  5,   18},  { 32,  1,  5,   20},
-     { 32,  2,  5,   24},  { 32,  2,  5,   28},
-     { 32,  3,  5,   40},  { 32,  4,  5,   48},
-     {  0, 16,  6,65536},  {  0, 15,  6,32768},
-     {  0, 14,  6,16384},  {  0, 13,  6, 8192},
-};   /* LL_defaultDTable */
-
-/* Default FSE distribution table for Offset Codes */
-static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
-    {  1,  1,  1, OF_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
-    /* nextState, nbAddBits, nbBits, baseVal */
-    {  0,  0,  5,    0},     {  0,  6,  4,   61},
-    {  0,  9,  5,  509},     {  0, 15,  5,32765},
-    {  0, 21,  5,2097149},   {  0,  3,  5,    5},
-    {  0,  7,  4,  125},     {  0, 12,  5, 4093},
-    {  0, 18,  5,262141},    {  0, 23,  5,8388605},
-    {  0,  5,  5,   29},     {  0,  8,  4,  253},
-    {  0, 14,  5,16381},     {  0, 20,  5,1048573},
-    {  0,  2,  5,    1},     { 16,  7,  4,  125},
-    {  0, 11,  5, 2045},     {  0, 17,  5,131069},
-    {  0, 22,  5,4194301},   {  0,  4,  5,   13},
-    { 16,  8,  4,  253},     {  0, 13,  5, 8189},
-    {  0, 19,  5,524285},    {  0,  1,  5,    1},
-    { 16,  6,  4,   61},     {  0, 10,  5, 1021},
-    {  0, 16,  5,65533},     {  0, 28,  5,268435453},
-    {  0, 27,  5,134217725}, {  0, 26,  5,67108861},
-    {  0, 25,  5,33554429},  {  0, 24,  5,16777213},
-};   /* OF_defaultDTable */
-
-
-/* Default FSE distribution table for Match Lengths */
-static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
-    {  1,  1,  1, ML_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
-    /* nextState, nbAddBits, nbBits, baseVal */
-    {  0,  0,  6,    3},  {  0,  0,  4,    4},
-    { 32,  0,  5,    5},  {  0,  0,  5,    6},
-    {  0,  0,  5,    8},  {  0,  0,  5,    9},
-    {  0,  0,  5,   11},  {  0,  0,  6,   13},
-    {  0,  0,  6,   16},  {  0,  0,  6,   19},
-    {  0,  0,  6,   22},  {  0,  0,  6,   25},
-    {  0,  0,  6,   28},  {  0,  0,  6,   31},
-    {  0,  0,  6,   34},  {  0,  1,  6,   37},
-    {  0,  1,  6,   41},  {  0,  2,  6,   47},
-    {  0,  3,  6,   59},  {  0,  4,  6,   83},
-    {  0,  7,  6,  131},  {  0,  9,  6,  515},
-    { 16,  0,  4,    4},  {  0,  0,  4,    5},
-    { 32,  0,  5,    6},  {  0,  0,  5,    7},
-    { 32,  0,  5,    9},  {  0,  0,  5,   10},
-    {  0,  0,  6,   12},  {  0,  0,  6,   15},
-    {  0,  0,  6,   18},  {  0,  0,  6,   21},
-    {  0,  0,  6,   24},  {  0,  0,  6,   27},
-    {  0,  0,  6,   30},  {  0,  0,  6,   33},
-    {  0,  1,  6,   35},  {  0,  1,  6,   39},
-    {  0,  2,  6,   43},  {  0,  3,  6,   51},
-    {  0,  4,  6,   67},  {  0,  5,  6,   99},
-    {  0,  8,  6,  259},  { 32,  0,  4,    4},
-    { 48,  0,  4,    4},  { 16,  0,  4,    5},
-    { 32,  0,  5,    7},  { 32,  0,  5,    8},
-    { 32,  0,  5,   10},  { 32,  0,  5,   11},
-    {  0,  0,  6,   14},  {  0,  0,  6,   17},
-    {  0,  0,  6,   20},  {  0,  0,  6,   23},
-    {  0,  0,  6,   26},  {  0,  0,  6,   29},
-    {  0,  0,  6,   32},  {  0, 16,  6,65539},
-    {  0, 15,  6,32771},  {  0, 14,  6,16387},
-    {  0, 13,  6, 8195},  {  0, 12,  6, 4099},
-    {  0, 11,  6, 2051},  {  0, 10,  6, 1027},
-};   /* ML_defaultDTable */
-
-
-static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
-{
-    void* ptr = dt;
-    ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
-    ZSTD_seqSymbol* const cell = dt + 1;
-
-    DTableH->tableLog = 0;
-    DTableH->fastMode = 0;
-
-    cell->nbBits = 0;
-    cell->nextState = 0;
-    assert(nbAddBits < 255);
-    cell->nbAdditionalBits = (BYTE)nbAddBits;
-    cell->baseValue = baseValue;
-}
-
-
-/* ZSTD_buildFSETable() :
- * generate FSE decoding table for one symbol (ll, ml or off)
- * cannot fail if input is valid =>
- * all inputs are presumed validated at this stage */
-void
-ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
-            const short* normalizedCounter, unsigned maxSymbolValue,
-            const U32* baseValue, const U32* nbAdditionalBits,
-            unsigned tableLog)
-{
-    ZSTD_seqSymbol* const tableDecode = dt+1;
-    U16 symbolNext[MaxSeq+1];
-
-    U32 const maxSV1 = maxSymbolValue + 1;
-    U32 const tableSize = 1 << tableLog;
-    U32 highThreshold = tableSize-1;
-
-    /* Sanity Checks */
-    assert(maxSymbolValue <= MaxSeq);
-    assert(tableLog <= MaxFSELog);
-
-    /* Init, lay down lowprob symbols */
-    {   ZSTD_seqSymbol_header DTableH;
-        DTableH.tableLog = tableLog;
-        DTableH.fastMode = 1;
-        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
-            U32 s;
-            for (s=0; s<maxSV1; s++) {
-                if (normalizedCounter[s]==-1) {
-                    tableDecode[highThreshold--].baseValue = s;
-                    symbolNext[s] = 1;
-                } else {
-                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
-                    symbolNext[s] = normalizedCounter[s];
-        }   }   }
-        memcpy(dt, &DTableH, sizeof(DTableH));
-    }
-
-    /* Spread symbols */
-    {   U32 const tableMask = tableSize-1;
-        U32 const step = FSE_TABLESTEP(tableSize);
-        U32 s, position = 0;
-        for (s=0; s<maxSV1; s++) {
-            int i;
-            for (i=0; i<normalizedCounter[s]; i++) {
-                tableDecode[position].baseValue = s;
-                position = (position + step) & tableMask;
-                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
-        }   }
-        assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
-    }
-
-    /* Build Decoding table */
-    {   U32 u;
-        for (u=0; u<tableSize; u++) {
-            U32 const symbol = tableDecode[u].baseValue;
-            U32 const nextState = symbolNext[symbol]++;
-            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
-            tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
-            assert(nbAdditionalBits[symbol] < 255);
-            tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
-            tableDecode[u].baseValue = baseValue[symbol];
-    }   }
-}
-
-
-/*! ZSTD_buildSeqTable() :
- * @return : nb bytes read from src,
- *           or an error code if it fails */
-static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
-                                 symbolEncodingType_e type, unsigned max, U32 maxLog,
-                                 const void* src, size_t srcSize,
-                                 const U32* baseValue, const U32* nbAdditionalBits,
-                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
-                                 int ddictIsCold, int nbSeq)
-{
-    switch(type)
-    {
-    case set_rle :
-        RETURN_ERROR_IF(!srcSize, srcSize_wrong);
-        RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected);
-        {   U32 const symbol = *(const BYTE*)src;
-            U32 const baseline = baseValue[symbol];
-            U32 const nbBits = nbAdditionalBits[symbol];
-            ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
-        }
-        *DTablePtr = DTableSpace;
-        return 1;
-    case set_basic :
-        *DTablePtr = defaultTable;
-        return 0;
-    case set_repeat:
-        RETURN_ERROR_IF(!flagRepeatTable, corruption_detected);
-        /* prefetch FSE table if used */
-        if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
-            const void* const pStart = *DTablePtr;
-            size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
-            PREFETCH_AREA(pStart, pSize);
-        }
-        return 0;
-    case set_compressed :
-        {   unsigned tableLog;
-            S16 norm[MaxSeq+1];
-            size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
-            RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected);
-            RETURN_ERROR_IF(tableLog > maxLog, corruption_detected);
-            ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
-            *DTablePtr = DTableSpace;
-            return headerSize;
-        }
-    default :
-        assert(0);
-        RETURN_ERROR(GENERIC, "impossible");
-    }
-}
-
-size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
-                             const void* src, size_t srcSize)
-{
-    const BYTE* const istart = (const BYTE* const)src;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* ip = istart;
-    int nbSeq;
-    DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
-
-    /* check */
-    RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong);
-
-    /* SeqHead */
-    nbSeq = *ip++;
-    if (!nbSeq) {
-        *nbSeqPtr=0;
-        RETURN_ERROR_IF(srcSize != 1, srcSize_wrong);
-        return 1;
-    }
-    if (nbSeq > 0x7F) {
-        if (nbSeq == 0xFF) {
-            RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong);
-            nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
-        } else {
-            RETURN_ERROR_IF(ip >= iend, srcSize_wrong);
-            nbSeq = ((nbSeq-0x80)<<8) + *ip++;
-        }
-    }
-    *nbSeqPtr = nbSeq;
-
-    /* FSE table descriptors */
-    RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong); /* minimum possible size: 1 byte for symbol encoding types */
-    {   symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
-        symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
-        symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
-        ip++;
-
-        /* Build DTables */
-        {   size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
-                                                      LLtype, MaxLL, LLFSELog,
-                                                      ip, iend-ip,
-                                                      LL_base, LL_bits,
-                                                      LL_defaultDTable, dctx->fseEntropy,
-                                                      dctx->ddictIsCold, nbSeq);
-            RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected);
-            ip += llhSize;
-        }
-
-        {   size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
-                                                      OFtype, MaxOff, OffFSELog,
-                                                      ip, iend-ip,
-                                                      OF_base, OF_bits,
-                                                      OF_defaultDTable, dctx->fseEntropy,
-                                                      dctx->ddictIsCold, nbSeq);
-            RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected);
-            ip += ofhSize;
-        }
-
-        {   size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
-                                                      MLtype, MaxML, MLFSELog,
-                                                      ip, iend-ip,
-                                                      ML_base, ML_bits,
-                                                      ML_defaultDTable, dctx->fseEntropy,
-                                                      dctx->ddictIsCold, nbSeq);
-            RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected);
-            ip += mlhSize;
-        }
-    }
-
-    return ip-istart;
-}
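The header parsed by ZSTD_decodeSeqHeaders() above encodes the sequence count in one to three bytes: a first byte below 0x80 is the count itself, a first byte in 0x80..0xFE combines with one following byte, and 0xFF introduces a little-endian 16-bit value offset by LONGNBSEQ. Below is a minimal standalone sketch of that rule, not the vendored code; `sketch_readNbSeq` is a hypothetical helper and LONGNBSEQ is assumed to be 0x7F00 as in upstream zstd.

```c
/* Hypothetical standalone sketch of the sequence-count header parsing used
 * in ZSTD_decodeSeqHeaders() above; not part of the vendored sources. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_LONGNBSEQ 0x7F00   /* assumed to match upstream zstd's LONGNBSEQ */

/* Returns header bytes consumed (0 on error) and stores the count in *nbSeq. */
size_t sketch_readNbSeq(const uint8_t* ip, size_t srcSize, int* nbSeq)
{
    if (srcSize < 1) return 0;
    if (ip[0] < 0x80) { *nbSeq = ip[0]; return 1; }            /* 1-byte form */
    if (ip[0] == 0xFF) {                                       /* 3-byte form */
        if (srcSize < 3) return 0;
        *nbSeq = (ip[1] | (ip[2] << 8)) + SKETCH_LONGNBSEQ;    /* LE16 + LONGNBSEQ */
        return 3;
    }
    if (srcSize < 2) return 0;                                 /* 2-byte form */
    *nbSeq = ((ip[0] - 0x80) << 8) + ip[1];
    return 2;
}

int main(void)
{
    const uint8_t hdr[] = { 0x81, 0x05 };   /* 2-byte form: (0x01 << 8) + 5 = 261 */
    int nbSeq = 0;
    size_t consumed = sketch_readNbSeq(hdr, sizeof hdr, &nbSeq);
    printf("consumed=%zu nbSeq=%d\n", consumed, nbSeq);   /* consumed=2 nbSeq=261 */
    return 0;
}
```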
-
-
-typedef struct {
-    size_t litLength;
-    size_t matchLength;
-    size_t offset;
-    const BYTE* match;
-} seq_t;
-
-typedef struct {
-    size_t state;
-    const ZSTD_seqSymbol* table;
-} ZSTD_fseState;
-
-typedef struct {
-    BIT_DStream_t DStream;
-    ZSTD_fseState stateLL;
-    ZSTD_fseState stateOffb;
-    ZSTD_fseState stateML;
-    size_t prevOffset[ZSTD_REP_NUM];
-    const BYTE* prefixStart;
-    const BYTE* dictEnd;
-    size_t pos;
-} seqState_t;
-
-
-/* ZSTD_execSequenceLast7():
- * exceptional case : decompress a match starting within last 7 bytes of output buffer.
- * requires more careful checks, to ensure there is no overflow.
- * performance does not matter though.
- * note : this case is not supposed to be generated "naturally" by the reference encoder,
- *        since in most cases it needs at least 8 bytes to look for a match.
- *        but it is allowed by the specification. */
-FORCE_NOINLINE
-size_t ZSTD_execSequenceLast7(BYTE* op,
-                              BYTE* const oend, seq_t sequence,
-                              const BYTE** litPtr, const BYTE* const litLimit,
-                              const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
-{
-    BYTE* const oLitEnd = op + sequence.litLength;
-    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
-    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
-    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
-    const BYTE* match = oLitEnd - sequence.offset;
-
-    /* check */
-    RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must fit within dstBuffer");
-    RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "try to read beyond literal buffer");
-
-    /* copy literals */
-    while (op < oLitEnd) *op++ = *(*litPtr)++;
-
-    /* copy Match */
-    if (sequence.offset > (size_t)(oLitEnd - base)) {
-        /* offset beyond prefix */
-        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - vBase),corruption_detected);
-        match = dictEnd - (base-match);
-        if (match + sequence.matchLength <= dictEnd) {
-            memmove(oLitEnd, match, sequence.matchLength);
-            return sequenceLength;
-        }
-        /* span extDict & currentPrefixSegment */
-        {   size_t const length1 = dictEnd - match;
-            memmove(oLitEnd, match, length1);
-            op = oLitEnd + length1;
-            sequence.matchLength -= length1;
-            match = base;
-    }   }
-    while (op < oMatchEnd) *op++ = *match++;
-    return sequenceLength;
-}
-
-
-HINT_INLINE
-size_t ZSTD_execSequence(BYTE* op,
-                         BYTE* const oend, seq_t sequence,
-                         const BYTE** litPtr, const BYTE* const litLimit,
-                         const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
-{
-    BYTE* const oLitEnd = op + sequence.litLength;
-    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
-    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
-    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
-    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
-    const BYTE* match = oLitEnd - sequence.offset;
-
-    /* check */
-    RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
-    RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
-    if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
-
-    /* copy Literals */
-    if (sequence.litLength > 8)
-        ZSTD_wildcopy_16min(op, (*litPtr), sequence.litLength, ZSTD_no_overlap);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
-    else
-        ZSTD_copy8(op, *litPtr);
-    op = oLitEnd;
-    *litPtr = iLitEnd;   /* update for next sequence */
-
-    /* copy Match */
-    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
-        /* offset beyond prefix -> go into extDict */
-        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
-        match = dictEnd + (match - prefixStart);
-        if (match + sequence.matchLength <= dictEnd) {
-            memmove(oLitEnd, match, sequence.matchLength);
-            return sequenceLength;
-        }
-        /* span extDict & currentPrefixSegment */
-        {   size_t const length1 = dictEnd - match;
-            memmove(oLitEnd, match, length1);
-            op = oLitEnd + length1;
-            sequence.matchLength -= length1;
-            match = prefixStart;
-            if (op > oend_w || sequence.matchLength < MINMATCH) {
-              U32 i;
-              for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
-              return sequenceLength;
-            }
-    }   }
-    /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
-
-    /* match within prefix */
-    if (sequence.offset < 8) {
-        /* close range match, overlap */
-        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
-        int const sub2 = dec64table[sequence.offset];
-        op[0] = match[0];
-        op[1] = match[1];
-        op[2] = match[2];
-        op[3] = match[3];
-        match += dec32table[sequence.offset];
-        ZSTD_copy4(op+4, match);
-        match -= sub2;
-    } else {
-        ZSTD_copy8(op, match);
-    }
-    op += 8; match += 8;
-
-    if (oMatchEnd > oend-(16-MINMATCH)) {
-        if (op < oend_w) {
-            ZSTD_wildcopy(op, match, oend_w - op, ZSTD_overlap_src_before_dst);
-            match += oend_w - op;
-            op = oend_w;
-        }
-        while (op < oMatchEnd) *op++ = *match++;
-    } else {
-        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);   /* works even if matchLength < 8 */
-    }
-    return sequenceLength;
-}
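The close-range branch above (offset < 8) exists because a match that overlaps its own output cannot be copied with one wide load/store; the dec32table/dec64table pair rearranges the copy so it can still proceed in fixed-size chunks. As a hedged illustration of the underlying constraint only (not the vendored trick), an overlap-safe byte-by-byte copy behaves like this:

```c
/* Illustrative sketch (not from the vendored sources): an LZ match whose
 * offset is smaller than its length overlaps its own output, so it must
 * replicate bytes as it goes; a single memcpy would not be correct here. */
#include <stddef.h>
#include <stdio.h>

int main(void)
{
    char out[32] = "abc";                 /* already-decoded output prefix */
    size_t const pos = 3;
    size_t const offset = 1;              /* match points 1 byte back... */
    size_t const matchLength = 5;         /* ...and overlaps its own output */
    size_t i;

    for (i = 0; i < matchLength; i++)
        out[pos + i] = out[pos + i - offset];   /* overlap-safe byte copy */

    out[pos + matchLength] = '\0';
    printf("%s\n", out);                  /* prints "abcccccc" */
    return 0;
}
```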
-
-
-HINT_INLINE
-size_t ZSTD_execSequenceLong(BYTE* op,
-                             BYTE* const oend, seq_t sequence,
-                             const BYTE** litPtr, const BYTE* const litLimit,
-                             const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd)
-{
-    BYTE* const oLitEnd = op + sequence.litLength;
-    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
-    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
-    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
-    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
-    const BYTE* match = sequence.match;
-
-    /* check */
-    RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
-    RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
-    if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
-
-    /* copy Literals */
-    if (sequence.litLength > 8)
-        ZSTD_wildcopy_16min(op, *litPtr, sequence.litLength, ZSTD_no_overlap);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
-    else
-        ZSTD_copy8(op, *litPtr);  /* note : op <= oLitEnd <= oend_w == oend - 8 */
-
-    op = oLitEnd;
-    *litPtr = iLitEnd;   /* update for next sequence */
-
-    /* copy Match */
-    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
-        /* offset beyond prefix */
-        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - dictStart), corruption_detected);
-        if (match + sequence.matchLength <= dictEnd) {
-            memmove(oLitEnd, match, sequence.matchLength);
-            return sequenceLength;
-        }
-        /* span extDict & currentPrefixSegment */
-        {   size_t const length1 = dictEnd - match;
-            memmove(oLitEnd, match, length1);
-            op = oLitEnd + length1;
-            sequence.matchLength -= length1;
-            match = prefixStart;
-            if (op > oend_w || sequence.matchLength < MINMATCH) {
-              U32 i;
-              for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
-              return sequenceLength;
-            }
-    }   }
-    assert(op <= oend_w);
-    assert(sequence.matchLength >= MINMATCH);
-
-    /* match within prefix */
-    if (sequence.offset < 8) {
-        /* close range match, overlap */
-        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
-        int const sub2 = dec64table[sequence.offset];
-        op[0] = match[0];
-        op[1] = match[1];
-        op[2] = match[2];
-        op[3] = match[3];
-        match += dec32table[sequence.offset];
-        ZSTD_copy4(op+4, match);
-        match -= sub2;
-    } else {
-        ZSTD_copy8(op, match);
-    }
-    op += 8; match += 8;
-
-    if (oMatchEnd > oend-(16-MINMATCH)) {
-        if (op < oend_w) {
-            ZSTD_wildcopy(op, match, oend_w - op, ZSTD_overlap_src_before_dst);
-            match += oend_w - op;
-            op = oend_w;
-        }
-        while (op < oMatchEnd) *op++ = *match++;
-    } else {
-        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);   /* works even if matchLength < 8 */
-    }
-    return sequenceLength;
-}
-
-static void
-ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
-{
-    const void* ptr = dt;
-    const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
-    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
-    DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
-                (U32)DStatePtr->state, DTableH->tableLog);
-    BIT_reloadDStream(bitD);
-    DStatePtr->table = dt + 1;
-}
-
-FORCE_INLINE_TEMPLATE void
-ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
-{
-    ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
-    U32 const nbBits = DInfo.nbBits;
-    size_t const lowBits = BIT_readBits(bitD, nbBits);
-    DStatePtr->state = DInfo.nextState + lowBits;
-}
-
-/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
- * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
- * bits before reloading. This value is the maximum number of bits we read
- * after reloading when we are decoding long offsets.
- */
-#define LONG_OFFSETS_MAX_EXTRA_BITS_32                       \
-    (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32       \
-        ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32  \
-        : 0)
-
-typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
-
-#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
-FORCE_INLINE_TEMPLATE seq_t
-ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
-{
-    seq_t seq;
-    U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
-    U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
-    U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
-    U32 const totalBits = llBits+mlBits+ofBits;
-    U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
-    U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
-    U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
-
-    /* sequence */
-    {   size_t offset;
-        if (!ofBits)
-            offset = 0;
-        else {
-            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
-            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
-            assert(ofBits <= MaxOff);
-            if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
-                U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
-                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
-                BIT_reloadDStream(&seqState->DStream);
-                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
-                assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32);   /* to avoid another reload */
-            } else {
-                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
-                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
-            }
-        }
-
-        if (ofBits <= 1) {
-            offset += (llBase==0);
-            if (offset) {
-                size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
-                temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
-                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
-                seqState->prevOffset[1] = seqState->prevOffset[0];
-                seqState->prevOffset[0] = offset = temp;
-            } else {  /* offset == 0 */
-                offset = seqState->prevOffset[0];
-            }
-        } else {
-            seqState->prevOffset[2] = seqState->prevOffset[1];
-            seqState->prevOffset[1] = seqState->prevOffset[0];
-            seqState->prevOffset[0] = offset;
-        }
-        seq.offset = offset;
-    }
-
-    seq.matchLength = mlBase
-                    + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/) : 0);  /* <=  16 bits */
-    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
-        BIT_reloadDStream(&seqState->DStream);
-    if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
-        BIT_reloadDStream(&seqState->DStream);
-    /* Ensure there are enough bits to read the rest of the data in 64-bit mode. */
-    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
-
-    seq.litLength = llBase
-                  + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits/*>0*/) : 0);    /* <=  16 bits */
-    if (MEM_32bits())
-        BIT_reloadDStream(&seqState->DStream);
-
-    DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
-                (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
-
-    /* ANS state update */
-    ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
-    ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
-    if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
-    ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
-
-    return seq;
-}
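The offset-history handling above implements the format's three repeat offsets: when the offset code carries at most one extra bit, small values select a previously used offset (shifted by one when the literal length is zero) and the history is rotated. A hedged standalone rendering of that update rule, with hypothetical names and no bitstream handling:

```c
/* Hedged sketch of the repeat-offset update performed in
 * ZSTD_decodeSequence() above; not the vendored implementation. */
#include <stddef.h>
#include <stdio.h>

typedef struct { size_t prev[3]; } rep_history;

/* rawOffset is the value already decoded from the offset code (expected 0..2
 * here, i.e. the ofBits <= 1 branch above). */
size_t sketch_resolveOffset(rep_history* h, size_t rawOffset, int litLengthIsZero)
{
    size_t offset = rawOffset + (litLengthIsZero ? 1 : 0);   /* mirrors `offset += (llBase==0)` */
    size_t resolved;
    if (offset == 0)                 /* most recent offset, history unchanged */
        return h->prev[0];
    resolved = (offset == 3) ? h->prev[0] - 1 : h->prev[offset];
    if (resolved == 0) resolved = 1; /* corrupted input: force offset to 1 */
    if (offset != 1) h->prev[2] = h->prev[1];
    h->prev[1] = h->prev[0];
    h->prev[0] = resolved;
    return resolved;
}

int main(void)
{
    rep_history h = { { 1, 4, 8 } };              /* zstd's initial repeat offsets */
    size_t off = sketch_resolveOffset(&h, 2, 0);  /* raw value 2 -> third repeat offset */
    printf("offset=%zu history={%zu,%zu,%zu}\n",  /* offset=8 history={8,1,4} */
           off, h.prev[0], h.prev[1], h.prev[2]);
    return 0;
}
```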
-
-FORCE_INLINE_TEMPLATE size_t
-DONT_VECTORIZE
-ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
-                               void* dst, size_t maxDstSize,
-                         const void* seqStart, size_t seqSize, int nbSeq,
-                         const ZSTD_longOffset_e isLongOffset)
-{
-    const BYTE* ip = (const BYTE*)seqStart;
-    const BYTE* const iend = ip + seqSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* const oend = ostart + maxDstSize;
-    BYTE* op = ostart;
-    const BYTE* litPtr = dctx->litPtr;
-    const BYTE* const litEnd = litPtr + dctx->litSize;
-    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
-    const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
-    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
-    DEBUGLOG(5, "ZSTD_decompressSequences_body");
-
-    /* Regen sequences */
-    if (nbSeq) {
-        seqState_t seqState;
-        dctx->fseEntropy = 1;
-        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
-        RETURN_ERROR_IF(
-            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
-            corruption_detected);
-        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
-        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
-        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
-
-        ZSTD_STATIC_ASSERT(
-                BIT_DStream_unfinished < BIT_DStream_completed &&
-                BIT_DStream_endOfBuffer < BIT_DStream_completed &&
-                BIT_DStream_completed < BIT_DStream_overflow);
-
-        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
-            nbSeq--;
-            {   seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
-                size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
-                DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
-                if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
-                op += oneSeqSize;
-        }   }
-
-        /* check if reached exact end */
-        DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
-        RETURN_ERROR_IF(nbSeq, corruption_detected);
-        RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected);
-        /* save reps for next block */
-        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
-    }
-
-    /* last literal segment */
-    {   size_t const lastLLSize = litEnd - litPtr;
-        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
-        memcpy(op, litPtr, lastLLSize);
-        op += lastLLSize;
-    }
-
-    return op-ostart;
-}
-
-static size_t
-ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                           const void* seqStart, size_t seqSize, int nbSeq,
-                           const ZSTD_longOffset_e isLongOffset)
-{
-    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
-
-
-
-#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
-FORCE_INLINE_TEMPLATE seq_t
-ZSTD_decodeSequenceLong(seqState_t* seqState, ZSTD_longOffset_e const longOffsets)
-{
-    seq_t seq;
-    U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
-    U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
-    U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
-    U32 const totalBits = llBits+mlBits+ofBits;
-    U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
-    U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
-    U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
-
-    /* sequence */
-    {   size_t offset;
-        if (!ofBits)
-            offset = 0;
-        else {
-            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
-            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
-            assert(ofBits <= MaxOff);
-            if (MEM_32bits() && longOffsets) {
-                U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1);
-                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
-                if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream);
-                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
-            } else {
-                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
-                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
-            }
-        }
-
-        if (ofBits <= 1) {
-            offset += (llBase==0);
-            if (offset) {
-                size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
-                temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
-                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
-                seqState->prevOffset[1] = seqState->prevOffset[0];
-                seqState->prevOffset[0] = offset = temp;
-            } else {
-                offset = seqState->prevOffset[0];
-            }
-        } else {
-            seqState->prevOffset[2] = seqState->prevOffset[1];
-            seqState->prevOffset[1] = seqState->prevOffset[0];
-            seqState->prevOffset[0] = offset;
-        }
-        seq.offset = offset;
-    }
-
-    seq.matchLength = mlBase + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0);  /* <=  16 bits */
-    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
-        BIT_reloadDStream(&seqState->DStream);
-    if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
-        BIT_reloadDStream(&seqState->DStream);
-    /* Verify that there are enough bits to read the rest of the data in 64-bit mode. */
-    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
-
-    seq.litLength = llBase + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0);    /* <=  16 bits */
-    if (MEM_32bits())
-        BIT_reloadDStream(&seqState->DStream);
-
-    {   size_t const pos = seqState->pos + seq.litLength;
-        const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
-        seq.match = matchBase + pos - seq.offset;  /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
-                                                    * No consequence though : no memory access will occur, overly large offset will be detected in ZSTD_execSequenceLong() */
-        seqState->pos = pos + seq.matchLength;
-    }
-
-    /* ANS state update */
-    ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
-    ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
-    if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
-    ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
-
-    return seq;
-}
-
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_decompressSequencesLong_body(
-                               ZSTD_DCtx* dctx,
-                               void* dst, size_t maxDstSize,
-                         const void* seqStart, size_t seqSize, int nbSeq,
-                         const ZSTD_longOffset_e isLongOffset)
-{
-    const BYTE* ip = (const BYTE*)seqStart;
-    const BYTE* const iend = ip + seqSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* const oend = ostart + maxDstSize;
-    BYTE* op = ostart;
-    const BYTE* litPtr = dctx->litPtr;
-    const BYTE* const litEnd = litPtr + dctx->litSize;
-    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
-    const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
-    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
-
-    /* Regen sequences */
-    if (nbSeq) {
-#define STORED_SEQS 4
-#define STORED_SEQS_MASK (STORED_SEQS-1)
-#define ADVANCED_SEQS 4
-        seq_t sequences[STORED_SEQS];
-        int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
-        seqState_t seqState;
-        int seqNb;
-        dctx->fseEntropy = 1;
-        { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
-        seqState.prefixStart = prefixStart;
-        seqState.pos = (size_t)(op-prefixStart);
-        seqState.dictEnd = dictEnd;
-        assert(iend >= ip);
-        RETURN_ERROR_IF(
-            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
-            corruption_detected);
-        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
-        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
-        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
-
-        /* prepare in advance */
-        for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
-            sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
-            PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
-        }
-        RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected);
-
-        /* decode and decompress */
-        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
-            seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
-            size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
-            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
-            PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
-            sequences[seqNb & STORED_SEQS_MASK] = sequence;
-            op += oneSeqSize;
-        }
-        RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected);
-
-        /* finish queue */
-        seqNb -= seqAdvance;
-        for ( ; seqNb<nbSeq ; seqNb++) {
-            size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
-            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
-            op += oneSeqSize;
-        }
-
-        /* save reps for next block */
-        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
-    }
-
-    /* last literal segment */
-    {   size_t const lastLLSize = litEnd - litPtr;
-        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
-        memcpy(op, litPtr, lastLLSize);
-        op += lastLLSize;
-    }
-
-    return op-ostart;
-}
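The body above decodes a few sequences ahead of where it executes them, keeping them in a four-entry ring (STORED_SEQS) so each match address can be prefetched several iterations before it is copied. A hedged, generic rendering of that decode-ahead pattern, with hypothetical stand-ins for the decode/execute/prefetch steps and no bitstream error handling:

```c
/* Hedged sketch (not the vendored code) of the decode-ahead pattern used in
 * ZSTD_decompressSequencesLong_body() above. decode()/execute()/prefetch()
 * stand in for ZSTD_decodeSequenceLong() / ZSTD_execSequenceLong() /
 * PREFETCH_L1(); names are illustrative. */
#include <stddef.h>

#define RING 4   /* mirrors STORED_SEQS */

typedef struct { const void* match; size_t matchLength; } sketch_seq;

void sketch_pipeline(int nbSeq,
                     sketch_seq (*decode)(void* state),
                     void (*execute)(const sketch_seq* seq),
                     void (*prefetch)(const void* addr),
                     void* state)
{
    sketch_seq ring[RING];
    int const advance = nbSeq < RING ? nbSeq : RING;
    int n;

    for (n = 0; n < advance; n++) {        /* fill the ring ahead of time */
        ring[n] = decode(state);
        prefetch(ring[n].match);
    }
    for (; n < nbSeq; n++) {               /* steady state: decode one, execute one */
        sketch_seq const next = decode(state);
        execute(&ring[n & (RING - 1)]);    /* the sequence decoded RING steps earlier */
        prefetch(next.match);
        ring[n & (RING - 1)] = next;
    }
    for (n -= advance; n < nbSeq; n++)     /* drain what is still queued */
        execute(&ring[n & (RING - 1)]);
}
```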
-
-static size_t
-ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                           const void* seqStart, size_t seqSize, int nbSeq,
-                           const ZSTD_longOffset_e isLongOffset)
-{
-    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
-
-
-
-#if DYNAMIC_BMI2
-
-#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
-static TARGET_ATTRIBUTE("bmi2") size_t
-DONT_VECTORIZE
-ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                           const void* seqStart, size_t seqSize, int nbSeq,
-                           const ZSTD_longOffset_e isLongOffset)
-{
-    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
-
-#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
-static TARGET_ATTRIBUTE("bmi2") size_t
-ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                           const void* seqStart, size_t seqSize, int nbSeq,
-                           const ZSTD_longOffset_e isLongOffset)
-{
-    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
-
-#endif /* DYNAMIC_BMI2 */
-
-typedef size_t (*ZSTD_decompressSequences_t)(
-                            ZSTD_DCtx* dctx,
-                            void* dst, size_t maxDstSize,
-                            const void* seqStart, size_t seqSize, int nbSeq,
-                            const ZSTD_longOffset_e isLongOffset);
-
-#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
-static size_t
-ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
-                   const void* seqStart, size_t seqSize, int nbSeq,
-                   const ZSTD_longOffset_e isLongOffset)
-{
-    DEBUGLOG(5, "ZSTD_decompressSequences");
-#if DYNAMIC_BMI2
-    if (dctx->bmi2) {
-        return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-    }
-#endif
-  return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
-
-
-#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
-/* ZSTD_decompressSequencesLong() :
- * decompression function triggered when a minimum share of offsets is considered "long",
- * aka out of cache.
- * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
- * This function will try to mitigate main memory latency through the use of prefetching */
-static size_t
-ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
-                             void* dst, size_t maxDstSize,
-                             const void* seqStart, size_t seqSize, int nbSeq,
-                             const ZSTD_longOffset_e isLongOffset)
-{
-    DEBUGLOG(5, "ZSTD_decompressSequencesLong");
-#if DYNAMIC_BMI2
-    if (dctx->bmi2) {
-        return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-    }
-#endif
-  return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
-
-
-
-#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
-    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
-/* ZSTD_getLongOffsetsShare() :
- * condition : offTable must be valid
- * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
- *           compared to maximum possible of (1<<OffFSELog) */
-static unsigned
-ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
-{
-    const void* ptr = offTable;
-    U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
-    const ZSTD_seqSymbol* table = offTable + 1;
-    U32 const max = 1 << tableLog;
-    U32 u, total = 0;
-    DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
-
-    assert(max <= (1 << OffFSELog));  /* max not too large */
-    for (u=0; u<max; u++) {
-        if (table[u].nbAdditionalBits > 22) total += 1;
-    }
-
-    assert(tableLog <= OffFSELog);
-    total <<= (OffFSELog - tableLog);  /* scale to OffFSELog */
-
-    return total;
-}
-#endif
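For reference, the `minShare` thresholds used in ZSTD_decompressBlock_internal() below are expressed on the same 1<<OffFSELog scale that ZSTD_getLongOffsetsShare() returns; assuming OffFSELog == 8 (a 256-slot scale), as in upstream zstd, they match the 2.73% and 7.81% quoted in the comment there. A throwaway check of that arithmetic:

```c
/* Illustrative arithmetic only, assuming OffFSELog == 8 as in upstream zstd. */
#include <stdio.h>

int main(void)
{
    unsigned const offFSELogScale = 1u << 8;   /* 256 reference slots */
    printf("64-bit threshold: %u/%u = %.2f%%\n",
           7u, offFSELogScale, 100.0 * 7 / offFSELogScale);    /* 2.73% */
    printf("32-bit threshold: %u/%u = %.2f%%\n",
           20u, offFSELogScale, 100.0 * 20 / offFSELogScale);  /* 7.81% */
    return 0;
}
```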
-
-
-size_t
-ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
-                              void* dst, size_t dstCapacity,
-                        const void* src, size_t srcSize, const int frame)
-{   /* blockType == blockCompressed */
-    const BYTE* ip = (const BYTE*)src;
-    /* isLongOffset must be true if there are long offsets.
-     * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
-     * We don't expect that to be the case in 64-bit mode.
-     * In block mode, window size is not known, so we have to be conservative.
-     * (note: but it could be evaluated from current-lowLimit)
-     */
-    ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
-    DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
-
-    RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong);
-
-    /* Decode literals section */
-    {   size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
-        DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
-        if (ZSTD_isError(litCSize)) return litCSize;
-        ip += litCSize;
-        srcSize -= litCSize;
-    }
-
-    /* Build Decoding Tables */
-    {
-        /* These macros control at build-time which decompressor implementation
-         * we use. If neither is defined, we do some inspection and dispatch at
-         * runtime.
-         */
-#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
-    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
-        int usePrefetchDecoder = dctx->ddictIsCold;
-#endif
-        int nbSeq;
-        size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
-        if (ZSTD_isError(seqHSize)) return seqHSize;
-        ip += seqHSize;
-        srcSize -= seqHSize;
-
-#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
-    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
-        if ( !usePrefetchDecoder
-          && (!frame || (dctx->fParams.windowSize > (1<<24)))
-          && (nbSeq>ADVANCED_SEQS) ) {  /* could probably use a larger nbSeq limit */
-            U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
-            U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
-            usePrefetchDecoder = (shareLongOffsets >= minShare);
-        }
-#endif
-
-        dctx->ddictIsCold = 0;
-
-#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
-    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
-        if (usePrefetchDecoder)
-#endif
-#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
-            return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
-#endif
-
-#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
-        /* else */
-        return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
-#endif
-    }
-}
-
-
-size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
-                            void* dst, size_t dstCapacity,
-                      const void* src, size_t srcSize)
-{
-    size_t dSize;
-    ZSTD_checkContinuity(dctx, dst);
-    dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
-    dctx->previousDstEnd = (char*)dst + dSize;
-    return dSize;
-}
diff --git a/vendor/github.com/DataDog/zstd/zstd_decompress_block.h b/vendor/github.com/DataDog/zstd/zstd_decompress_block.h
deleted file mode 100644
index 7e92960..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_decompress_block.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-#ifndef ZSTD_DEC_BLOCK_H
-#define ZSTD_DEC_BLOCK_H
-
-/*-*******************************************************
- *  Dependencies
- *********************************************************/
-#include <stddef.h>   /* size_t */
-#include "zstd.h"    /* DCtx, and some public functions */
-#include "zstd_internal.h"  /* blockProperties_t, and some public functions */
-#include "zstd_decompress_internal.h"  /* ZSTD_seqSymbol */
-
-
-/* ===   Prototypes   === */
-
-/* note: prototypes already published within `zstd.h` :
- * ZSTD_decompressBlock()
- */
-
-/* note: prototypes already published within `zstd_internal.h` :
- * ZSTD_getcBlockSize()
- * ZSTD_decodeSeqHeaders()
- */
-
-
-/* ZSTD_decompressBlock_internal() :
- * decompress block, starting at `src`,
- * into destination buffer `dst`.
- * @return : decompressed block size,
- *           or an error code (which can be tested using ZSTD_isError())
- */
-size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
-                               void* dst, size_t dstCapacity,
-                         const void* src, size_t srcSize, const int frame);
-
-/* ZSTD_buildFSETable() :
- * generate FSE decoding table for one symbol (ll, ml or off)
- * this function must be called with valid parameters only
- * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)
- * in which case it cannot fail.
- * Internal use only.
- */
-void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
-             const short* normalizedCounter, unsigned maxSymbolValue,
-             const U32* baseValue, const U32* nbAdditionalBits,
-                   unsigned tableLog);
-
-
-#endif /* ZSTD_DEC_BLOCK_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_decompress_internal.h b/vendor/github.com/DataDog/zstd/zstd_decompress_internal.h
deleted file mode 100644
index ccbdfa0..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_decompress_internal.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-/* zstd_decompress_internal:
- * objects and definitions shared within lib/decompress modules */
-
- #ifndef ZSTD_DECOMPRESS_INTERNAL_H
- #define ZSTD_DECOMPRESS_INTERNAL_H
-
-
-/*-*******************************************************
- *  Dependencies
- *********************************************************/
-#include "mem.h"             /* BYTE, U16, U32 */
-#include "zstd_internal.h"   /* ZSTD_seqSymbol */
-
-
-
-/*-*******************************************************
- *  Constants
- *********************************************************/
-static const U32 LL_base[MaxLL+1] = {
-                 0,    1,    2,     3,     4,     5,     6,      7,
-                 8,    9,   10,    11,    12,    13,    14,     15,
-                16,   18,   20,    22,    24,    28,    32,     40,
-                48,   64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
-                0x2000, 0x4000, 0x8000, 0x10000 };
-
-static const U32 OF_base[MaxOff+1] = {
-                 0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,
-                 0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,
-                 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
-                 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
-
-static const U32 OF_bits[MaxOff+1] = {
-                     0,  1,  2,  3,  4,  5,  6,  7,
-                     8,  9, 10, 11, 12, 13, 14, 15,
-                    16, 17, 18, 19, 20, 21, 22, 23,
-                    24, 25, 26, 27, 28, 29, 30, 31 };
-
-static const U32 ML_base[MaxML+1] = {
-                     3,  4,  5,    6,     7,     8,     9,    10,
-                    11, 12, 13,   14,    15,    16,    17,    18,
-                    19, 20, 21,   22,    23,    24,    25,    26,
-                    27, 28, 29,   30,    31,    32,    33,    34,
-                    35, 37, 39,   41,    43,    47,    51,    59,
-                    67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
-                    0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
-
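These tables encode the usual base-plus-extra-bits scheme: a decoded symbol selects a base value and a count of additional bits to read from the bitstream, and the reconstructed length or offset is their sum, which is what ZSTD_decodeSequence() does with ofBase/ofBits earlier in this diff. A hedged sketch of that rule, with a stand-in bit reader rather than the vendored BIT_readBitsFast():

```c
/* Hedged sketch of the base + extra-bits reconstruction implied by the
 * tables above; read_bits_fn is a stand-in for BIT_readBitsFast(). */
#include <stdint.h>

typedef uint32_t (*read_bits_fn)(void* bitstream, unsigned nbBits);

uint32_t sketch_decodeOffsetValue(unsigned offCode,
                                  const uint32_t* baseTable,   /* e.g. OF_base above */
                                  const uint32_t* bitsTable,   /* e.g. OF_bits above */
                                  read_bits_fn readBits, void* bitstream)
{
    uint32_t const base = baseTable[offCode];
    uint32_t const nbExtra = bitsTable[offCode];
    uint32_t const extra = nbExtra ? readBits(bitstream, nbExtra) : 0;
    return base + extra;   /* e.g. offCode 10 -> 0x3FD plus 10 extra bits */
}
```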
-
-/*-*******************************************************
- *  Decompression types
- *********************************************************/
- typedef struct {
-     U32 fastMode;
-     U32 tableLog;
- } ZSTD_seqSymbol_header;
-
- typedef struct {
-     U16  nextState;
-     BYTE nbAdditionalBits;
-     BYTE nbBits;
-     U32  baseValue;
- } ZSTD_seqSymbol;
-
- #define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))
-
-typedef struct {
-    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];    /* Note : Space reserved for FSE Tables */
-    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];   /* is also used as temporary workspace while building hufTable during DDict creation */
-    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];    /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
-    HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)];  /* can accommodate HUF_decompress4X */
-    U32 rep[ZSTD_REP_NUM];
-} ZSTD_entropyDTables_t;
-
-typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
-               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
-               ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
-               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
-
-typedef enum { zdss_init=0, zdss_loadHeader,
-               zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
-
-typedef enum {
-    ZSTD_use_indefinitely = -1,  /* Use the dictionary indefinitely */
-    ZSTD_dont_use = 0,           /* Do not use the dictionary (if one exists free it) */
-    ZSTD_use_once = 1            /* Use the dictionary once and set to ZSTD_dont_use */
-} ZSTD_dictUses_e;
-
-struct ZSTD_DCtx_s
-{
-    const ZSTD_seqSymbol* LLTptr;
-    const ZSTD_seqSymbol* MLTptr;
-    const ZSTD_seqSymbol* OFTptr;
-    const HUF_DTable* HUFptr;
-    ZSTD_entropyDTables_t entropy;
-    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];   /* space needed when building huffman tables */
-    const void* previousDstEnd;   /* detect continuity */
-    const void* prefixStart;      /* start of current segment */
-    const void* virtualStart;     /* virtual start of previous segment if it was just before current one */
-    const void* dictEnd;          /* end of previous segment */
-    size_t expected;
-    ZSTD_frameHeader fParams;
-    U64 decodedSize;
-    blockType_e bType;            /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
-    ZSTD_dStage stage;
-    U32 litEntropy;
-    U32 fseEntropy;
-    XXH64_state_t xxhState;
-    size_t headerSize;
-    ZSTD_format_e format;
-    const BYTE* litPtr;
-    ZSTD_customMem customMem;
-    size_t litSize;
-    size_t rleSize;
-    size_t staticSize;
-    int bmi2;                     /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
-
-    /* dictionary */
-    ZSTD_DDict* ddictLocal;
-    const ZSTD_DDict* ddict;     /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
-    U32 dictID;
-    int ddictIsCold;             /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
-    ZSTD_dictUses_e dictUses;
-
-    /* streaming */
-    ZSTD_dStreamStage streamStage;
-    char*  inBuff;
-    size_t inBuffSize;
-    size_t inPos;
-    size_t maxWindowSize;
-    char*  outBuff;
-    size_t outBuffSize;
-    size_t outStart;
-    size_t outEnd;
-    size_t lhSize;
-    void* legacyContext;
-    U32 previousLegacyVersion;
-    U32 legacyVersion;
-    U32 hostageByte;
-    int noForwardProgress;
-
-    /* workspace */
-    BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
-    BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
-};  /* typedef'd to ZSTD_DCtx within "zstd.h" */
-
-
-/*-*******************************************************
- *  Shared internal functions
- *********************************************************/
-
-/*! ZSTD_loadDEntropy() :
- *  dict : must point at beginning of a valid zstd dictionary.
- * @return : size of entropy tables read */
-size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
-                   const void* const dict, size_t const dictSize);
-
-/*! ZSTD_checkContinuity() :
- *  check if next `dst` follows previous position, where decompression ended.
- *  If yes, do nothing (continue on current segment).
- *  If not, classify previous segment as "external dictionary", and start a new segment.
- *  This function cannot fail. */
-void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst);
-
-
-#endif /* ZSTD_DECOMPRESS_INTERNAL_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_double_fast.c b/vendor/github.com/DataDog/zstd/zstd_double_fast.c
deleted file mode 100644
index 5957255..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_double_fast.c
+++ /dev/null
@@ -1,519 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#include "zstd_compress_internal.h"
-#include "zstd_double_fast.h"
-
-
-void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
-                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
-{
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    U32* const hashLarge = ms->hashTable;
-    U32  const hBitsL = cParams->hashLog;
-    U32  const mls = cParams->minMatch;
-    U32* const hashSmall = ms->chainTable;
-    U32  const hBitsS = cParams->chainLog;
-    const BYTE* const base = ms->window.base;
-    const BYTE* ip = base + ms->nextToUpdate;
-    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
-    const U32 fastHashFillStep = 3;
-
-    /* Always insert every fastHashFillStep position into the hash tables.
-     * Insert the other positions into the large hash table if their entry
-     * is empty.
-     */
-    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
-        U32 const current = (U32)(ip - base);
-        U32 i;
-        for (i = 0; i < fastHashFillStep; ++i) {
-            size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
-            size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
-            if (i == 0)
-                hashSmall[smHash] = current + i;
-            if (i == 0 || hashLarge[lgHash] == 0)
-                hashLarge[lgHash] = current + i;
-            /* Only load extra positions for ZSTD_dtlm_full */
-            if (dtlm == ZSTD_dtlm_fast)
-                break;
-    }   }
-}
-
-
-FORCE_INLINE_TEMPLATE
-size_t ZSTD_compressBlock_doubleFast_generic(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize,
-        U32 const mls /* template */, ZSTD_dictMode_e const dictMode)
-{
-    ZSTD_compressionParameters const* cParams = &ms->cParams;
-    U32* const hashLong = ms->hashTable;
-    const U32 hBitsL = cParams->hashLog;
-    U32* const hashSmall = ms->chainTable;
-    const U32 hBitsS = cParams->chainLog;
-    const BYTE* const base = ms->window.base;
-    const BYTE* const istart = (const BYTE*)src;
-    const BYTE* ip = istart;
-    const BYTE* anchor = istart;
-    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
-    const U32 lowestValid = ms->window.dictLimit;
-    const U32 maxDistance = 1U << cParams->windowLog;
-    const U32 prefixLowestIndex = (endIndex - lowestValid > maxDistance) ? endIndex - maxDistance : lowestValid;
-    const BYTE* const prefixLowest = base + prefixLowestIndex;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* const ilimit = iend - HASH_READ_SIZE;
-    U32 offset_1=rep[0], offset_2=rep[1];
-    U32 offsetSaved = 0;
-
-    const ZSTD_matchState_t* const dms = ms->dictMatchState;
-    const ZSTD_compressionParameters* const dictCParams =
-                                     dictMode == ZSTD_dictMatchState ?
-                                     &dms->cParams : NULL;
-    const U32* const dictHashLong  = dictMode == ZSTD_dictMatchState ?
-                                     dms->hashTable : NULL;
-    const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?
-                                     dms->chainTable : NULL;
-    const U32 dictStartIndex       = dictMode == ZSTD_dictMatchState ?
-                                     dms->window.dictLimit : 0;
-    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
-                                     dms->window.base : NULL;
-    const BYTE* const dictStart    = dictMode == ZSTD_dictMatchState ?
-                                     dictBase + dictStartIndex : NULL;
-    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
-                                     dms->window.nextSrc : NULL;
-    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
-                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
-                                     0;
-    const U32 dictHBitsL           = dictMode == ZSTD_dictMatchState ?
-                                     dictCParams->hashLog : hBitsL;
-    const U32 dictHBitsS           = dictMode == ZSTD_dictMatchState ?
-                                     dictCParams->chainLog : hBitsS;
-    const U32 dictAndPrefixLength  = (U32)(ip - prefixLowest + dictEnd - dictStart);
-
-    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic");
-
-    assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
-
-    /* if a dictionary is attached, it must be within window range */
-    if (dictMode == ZSTD_dictMatchState) {
-        assert(lowestValid + maxDistance >= endIndex);
-    }
-
-    /* init */
-    ip += (dictAndPrefixLength == 0);
-    if (dictMode == ZSTD_noDict) {
-        U32 const maxRep = (U32)(ip - prefixLowest);
-        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
-        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
-    }
-    if (dictMode == ZSTD_dictMatchState) {
-        /* dictMatchState repCode checks don't currently handle repCode == 0
-         * disabling. */
-        assert(offset_1 <= dictAndPrefixLength);
-        assert(offset_2 <= dictAndPrefixLength);
-    }
-
-    /* Main Search Loop */
-    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
-        size_t mLength;
-        U32 offset;
-        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
-        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
-        size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
-        size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
-        U32 const current = (U32)(ip-base);
-        U32 const matchIndexL = hashLong[h2];
-        U32 matchIndexS = hashSmall[h];
-        const BYTE* matchLong = base + matchIndexL;
-        const BYTE* match = base + matchIndexS;
-        const U32 repIndex = current + 1 - offset_1;
-        const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
-                            && repIndex < prefixLowestIndex) ?
-                               dictBase + (repIndex - dictIndexDelta) :
-                               base + repIndex;
-        hashLong[h2] = hashSmall[h] = current;   /* update hash tables */
-
-        /* check dictMatchState repcode */
-        if (dictMode == ZSTD_dictMatchState
-            && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
-            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
-            const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
-            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
-            ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
-            goto _match_stored;
-        }
-
-        /* check noDict repcode */
-        if ( dictMode == ZSTD_noDict
-          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
-            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
-            ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
-            goto _match_stored;
-        }
-
-        if (matchIndexL > prefixLowestIndex) {
-            /* check prefix long match */
-            if (MEM_read64(matchLong) == MEM_read64(ip)) {
-                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
-                offset = (U32)(ip-matchLong);
-                while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
-                goto _match_found;
-            }
-        } else if (dictMode == ZSTD_dictMatchState) {
-            /* check dictMatchState long match */
-            U32 const dictMatchIndexL = dictHashLong[dictHL];
-            const BYTE* dictMatchL = dictBase + dictMatchIndexL;
-            assert(dictMatchL < dictEnd);
-
-            if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
-                mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
-                offset = (U32)(current - dictMatchIndexL - dictIndexDelta);
-                while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
-                goto _match_found;
-        }   }
-
-        if (matchIndexS > prefixLowestIndex) {
-            /* check prefix short match */
-            if (MEM_read32(match) == MEM_read32(ip)) {
-                goto _search_next_long;
-            }
-        } else if (dictMode == ZSTD_dictMatchState) {
-            /* check dictMatchState short match */
-            U32 const dictMatchIndexS = dictHashSmall[dictHS];
-            match = dictBase + dictMatchIndexS;
-            matchIndexS = dictMatchIndexS + dictIndexDelta;
-
-            if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
-                goto _search_next_long;
-        }   }
-
-        ip += ((ip-anchor) >> kSearchStrength) + 1;
-        continue;
-
-_search_next_long:
-
-        {   size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
-            size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
-            U32 const matchIndexL3 = hashLong[hl3];
-            const BYTE* matchL3 = base + matchIndexL3;
-            hashLong[hl3] = current + 1;
-
-            /* check prefix long +1 match */
-            if (matchIndexL3 > prefixLowestIndex) {
-                if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
-                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
-                    ip++;
-                    offset = (U32)(ip-matchL3);
-                    while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
-                    goto _match_found;
-                }
-            } else if (dictMode == ZSTD_dictMatchState) {
-                /* check dict long +1 match */
-                U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
-                const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
-                assert(dictMatchL3 < dictEnd);
-                if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
-                    mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
-                    ip++;
-                    offset = (U32)(current + 1 - dictMatchIndexL3 - dictIndexDelta);
-                    while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
-                    goto _match_found;
-        }   }   }
-
-        /* if no long +1 match, explore the short match we found */
-        if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
-            mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
-            offset = (U32)(current - matchIndexS);
-            while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
-        } else {
-            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
-            offset = (U32)(ip - match);
-            while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
-        }
-
-        /* fall-through */
-
-_match_found:
-        offset_2 = offset_1;
-        offset_1 = offset;
-
-        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-
-_match_stored:
-        /* match found */
-        ip += mLength;
-        anchor = ip;
-
-        if (ip <= ilimit) {
-            /* Complementary insertion */
-            /* done after iLimit test, as candidates could be > iend-8 */
-            {   U32 const indexToInsert = current+2;
-                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
-                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
-                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
-                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
-            }
-
-            /* check immediate repcode */
-            if (dictMode == ZSTD_dictMatchState) {
-                while (ip <= ilimit) {
-                    U32 const current2 = (U32)(ip-base);
-                    U32 const repIndex2 = current2 - offset_2;
-                    const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
-                        && repIndex2 < prefixLowestIndex ?
-                            dictBase - dictIndexDelta + repIndex2 :
-                            base + repIndex2;
-                    if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
-                       && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
-                        const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
-                        size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
-                        U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                        ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
-                        hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
-                        hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
-                        ip += repLength2;
-                        anchor = ip;
-                        continue;
-                    }
-                    break;
-            }   }
-
-            if (dictMode == ZSTD_noDict) {
-                while ( (ip <= ilimit)
-                     && ( (offset_2>0)
-                        & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
-                    /* store sequence */
-                    size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
-                    U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
-                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
-                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
-                    ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
-                    ip += rLength;
-                    anchor = ip;
-                    continue;   /* faster when present ... (?) */
-        }   }   }
-    }   /* while (ip < ilimit) */
-
-    /* save reps for next block */
-    rep[0] = offset_1 ? offset_1 : offsetSaved;
-    rep[1] = offset_2 ? offset_2 : offsetSaved;
-
-    /* Return the last literals size */
-    return (size_t)(iend - anchor);
-}
-
-
-size_t ZSTD_compressBlock_doubleFast(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    const U32 mls = ms->cParams.minMatch;
-    switch(mls)
-    {
-    default: /* includes case 3 */
-    case 4 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
-    case 5 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
-    case 6 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
-    case 7 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
-    }
-}
-
-
-size_t ZSTD_compressBlock_doubleFast_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    const U32 mls = ms->cParams.minMatch;
-    switch(mls)
-    {
-    default: /* includes case 3 */
-    case 4 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
-    case 5 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
-    case 6 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
-    case 7 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
-    }
-}
-
-
-static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize,
-        U32 const mls /* template */)
-{
-    ZSTD_compressionParameters const* cParams = &ms->cParams;
-    U32* const hashLong = ms->hashTable;
-    U32  const hBitsL = cParams->hashLog;
-    U32* const hashSmall = ms->chainTable;
-    U32  const hBitsS = cParams->chainLog;
-    const BYTE* const istart = (const BYTE*)src;
-    const BYTE* ip = istart;
-    const BYTE* anchor = istart;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* const ilimit = iend - 8;
-    const BYTE* const base = ms->window.base;
-    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
-    const U32   maxDistance = 1U << cParams->windowLog;
-    const U32   lowestValid = ms->window.lowLimit;
-    const U32   lowLimit = (endIndex - lowestValid > maxDistance) ? endIndex - maxDistance : lowestValid;
-    const U32   dictStartIndex = lowLimit;
-    const U32   dictLimit = ms->window.dictLimit;
-    const U32   prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
-    const BYTE* const prefixStart = base + prefixStartIndex;
-    const BYTE* const dictBase = ms->window.dictBase;
-    const BYTE* const dictStart = dictBase + dictStartIndex;
-    const BYTE* const dictEnd = dictBase + prefixStartIndex;
-    U32 offset_1=rep[0], offset_2=rep[1];
-
-    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
-
-    /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
-    if (prefixStartIndex == dictStartIndex)
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict);
-
-    /* Search Loop */
-    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
-        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
-        const U32 matchIndex = hashSmall[hSmall];
-        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
-        const BYTE* match = matchBase + matchIndex;
-
-        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
-        const U32 matchLongIndex = hashLong[hLong];
-        const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
-        const BYTE* matchLong = matchLongBase + matchLongIndex;
-
-        const U32 current = (U32)(ip-base);
-        const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
-        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
-        const BYTE* const repMatch = repBase + repIndex;
-        size_t mLength;
-        hashSmall[hSmall] = hashLong[hLong] = current;   /* update hash table */
-
-        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
-            & (repIndex > dictStartIndex))
-          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
-            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
-            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
-            ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
-        } else {
-            if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
-                const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
-                const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
-                U32 offset;
-                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
-                offset = current - matchLongIndex;
-                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
-                offset_2 = offset_1;
-                offset_1 = offset;
-                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-
-            } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
-                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
-                U32 const matchIndex3 = hashLong[h3];
-                const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
-                const BYTE* match3 = match3Base + matchIndex3;
-                U32 offset;
-                hashLong[h3] = current + 1;
-                if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
-                    const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
-                    const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
-                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
-                    ip++;
-                    offset = current+1 - matchIndex3;
-                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
-                } else {
-                    const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
-                    const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
-                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
-                    offset = current - matchIndex;
-                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
-                }
-                offset_2 = offset_1;
-                offset_1 = offset;
-                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-
-            } else {
-                ip += ((ip-anchor) >> kSearchStrength) + 1;
-                continue;
-        }   }
-
-        /* move to next sequence start */
-        ip += mLength;
-        anchor = ip;
-
-        if (ip <= ilimit) {
-            /* Complementary insertion */
-            /* done after iLimit test, as candidates could be > iend-8 */
-            {   U32 const indexToInsert = current+2;
-                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
-                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
-                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
-                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
-            }
-
-            /* check immediate repcode */
-            while (ip <= ilimit) {
-                U32 const current2 = (U32)(ip-base);
-                U32 const repIndex2 = current2 - offset_2;
-                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
-                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3)   /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
-                    & (repIndex2 > dictStartIndex))
-                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
-                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
-                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
-                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                    ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
-                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
-                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
-                    ip += repLength2;
-                    anchor = ip;
-                    continue;
-                }
-                break;
-    }   }   }
-
-    /* save reps for next block */
-    rep[0] = offset_1;
-    rep[1] = offset_2;
-
-    /* Return the last literals size */
-    return (size_t)(iend - anchor);
-}
-
-
-size_t ZSTD_compressBlock_doubleFast_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    U32 const mls = ms->cParams.minMatch;
-    switch(mls)
-    {
-    default: /* includes case 3 */
-    case 4 :
-        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
-    case 5 :
-        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
-    case 6 :
-        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
-    case 7 :
-        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
-    }
-}
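The removed zstd_double_fast.c probes two hash tables per position — an 8-byte "long" hash and an mls-byte "short" hash — and prefers a verified long match before falling back to the short candidate (retrying a long match at ip+1 on a short hit). Below is a minimal sketch of that two-table probe only; the table sizes, the FNV-style hash, and the `toyFindMatch()`/`hashBytes()` names are inventions for illustration, not zstd internals.

```c
/* Toy two-table probe loosely modelled on the deleted
 * ZSTD_compressBlock_doubleFast_generic(); table sizes, the FNV-style hash
 * and the toyFindMatch()/hashBytes() names are inventions for this sketch. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define LONG_BITS  10                        /* toy equivalents of hBitsL/hBitsS */
#define SHORT_BITS 10

static uint32_t longTable[1u << LONG_BITS];   /* candidates found via 8-byte hash */
static uint32_t shortTable[1u << SHORT_BITS]; /* candidates found via 4-byte hash */

static uint32_t hashBytes(const uint8_t *p, size_t n, unsigned bits)
{
    uint64_t h = 1469598103934665603ULL;      /* FNV-1a, good enough for a demo */
    for (size_t i = 0; i < n; i++) { h ^= p[i]; h *= 1099511628211ULL; }
    return (uint32_t)(h >> (64 - bits));
}

/* Returns the backward offset of a match at `pos` (0 if none), preferring a
 * verified 8-byte long match over a 4-byte short one, as the deleted loop does. */
static size_t toyFindMatch(const uint8_t *base, size_t pos, size_t *matchLen)
{
    uint32_t const hL = hashBytes(base + pos, 8, LONG_BITS);
    uint32_t const hS = hashBytes(base + pos, 4, SHORT_BITS);
    uint32_t const candL = longTable[hL];
    uint32_t const candS = shortTable[hS];

    longTable[hL] = shortTable[hS] = (uint32_t)pos;   /* update both tables */

    if (candL != 0 && memcmp(base + candL, base + pos, 8) == 0) {
        *matchLen = 8;              /* the real code then extends the match */
        return pos - candL;
    }
    if (candS != 0 && memcmp(base + candS, base + pos, 4) == 0) {
        *matchLen = 4;              /* short hit: real code retries a long match at ip+1 */
        return pos - candS;
    }
    *matchLen = 0;
    return 0;
}
```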
diff --git a/vendor/github.com/DataDog/zstd/zstd_double_fast.h b/vendor/github.com/DataDog/zstd/zstd_double_fast.h
deleted file mode 100644
index 4fa31ac..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_double_fast.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_DOUBLE_FAST_H
-#define ZSTD_DOUBLE_FAST_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#include "mem.h"      /* U32 */
-#include "zstd_compress_internal.h"     /* ZSTD_CCtx, size_t */
-
-void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
-                              void const* end, ZSTD_dictTableLoadMethod_e dtlm);
-size_t ZSTD_compressBlock_doubleFast(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_doubleFast_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_doubleFast_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_DOUBLE_FAST_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_errors.h b/vendor/github.com/DataDog/zstd/zstd_errors.h
deleted file mode 100644
index 92a3433..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_errors.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_ERRORS_H_398273423
-#define ZSTD_ERRORS_H_398273423
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/*===== dependency =====*/
-#include <stddef.h>   /* size_t */
-
-
-/* =====   ZSTDERRORLIB_API : control library symbols visibility   ===== */
-#ifndef ZSTDERRORLIB_VISIBILITY
-#  if defined(__GNUC__) && (__GNUC__ >= 4)
-#    define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
-#  else
-#    define ZSTDERRORLIB_VISIBILITY
-#  endif
-#endif
-#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
-#  define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY
-#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
-#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
-#else
-#  define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
-#endif
-
-/*-*********************************************
- *  Error codes list
- *-*********************************************
- *  Error codes _values_ are pinned down since v1.3.1 only.
- *  Therefore, don't rely on values if you may link to any version < v1.3.1.
- *
- *  Only values < 100 are considered stable.
- *
- *  note 1 : this API shall be used with static linking only.
- *           dynamic linking is not yet officially supported.
- * note 2 : Prefer relying on the enum rather than on its value whenever possible
- *           This is the only supported way to use the error list < v1.3.1
- *  note 3 : ZSTD_isError() is always correct, whatever the library version.
- **********************************************/
-typedef enum {
-  ZSTD_error_no_error = 0,
-  ZSTD_error_GENERIC  = 1,
-  ZSTD_error_prefix_unknown                = 10,
-  ZSTD_error_version_unsupported           = 12,
-  ZSTD_error_frameParameter_unsupported    = 14,
-  ZSTD_error_frameParameter_windowTooLarge = 16,
-  ZSTD_error_corruption_detected = 20,
-  ZSTD_error_checksum_wrong      = 22,
-  ZSTD_error_dictionary_corrupted      = 30,
-  ZSTD_error_dictionary_wrong          = 32,
-  ZSTD_error_dictionaryCreation_failed = 34,
-  ZSTD_error_parameter_unsupported   = 40,
-  ZSTD_error_parameter_outOfBound    = 42,
-  ZSTD_error_tableLog_tooLarge       = 44,
-  ZSTD_error_maxSymbolValue_tooLarge = 46,
-  ZSTD_error_maxSymbolValue_tooSmall = 48,
-  ZSTD_error_stage_wrong       = 60,
-  ZSTD_error_init_missing      = 62,
-  ZSTD_error_memory_allocation = 64,
-  ZSTD_error_workSpace_tooSmall= 66,
-  ZSTD_error_dstSize_tooSmall = 70,
-  ZSTD_error_srcSize_wrong    = 72,
-  ZSTD_error_dstBuffer_null   = 74,
-  /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
-  ZSTD_error_frameIndex_tooLarge = 100,
-  ZSTD_error_seekableIO          = 102,
-  ZSTD_error_maxCode = 120  /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
-} ZSTD_ErrorCode;
-
-/*! ZSTD_getErrorCode() :
-    convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
-    which can be used to compare with enum list published above */
-ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
-ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);   /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_ERRORS_H_398273423 */
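The removed zstd_errors.h pins error values only from v1.3.1 onward and recommends comparing against the enum rather than the raw value. A minimal sketch of consuming that API follows, assuming the public zstd.h entry points `ZSTD_compress()` and `ZSTD_isError()` alongside the declarations above; the helper name and buffer sizes are illustrative only.

```c
/* Minimal consumer of the error API declared above; ZSTD_compress() and
 * ZSTD_isError() come from the public zstd.h, the rest from zstd_errors.h. */
#include <stdio.h>
#include <string.h>
#include "zstd.h"
#include "zstd_errors.h"

static void reportCompressError(void)
{
    char dst[16];                                  /* deliberately too small */
    const char *src = "a payload that will not fit into sixteen bytes";
    size_t const ret = ZSTD_compress(dst, sizeof dst, src, strlen(src), 1);

    if (ZSTD_isError(ret)) {
        ZSTD_ErrorCode const code = ZSTD_getErrorCode(ret);
        /* compare against the enum, not its numeric value (see note 2 above) */
        if (code == ZSTD_error_dstSize_tooSmall)
            fprintf(stderr, "compression failed: %s\n", ZSTD_getErrorString(code));
    }
}
```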
diff --git a/vendor/github.com/DataDog/zstd/zstd_fast.c b/vendor/github.com/DataDog/zstd/zstd_fast.c
deleted file mode 100644
index a05b8a4..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_fast.c
+++ /dev/null
@@ -1,491 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#include "zstd_compress_internal.h"
-#include "zstd_fast.h"
-
-
-void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
-                        const void* const end,
-                        ZSTD_dictTableLoadMethod_e dtlm)
-{
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    U32* const hashTable = ms->hashTable;
-    U32  const hBits = cParams->hashLog;
-    U32  const mls = cParams->minMatch;
-    const BYTE* const base = ms->window.base;
-    const BYTE* ip = base + ms->nextToUpdate;
-    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
-    const U32 fastHashFillStep = 3;
-
-    /* Always insert every fastHashFillStep position into the hash table.
-     * Insert the other positions if their hash entry is empty.
-     */
-    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
-        U32 const current = (U32)(ip - base);
-        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
-        hashTable[hash0] = current;
-        if (dtlm == ZSTD_dtlm_fast) continue;
-        /* Only load extra positions for ZSTD_dtlm_full */
-        {   U32 p;
-            for (p = 1; p < fastHashFillStep; ++p) {
-                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
-                if (hashTable[hash] == 0) {  /* not yet filled */
-                    hashTable[hash] = current + p;
-    }   }   }   }
-}
-
-
-FORCE_INLINE_TEMPLATE
-size_t ZSTD_compressBlock_fast_generic(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize,
-        U32 const mls)
-{
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    U32* const hashTable = ms->hashTable;
-    U32 const hlog = cParams->hashLog;
-    /* support stepSize of 0 */
-    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
-    const BYTE* const base = ms->window.base;
-    const BYTE* const istart = (const BYTE*)src;
-    /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
-    const BYTE* ip0 = istart;
-    const BYTE* ip1;
-    const BYTE* anchor = istart;
-    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
-    const U32   maxDistance = 1U << cParams->windowLog;
-    const U32   validStartIndex = ms->window.dictLimit;
-    const U32   prefixStartIndex = (endIndex - validStartIndex > maxDistance) ? endIndex - maxDistance : validStartIndex;
-    const BYTE* const prefixStart = base + prefixStartIndex;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* const ilimit = iend - HASH_READ_SIZE;
-    U32 offset_1=rep[0], offset_2=rep[1];
-    U32 offsetSaved = 0;
-
-    /* init */
-    ip0 += (ip0 == prefixStart);
-    ip1 = ip0 + 1;
-    {
-        U32 const maxRep = (U32)(ip0 - prefixStart);
-        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
-        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
-    }
-
-    /* Main Search Loop */
-    while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */
-        size_t mLength;
-        BYTE const* ip2 = ip0 + 2;
-        size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
-        U32 const val0 = MEM_read32(ip0);
-        size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
-        U32 const val1 = MEM_read32(ip1);
-        U32 const current0 = (U32)(ip0-base);
-        U32 const current1 = (U32)(ip1-base);
-        U32 const matchIndex0 = hashTable[h0];
-        U32 const matchIndex1 = hashTable[h1];
-        BYTE const* repMatch = ip2-offset_1;
-        const BYTE* match0 = base + matchIndex0;
-        const BYTE* match1 = base + matchIndex1;
-        U32 offcode;
-        hashTable[h0] = current0;   /* update hash table */
-        hashTable[h1] = current1;   /* update hash table */
-
-        assert(ip0 + 1 == ip1);
-
-        if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
-            mLength = ip2[-1] == repMatch[-1] ? 1 : 0;
-            ip0 = ip2 - mLength;
-            match0 = repMatch - mLength;
-            offcode = 0;
-            goto _match;
-        }
-        if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
-            /* found a regular match */
-            goto _offset;
-        }
-        if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
-            /* found a regular match after one literal */
-            ip0 = ip1;
-            match0 = match1;
-            goto _offset;
-        }
-        {
-            size_t const step = ((ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
-            assert(step >= 2);
-            ip0 += step;
-            ip1 += step;
-            continue;
-        }
-_offset: /* Requires: ip0, match0 */
-        /* Compute the offset code */
-        offset_2 = offset_1;
-        offset_1 = (U32)(ip0-match0);
-        offcode = offset_1 + ZSTD_REP_MOVE;
-        mLength = 0;
-        /* Count the backwards match length */
-        while (((ip0>anchor) & (match0>prefixStart))
-             && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
-
-_match: /* Requires: ip0, match0, offcode */
-        /* Count the forward length */
-        mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;
-        ZSTD_storeSeq(seqStore, ip0-anchor, anchor, offcode, mLength-MINMATCH);
-        /* match found */
-        ip0 += mLength;
-        anchor = ip0;
-        ip1 = ip0 + 1;
-
-        if (ip0 <= ilimit) {
-            /* Fill Table */
-            assert(base+current0+2 > istart);  /* check base overflow */
-            hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
-            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
-
-            while ( (ip0 <= ilimit)
-                 && ( (offset_2>0)
-                    & (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) )) {
-                /* store sequence */
-                size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
-                U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
-                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
-                ip0 += rLength;
-                ip1 = ip0 + 1;
-                ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
-                anchor = ip0;
-                continue;   /* faster when present (confirmed on gcc-8) ... (?) */
-            }
-        }
-    }
-
-    /* save reps for next block */
-    rep[0] = offset_1 ? offset_1 : offsetSaved;
-    rep[1] = offset_2 ? offset_2 : offsetSaved;
-
-    /* Return the last literals size */
-    return (size_t)(iend - anchor);
-}
-
-
-size_t ZSTD_compressBlock_fast(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    ZSTD_compressionParameters const* cParams = &ms->cParams;
-    U32 const mls = cParams->minMatch;
-    assert(ms->dictMatchState == NULL);
-    switch(mls)
-    {
-    default: /* includes case 3 */
-    case 4 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
-    case 5 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
-    case 6 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
-    case 7 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
-    }
-}
-
-FORCE_INLINE_TEMPLATE
-size_t ZSTD_compressBlock_fast_dictMatchState_generic(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize, U32 const mls)
-{
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    U32* const hashTable = ms->hashTable;
-    U32 const hlog = cParams->hashLog;
-    /* support stepSize of 0 */
-    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
-    const BYTE* const base = ms->window.base;
-    const BYTE* const istart = (const BYTE*)src;
-    const BYTE* ip = istart;
-    const BYTE* anchor = istart;
-    const U32   prefixStartIndex = ms->window.dictLimit;
-    const BYTE* const prefixStart = base + prefixStartIndex;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* const ilimit = iend - HASH_READ_SIZE;
-    U32 offset_1=rep[0], offset_2=rep[1];
-    U32 offsetSaved = 0;
-
-    const ZSTD_matchState_t* const dms = ms->dictMatchState;
-    const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
-    const U32* const dictHashTable = dms->hashTable;
-    const U32 dictStartIndex       = dms->window.dictLimit;
-    const BYTE* const dictBase     = dms->window.base;
-    const BYTE* const dictStart    = dictBase + dictStartIndex;
-    const BYTE* const dictEnd      = dms->window.nextSrc;
-    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);
-    const U32 dictAndPrefixLength  = (U32)(ip - prefixStart + dictEnd - dictStart);
-    const U32 dictHLog             = dictCParams->hashLog;
-
-    /* if a dictionary is still attached, it necessarily means that
-     * it is within window size. So we just check it. */
-    const U32 maxDistance = 1U << cParams->windowLog;
-    const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
-    assert(endIndex - prefixStartIndex <= maxDistance);
-    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */
-
-    /* ensure there will be no underflow
-     * when translating a dict index into a local index */
-    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
-
-    /* init */
-    ip += (dictAndPrefixLength == 0);
-    /* dictMatchState repCode checks don't currently handle repCode == 0
-     * disabling. */
-    assert(offset_1 <= dictAndPrefixLength);
-    assert(offset_2 <= dictAndPrefixLength);
-
-    /* Main Search Loop */
-    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
-        size_t mLength;
-        size_t const h = ZSTD_hashPtr(ip, hlog, mls);
-        U32 const current = (U32)(ip-base);
-        U32 const matchIndex = hashTable[h];
-        const BYTE* match = base + matchIndex;
-        const U32 repIndex = current + 1 - offset_1;
-        const BYTE* repMatch = (repIndex < prefixStartIndex) ?
-                               dictBase + (repIndex - dictIndexDelta) :
-                               base + repIndex;
-        hashTable[h] = current;   /* update hash table */
-
-        if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
-          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
-            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
-            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
-            ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
-        } else if ( (matchIndex <= prefixStartIndex) ) {
-            size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
-            U32 const dictMatchIndex = dictHashTable[dictHash];
-            const BYTE* dictMatch = dictBase + dictMatchIndex;
-            if (dictMatchIndex <= dictStartIndex ||
-                MEM_read32(dictMatch) != MEM_read32(ip)) {
-                assert(stepSize >= 1);
-                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
-                continue;
-            } else {
-                /* found a dict match */
-                U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
-                mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
-                while (((ip>anchor) & (dictMatch>dictStart))
-                     && (ip[-1] == dictMatch[-1])) {
-                    ip--; dictMatch--; mLength++;
-                } /* catch up */
-                offset_2 = offset_1;
-                offset_1 = offset;
-                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-            }
-        } else if (MEM_read32(match) != MEM_read32(ip)) {
-            /* it's not a match, and we're not going to check the dictionary */
-            assert(stepSize >= 1);
-            ip += ((ip-anchor) >> kSearchStrength) + stepSize;
-            continue;
-        } else {
-            /* found a regular match */
-            U32 const offset = (U32)(ip-match);
-            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
-            while (((ip>anchor) & (match>prefixStart))
-                 && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
-            offset_2 = offset_1;
-            offset_1 = offset;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-        }
-
-        /* match found */
-        ip += mLength;
-        anchor = ip;
-
-        if (ip <= ilimit) {
-            /* Fill Table */
-            assert(base+current+2 > istart);  /* check base overflow */
-            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;  /* here because current+2 could be > iend-8 */
-            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
-
-            /* check immediate repcode */
-            while (ip <= ilimit) {
-                U32 const current2 = (U32)(ip-base);
-                U32 const repIndex2 = current2 - offset_2;
-                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
-                        dictBase - dictIndexDelta + repIndex2 :
-                        base + repIndex2;
-                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
-                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
-                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
-                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
-                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                    ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
-                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
-                    ip += repLength2;
-                    anchor = ip;
-                    continue;
-                }
-                break;
-            }
-        }
-    }
-
-    /* save reps for next block */
-    rep[0] = offset_1 ? offset_1 : offsetSaved;
-    rep[1] = offset_2 ? offset_2 : offsetSaved;
-
-    /* Return the last literals size */
-    return (size_t)(iend - anchor);
-}
-
-size_t ZSTD_compressBlock_fast_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    ZSTD_compressionParameters const* cParams = &ms->cParams;
-    U32 const mls = cParams->minMatch;
-    assert(ms->dictMatchState != NULL);
-    switch(mls)
-    {
-    default: /* includes case 3 */
-    case 4 :
-        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
-    case 5 :
-        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
-    case 6 :
-        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
-    case 7 :
-        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
-    }
-}
-
-
-static size_t ZSTD_compressBlock_fast_extDict_generic(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize, U32 const mls)
-{
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    U32* const hashTable = ms->hashTable;
-    U32 const hlog = cParams->hashLog;
-    /* support stepSize of 0 */
-    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
-    const BYTE* const base = ms->window.base;
-    const BYTE* const dictBase = ms->window.dictBase;
-    const BYTE* const istart = (const BYTE*)src;
-    const BYTE* ip = istart;
-    const BYTE* anchor = istart;
-    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
-    const U32   maxDistance = 1U << cParams->windowLog;
-    const U32   validLow = ms->window.lowLimit;
-    const U32   lowLimit = (endIndex - validLow > maxDistance) ? endIndex - maxDistance : validLow;
-    const U32   dictStartIndex = lowLimit;
-    const BYTE* const dictStart = dictBase + dictStartIndex;
-    const U32   dictLimit = ms->window.dictLimit;
-    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
-    const BYTE* const prefixStart = base + prefixStartIndex;
-    const BYTE* const dictEnd = dictBase + prefixStartIndex;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* const ilimit = iend - 8;
-    U32 offset_1=rep[0], offset_2=rep[1];
-
-    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
-    if (prefixStartIndex == dictStartIndex)
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
-
-    /* Search Loop */
-    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
-        const size_t h = ZSTD_hashPtr(ip, hlog, mls);
-        const U32    matchIndex = hashTable[h];
-        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
-        const BYTE*  match = matchBase + matchIndex;
-        const U32    current = (U32)(ip-base);
-        const U32    repIndex = current + 1 - offset_1;
-        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
-        const BYTE* const repMatch = repBase + repIndex;
-        size_t mLength;
-        hashTable[h] = current;   /* update hash table */
-        assert(offset_1 <= current +1);   /* check repIndex */
-
-        if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
-           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
-            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
-            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
-            ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
-        } else {
-            if ( (matchIndex < dictStartIndex) ||
-                 (MEM_read32(match) != MEM_read32(ip)) ) {
-                assert(stepSize >= 1);
-                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
-                continue;
-            }
-            {   const BYTE* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
-                const BYTE* lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
-                U32 offset;
-                mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
-                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
-                offset = current - matchIndex;
-                offset_2 = offset_1;
-                offset_1 = offset;
-                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-        }   }
-
-        /* found a match : store it */
-        ip += mLength;
-        anchor = ip;
-
-        if (ip <= ilimit) {
-            /* Fill Table */
-            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
-            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
-            /* check immediate repcode */
-            while (ip <= ilimit) {
-                U32 const current2 = (U32)(ip-base);
-                U32 const repIndex2 = current2 - offset_2;
-                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
-                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex))  /* intentional overflow */
-                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
-                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
-                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
-                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                    ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
-                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
-                    ip += repLength2;
-                    anchor = ip;
-                    continue;
-                }
-                break;
-    }   }   }
-
-    /* save reps for next block */
-    rep[0] = offset_1;
-    rep[1] = offset_2;
-
-    /* Return the last literals size */
-    return (size_t)(iend - anchor);
-}
-
-
-size_t ZSTD_compressBlock_fast_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    ZSTD_compressionParameters const* cParams = &ms->cParams;
-    U32 const mls = cParams->minMatch;
-    switch(mls)
-    {
-    default: /* includes case 3 */
-    case 4 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
-    case 5 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
-    case 6 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
-    case 7 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
-    }
-}
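The removed zstd_fast.c relies on the same repcode bookkeeping seen in the double-fast variant: before paying for a hash-table search, it checks whether the next bytes repeat one of the two most recent offsets, emits a zero-offset sequence on a hit, and swaps offset_1/offset_2. A toy sketch of that "immediate repcode" test is below; `RepState` and `tryImmediateRepcode()` are invented names for illustration.

```c
/* Toy version of the "check immediate repcode" test used throughout the
 * deleted fast/double-fast loops; RepState and tryImmediateRepcode() are
 * invented names for this sketch. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

typedef struct { uint32_t offset_1, offset_2; } RepState;

/* Returns 1 when the 4 bytes at `ip` repeat the bytes offset_2 back, in which
 * case the real code stores a zero-offset sequence and swaps the two offsets. */
static int tryImmediateRepcode(RepState *rs, const uint8_t *ip, const uint8_t *start)
{
    if (rs->offset_2 == 0 || (size_t)(ip - start) < rs->offset_2)
        return 0;                                  /* no usable history yet */
    if (memcmp(ip, ip - rs->offset_2, 4) != 0)
        return 0;                                  /* bytes do not repeat */

    uint32_t const tmp = rs->offset_2;             /* swap offset_2 <=> offset_1 */
    rs->offset_2 = rs->offset_1;
    rs->offset_1 = tmp;
    return 1;                                      /* caller emits the sequence */
}
```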
diff --git a/vendor/github.com/DataDog/zstd/zstd_fast.h b/vendor/github.com/DataDog/zstd/zstd_fast.h
deleted file mode 100644
index b74a88c..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_fast.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_FAST_H
-#define ZSTD_FAST_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#include "mem.h"      /* U32 */
-#include "zstd_compress_internal.h"
-
-void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
-                        void const* end, ZSTD_dictTableLoadMethod_e dtlm);
-size_t ZSTD_compressBlock_fast(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_fast_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_fast_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_FAST_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_internal.h b/vendor/github.com/DataDog/zstd/zstd_internal.h
deleted file mode 100644
index 81b16ea..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_internal.h
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_CCOMMON_H_MODULE
-#define ZSTD_CCOMMON_H_MODULE
-
-/* this module contains definitions which must be identical
- * across compression, decompression and dictBuilder.
- * It also contains a few functions useful to at least 2 of them
- * and which benefit from being inlined */
-
-/*-*************************************
-*  Dependencies
-***************************************/
-#include "compiler.h"
-#include "mem.h"
-#include "debug.h"                 /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
-#include "error_private.h"
-#define ZSTD_STATIC_LINKING_ONLY
-#include "zstd.h"
-#define FSE_STATIC_LINKING_ONLY
-#include "fse.h"
-#define HUF_STATIC_LINKING_ONLY
-#include "huf.h"
-#ifndef XXH_STATIC_LINKING_ONLY
-#  define XXH_STATIC_LINKING_ONLY  /* XXH64_state_t */
-#endif
-#include "xxhash.h"                /* XXH_reset, update, digest */
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* ---- static assert (debug) --- */
-#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
-#define ZSTD_isError ERR_isError   /* for inlining */
-#define FSE_isError  ERR_isError
-#define HUF_isError  ERR_isError
-
-
-/*-*************************************
-*  shared macros
-***************************************/
-#undef MIN
-#undef MAX
-#define MIN(a,b) ((a)<(b) ? (a) : (b))
-#define MAX(a,b) ((a)>(b) ? (a) : (b))
-
-/**
- * Return the specified error if the condition evaluates to true.
- *
- * In debug modes, prints additional information. In order to do that
- * (particularly, printing the conditional that failed), this can't just wrap
- * RETURN_ERROR().
- */
-#define RETURN_ERROR_IF(cond, err, ...) \
-  if (cond) { \
-    RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
-    RAWLOG(3, ": " __VA_ARGS__); \
-    RAWLOG(3, "\n"); \
-    return ERROR(err); \
-  }
-
-/**
- * Unconditionally return the specified error.
- *
- * In debug modes, prints additional information.
- */
-#define RETURN_ERROR(err, ...) \
-  do { \
-    RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
-    RAWLOG(3, ": " __VA_ARGS__); \
-    RAWLOG(3, "\n"); \
-    return ERROR(err); \
-  } while(0);
-
-/**
- * If the provided expression evaluates to an error code, returns that error code.
- *
- * In debug modes, prints additional information.
- */
-#define FORWARD_IF_ERROR(err, ...) \
-  do { \
-    size_t const err_code = (err); \
-    if (ERR_isError(err_code)) { \
-      RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
-      RAWLOG(3, ": " __VA_ARGS__); \
-      RAWLOG(3, "\n"); \
-      return err_code; \
-    } \
-  } while(0);
-
-
-/*-*************************************
-*  Common constants
-***************************************/
-#define ZSTD_OPT_NUM    (1<<12)
-
-#define ZSTD_REP_NUM      3                 /* number of repcodes */
-#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)
-static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
-
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
-
-#define BIT7 128
-#define BIT6  64
-#define BIT5  32
-#define BIT4  16
-#define BIT1   2
-#define BIT0   1
-
-#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
-static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
-static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
-
-#define ZSTD_FRAMEIDSIZE 4   /* magic number size */
-
-#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
-static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
-typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
-
-#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
-#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */
-
-#define HufLog 12
-typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
-
-#define LONGNBSEQ 0x7F00
-
-#define MINMATCH 3
-
-#define Litbits  8
-#define MaxLit ((1<<Litbits) - 1)
-#define MaxML   52
-#define MaxLL   35
-#define DefaultMaxOff 28
-#define MaxOff  31
-#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
-#define MLFSELog    9
-#define LLFSELog    9
-#define OffFSELog   8
-#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
-
-static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0,
-                                      0, 0, 0, 0, 0, 0, 0, 0,
-                                      1, 1, 1, 1, 2, 2, 3, 3,
-                                      4, 6, 7, 8, 9,10,11,12,
-                                     13,14,15,16 };
-static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2,
-                                             2, 2, 2, 2, 2, 1, 1, 1,
-                                             2, 2, 2, 2, 2, 2, 2, 2,
-                                             2, 3, 2, 1, 1, 1, 1, 1,
-                                            -1,-1,-1,-1 };
-#define LL_DEFAULTNORMLOG 6  /* for static allocation */
-static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
-
-static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0,
-                                      0, 0, 0, 0, 0, 0, 0, 0,
-                                      0, 0, 0, 0, 0, 0, 0, 0,
-                                      0, 0, 0, 0, 0, 0, 0, 0,
-                                      1, 1, 1, 1, 2, 2, 3, 3,
-                                      4, 4, 5, 7, 8, 9,10,11,
-                                     12,13,14,15,16 };
-static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2,
-                                             2, 1, 1, 1, 1, 1, 1, 1,
-                                             1, 1, 1, 1, 1, 1, 1, 1,
-                                             1, 1, 1, 1, 1, 1, 1, 1,
-                                             1, 1, 1, 1, 1, 1, 1, 1,
-                                             1, 1, 1, 1, 1, 1,-1,-1,
-                                            -1,-1,-1,-1,-1 };
-#define ML_DEFAULTNORMLOG 6  /* for static allocation */
-static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
-
-static const S16 OF_defaultNorm[DefaultMaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2,
-                                                     2, 1, 1, 1, 1, 1, 1, 1,
-                                                     1, 1, 1, 1, 1, 1, 1, 1,
-                                                    -1,-1,-1,-1,-1 };
-#define OF_DEFAULTNORMLOG 5  /* for static allocation */
-static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
-
-
-/*-*******************************************
-*  Shared functions to include for inlining
-*********************************************/
-static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
-
-#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
-static void ZSTD_copy16(void* dst, const void* src) { memcpy(dst, src, 16); }
-#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
-
-#define WILDCOPY_OVERLENGTH 8
-#define VECLEN 16
-
-typedef enum {
-    ZSTD_no_overlap,
-    ZSTD_overlap_src_before_dst,
-    /*  ZSTD_overlap_dst_before_src, */
-} ZSTD_overlap_e;
-
-/*! ZSTD_wildcopy() :
- *  custom version of memcpy(), can overwrite up to WILDCOPY_OVERLENGTH bytes (if length==0) */
-MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE
-void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype)
-{
-    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
-    const BYTE* ip = (const BYTE*)src;
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + length;
-
-    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8));
-    if (length < VECLEN || (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN)) {
-      do
-          COPY8(op, ip)
-      while (op < oend);
-    }
-    else {
-      if ((length & 8) == 0)
-        COPY8(op, ip);
-      do {
-        COPY16(op, ip);
-      }
-      while (op < oend);
-    }
-}
-
-/*! ZSTD_wildcopy_16min() :
- *  same semantics as ZSTD_wildcopy() except guaranteed to be able to copy 16 bytes at the start */
-MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE
-void ZSTD_wildcopy_16min(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype)
-{
-    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
-    const BYTE* ip = (const BYTE*)src;
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + length;
-
-    assert(length >= 8);
-    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8));
-
-    if (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN) {
-      do
-          COPY8(op, ip)
-      while (op < oend);
-    }
-    else {
-      if ((length & 8) == 0)
-        COPY8(op, ip);
-      do {
-        COPY16(op, ip);
-      }
-      while (op < oend);
-    }
-}
-
-MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd)   /* should be faster for decoding, but strangely, not verified on all platforms */
-{
-    const BYTE* ip = (const BYTE*)src;
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = (BYTE*)dstEnd;
-    do
-        COPY8(op, ip)
-    while (op < oend);
-}
-
-
-/*-*******************************************
-*  Private declarations
-*********************************************/
-typedef struct seqDef_s {
-    U32 offset;
-    U16 litLength;
-    U16 matchLength;
-} seqDef;
-
-typedef struct {
-    seqDef* sequencesStart;
-    seqDef* sequences;
-    BYTE* litStart;
-    BYTE* lit;
-    BYTE* llCode;
-    BYTE* mlCode;
-    BYTE* ofCode;
-    size_t maxNbSeq;
-    size_t maxNbLit;
-    U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
-    U32   longLengthPos;
-} seqStore_t;
-
-/**
- * Contains the compressed frame size and an upper-bound for the decompressed frame size.
- * Note: before using `compressedSize`, check for errors using ZSTD_isError().
- *       similarly, before using `decompressedBound`, check for errors using:
- *          `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
- */
-typedef struct {
-    size_t compressedSize;
-    unsigned long long decompressedBound;
-} ZSTD_frameSizeInfo;   /* decompress & legacy */
-
-const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
-void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
-
-/* custom memory allocation functions */
-void* ZSTD_malloc(size_t size, ZSTD_customMem customMem);
-void* ZSTD_calloc(size_t size, ZSTD_customMem customMem);
-void ZSTD_free(void* ptr, ZSTD_customMem customMem);
-
-
-MEM_STATIC U32 ZSTD_highbit32(U32 val)   /* compress, dictBuilder, decodeCorpus */
-{
-    assert(val != 0);
-    {
-#   if defined(_MSC_VER)   /* Visual */
-        unsigned long r=0;
-        _BitScanReverse(&r, val);
-        return (unsigned)r;
-#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
-        return 31 - __builtin_clz(val);
-#   else   /* Software version */
-        static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
-        U32 v = val;
-        v |= v >> 1;
-        v |= v >> 2;
-        v |= v >> 4;
-        v |= v >> 8;
-        v |= v >> 16;
-        return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
-#   endif
-    }
-}
-
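The software fallback in `ZSTD_highbit32()` first smears the top set bit into every lower position, then multiplies by a De Bruijn constant so the top five bits index a lookup table. A small sanity check of that fallback against a naive loop, under the same `val != 0` precondition asserted above:

```c
#include <assert.h>

typedef unsigned int U32;

static U32 naive_highbit32(U32 val)          /* index of the highest set bit: 1 -> 0, 0x80000000 -> 31 */
{
    U32 r = 0;
    while (val >>= 1) r++;
    return r;
}

static U32 debruijn_highbit32(U32 val)       /* same table and constant as the fallback above */
{
    static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30,
                                         8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
    U32 v = val;
    v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;   /* smear the top bit downward */
    return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
}

int main(void)
{
    U32 v;
    for (v = 1; v != 0; v <<= 1)             /* check every power of two */
        assert(debruijn_highbit32(v) == naive_highbit32(v));
    assert(debruijn_highbit32(0x12345678U) == 28);
    return 0;
}
```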
-
-/* ZSTD_invalidateRepCodes() :
- * ensures next compression will not use repcodes from previous block.
- * Note : only works with regular variant;
- *        do not use with extDict variant ! */
-void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);   /* zstdmt, adaptive_compression (shouldn't get this definition from here) */
-
-
-typedef struct {
-    blockType_e blockType;
-    U32 lastBlock;
-    U32 origSize;
-} blockProperties_t;   /* declared here for decompress and fullbench */
-
-/*! ZSTD_getcBlockSize() :
- *  Provides the size of compressed block from block header `src` */
-/* Used by: decompress, fullbench (does not get its definition from here) */
-size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
-                          blockProperties_t* bpPtr);
-
-/*! ZSTD_decodeSeqHeaders() :
- *  decode sequence header from src */
-/* Used by: decompress, fullbench (does not get its definition from here) */
-size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
-                       const void* src, size_t srcSize);
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif   /* ZSTD_CCOMMON_H_MODULE */
diff --git a/vendor/github.com/DataDog/zstd/zstd_lazy.c b/vendor/github.com/DataDog/zstd/zstd_lazy.c
deleted file mode 100644
index 94d906c..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_lazy.c
+++ /dev/null
@@ -1,1111 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#include "zstd_compress_internal.h"
-#include "zstd_lazy.h"
-
-
-/*-*************************************
-*  Binary Tree search
-***************************************/
-
-static void
-ZSTD_updateDUBT(ZSTD_matchState_t* ms,
-                const BYTE* ip, const BYTE* iend,
-                U32 mls)
-{
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    U32* const hashTable = ms->hashTable;
-    U32  const hashLog = cParams->hashLog;
-
-    U32* const bt = ms->chainTable;
-    U32  const btLog  = cParams->chainLog - 1;
-    U32  const btMask = (1 << btLog) - 1;
-
-    const BYTE* const base = ms->window.base;
-    U32 const target = (U32)(ip - base);
-    U32 idx = ms->nextToUpdate;
-
-    if (idx != target)
-        DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
-                    idx, target, ms->window.dictLimit);
-    assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */
-    (void)iend;
-
-    assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */
-    for ( ; idx < target ; idx++) {
-        size_t const h  = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */
-        U32    const matchIndex = hashTable[h];
-
-        U32*   const nextCandidatePtr = bt + 2*(idx&btMask);
-        U32*   const sortMarkPtr  = nextCandidatePtr + 1;
-
-        DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
-        hashTable[h] = idx;   /* Update Hash Table */
-        *nextCandidatePtr = matchIndex;   /* update BT like a chain */
-        *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
-    }
-    ms->nextToUpdate = target;
-}
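`ZSTD_updateDUBT()` reuses the chain table as a binary tree with two `U32` slots per position: slot 0 holds the next candidate (later the "smaller" child) and slot 1 holds the sort mark (later the "larger" child). A compact sketch of just that insertion step, with illustrative names and an arbitrary mark value standing in for `ZSTD_DUBT_UNSORTED_MARK`:

```c
#include <stddef.h>

typedef unsigned int U32;

#define UNSORTED_MARK 1   /* illustrative placeholder for ZSTD_DUBT_UNSORTED_MARK */

/* Insert position `idx` at the head of its hash bucket, unsorted for now. */
static void dubt_insert_unsorted(U32* bt, U32 btMask, U32* hashTable, size_t h, U32 idx)
{
    U32* const slots = bt + 2 * (idx & btMask);
    slots[0] = hashTable[h];     /* chain to the previous candidate with the same hash */
    slots[1] = UNSORTED_MARK;    /* flag: not yet placed into the binary tree */
    hashTable[h] = idx;          /* the bucket head now points at the newest position */
}
```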
-
-
-/** ZSTD_insertDUBT1() :
- *  sort one already inserted but unsorted position
- *  assumption : current >= btlow == (current - btmask)
- *  doesn't fail */
-static void
-ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
-                 U32 current, const BYTE* inputEnd,
-                 U32 nbCompares, U32 btLow,
-                 const ZSTD_dictMode_e dictMode)
-{
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    U32* const bt = ms->chainTable;
-    U32  const btLog  = cParams->chainLog - 1;
-    U32  const btMask = (1 << btLog) - 1;
-    size_t commonLengthSmaller=0, commonLengthLarger=0;
-    const BYTE* const base = ms->window.base;
-    const BYTE* const dictBase = ms->window.dictBase;
-    const U32 dictLimit = ms->window.dictLimit;
-    const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current;
-    const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit;
-    const BYTE* const dictEnd = dictBase + dictLimit;
-    const BYTE* const prefixStart = base + dictLimit;
-    const BYTE* match;
-    U32* smallerPtr = bt + 2*(current&btMask);
-    U32* largerPtr  = smallerPtr + 1;
-    U32 matchIndex = *smallerPtr;   /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
-    U32 dummy32;   /* to be nullified at the end */
-    U32 const windowValid = ms->window.lowLimit;
-    U32 const maxDistance = 1U << cParams->windowLog;
-    U32 const windowLow = (current - windowValid > maxDistance) ? current - maxDistance : windowValid;
-
-
-    DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
-                current, dictLimit, windowLow);
-    assert(current >= btLow);
-    assert(ip < iend);   /* condition for ZSTD_count */
-
-    while (nbCompares-- && (matchIndex > windowLow)) {
-        U32* const nextPtr = bt + 2*(matchIndex & btMask);
-        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
-        assert(matchIndex < current);
-        /* note : all candidates are now supposed sorted,
-         * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
-         * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */
-
-        if ( (dictMode != ZSTD_extDict)
-          || (matchIndex+matchLength >= dictLimit)  /* both in current segment*/
-          || (current < dictLimit) /* both in extDict */) {
-            const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
-                                     || (matchIndex+matchLength >= dictLimit)) ?
-                                        base : dictBase;
-            assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
-                 || (current < dictLimit) );
-            match = mBase + matchIndex;
-            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
-        } else {
-            match = dictBase + matchIndex;
-            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
-            if (matchIndex+matchLength >= dictLimit)
-                match = base + matchIndex;   /* preparation for next read of match[matchLength] */
-        }
-
-        DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
-                    current, matchIndex, (U32)matchLength);
-
-        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
-            break;   /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
-        }
-
-        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
-            /* match is smaller than current */
-            *smallerPtr = matchIndex;             /* update smaller idx */
-            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
-            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
-            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
-                        matchIndex, btLow, nextPtr[1]);
-            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
-            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
-        } else {
-            /* match is larger than current */
-            *largerPtr = matchIndex;
-            commonLengthLarger = matchLength;
-            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
-            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
-                        matchIndex, btLow, nextPtr[0]);
-            largerPtr = nextPtr;
-            matchIndex = nextPtr[0];
-    }   }
-
-    *smallerPtr = *largerPtr = 0;
-}
-
-
-static size_t
-ZSTD_DUBT_findBetterDictMatch (
-        ZSTD_matchState_t* ms,
-        const BYTE* const ip, const BYTE* const iend,
-        size_t* offsetPtr,
-        size_t bestLength,
-        U32 nbCompares,
-        U32 const mls,
-        const ZSTD_dictMode_e dictMode)
-{
-    const ZSTD_matchState_t * const dms = ms->dictMatchState;
-    const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
-    const U32 * const dictHashTable = dms->hashTable;
-    U32         const hashLog = dmsCParams->hashLog;
-    size_t      const h  = ZSTD_hashPtr(ip, hashLog, mls);
-    U32               dictMatchIndex = dictHashTable[h];
-
-    const BYTE* const base = ms->window.base;
-    const BYTE* const prefixStart = base + ms->window.dictLimit;
-    U32         const current = (U32)(ip-base);
-    const BYTE* const dictBase = dms->window.base;
-    const BYTE* const dictEnd = dms->window.nextSrc;
-    U32         const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
-    U32         const dictLowLimit = dms->window.lowLimit;
-    U32         const dictIndexDelta = ms->window.lowLimit - dictHighLimit;
-
-    U32*        const dictBt = dms->chainTable;
-    U32         const btLog  = dmsCParams->chainLog - 1;
-    U32         const btMask = (1 << btLog) - 1;
-    U32         const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;
-
-    size_t commonLengthSmaller=0, commonLengthLarger=0;
-
-    (void)dictMode;
-    assert(dictMode == ZSTD_dictMatchState);
-
-    while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {
-        U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
-        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
-        const BYTE* match = dictBase + dictMatchIndex;
-        matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
-        if (dictMatchIndex+matchLength >= dictHighLimit)
-            match = base + dictMatchIndex + dictIndexDelta;   /* to prepare for next usage of match[matchLength] */
-
-        if (matchLength > bestLength) {
-            U32 matchIndex = dictMatchIndex + dictIndexDelta;
-            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
-                DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
-                    current, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + current - matchIndex, dictMatchIndex, matchIndex);
-                bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
-            }
-            if (ip+matchLength == iend) {   /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
-                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
-            }
-        }
-
-        if (match[matchLength] < ip[matchLength]) {
-            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
-            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
-            dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
-        } else {
-            /* match is larger than current */
-            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
-            commonLengthLarger = matchLength;
-            dictMatchIndex = nextPtr[0];
-        }
-    }
-
-    if (bestLength >= MINMATCH) {
-        U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
-        DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
-                    current, (U32)bestLength, (U32)*offsetPtr, mIndex);
-    }
-    return bestLength;
-
-}
-
-
-static size_t
-ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
-                        const BYTE* const ip, const BYTE* const iend,
-                        size_t* offsetPtr,
-                        U32 const mls,
-                        const ZSTD_dictMode_e dictMode)
-{
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    U32*   const hashTable = ms->hashTable;
-    U32    const hashLog = cParams->hashLog;
-    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
-    U32          matchIndex  = hashTable[h];
-
-    const BYTE* const base = ms->window.base;
-    U32    const current = (U32)(ip-base);
-    U32    const maxDistance = 1U << cParams->windowLog;
-    U32    const windowValid = ms->window.lowLimit;
-    U32    const windowLow = (current - windowValid > maxDistance) ? current - maxDistance : windowValid;
-
-    U32*   const bt = ms->chainTable;
-    U32    const btLog  = cParams->chainLog - 1;
-    U32    const btMask = (1 << btLog) - 1;
-    U32    const btLow = (btMask >= current) ? 0 : current - btMask;
-    U32    const unsortLimit = MAX(btLow, windowLow);
-
-    U32*         nextCandidate = bt + 2*(matchIndex&btMask);
-    U32*         unsortedMark = bt + 2*(matchIndex&btMask) + 1;
-    U32          nbCompares = 1U << cParams->searchLog;
-    U32          nbCandidates = nbCompares;
-    U32          previousCandidate = 0;
-
-    DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current);
-    assert(ip <= iend-8);   /* required for h calculation */
-
-    /* reach end of unsorted candidates list */
-    while ( (matchIndex > unsortLimit)
-         && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
-         && (nbCandidates > 1) ) {
-        DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
-                    matchIndex);
-        *unsortedMark = previousCandidate;  /* the unsortedMark becomes a reversed chain, to move up back to original position */
-        previousCandidate = matchIndex;
-        matchIndex = *nextCandidate;
-        nextCandidate = bt + 2*(matchIndex&btMask);
-        unsortedMark = bt + 2*(matchIndex&btMask) + 1;
-        nbCandidates --;
-    }
-
-    /* nullify last candidate if it's still unsorted
-     * simplification, detrimental to compression ratio, beneficial for speed */
-    if ( (matchIndex > unsortLimit)
-      && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
-        DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
-                    matchIndex);
-        *nextCandidate = *unsortedMark = 0;
-    }
-
-    /* batch sort stacked candidates */
-    matchIndex = previousCandidate;
-    while (matchIndex) {  /* will end on matchIndex == 0 */
-        U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
-        U32 const nextCandidateIdx = *nextCandidateIdxPtr;
-        ZSTD_insertDUBT1(ms, matchIndex, iend,
-                         nbCandidates, unsortLimit, dictMode);
-        matchIndex = nextCandidateIdx;
-        nbCandidates++;
-    }
-
-    /* find longest match */
-    {   size_t commonLengthSmaller = 0, commonLengthLarger = 0;
-        const BYTE* const dictBase = ms->window.dictBase;
-        const U32 dictLimit = ms->window.dictLimit;
-        const BYTE* const dictEnd = dictBase + dictLimit;
-        const BYTE* const prefixStart = base + dictLimit;
-        U32* smallerPtr = bt + 2*(current&btMask);
-        U32* largerPtr  = bt + 2*(current&btMask) + 1;
-        U32 matchEndIdx = current + 8 + 1;
-        U32 dummy32;   /* to be nullified at the end */
-        size_t bestLength = 0;
-
-        matchIndex  = hashTable[h];
-        hashTable[h] = current;   /* Update Hash Table */
-
-        while (nbCompares-- && (matchIndex > windowLow)) {
-            U32* const nextPtr = bt + 2*(matchIndex & btMask);
-            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
-            const BYTE* match;
-
-            if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
-                match = base + matchIndex;
-                matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
-            } else {
-                match = dictBase + matchIndex;
-                matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
-                if (matchIndex+matchLength >= dictLimit)
-                    match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
-            }
-
-            if (matchLength > bestLength) {
-                if (matchLength > matchEndIdx - matchIndex)
-                    matchEndIdx = matchIndex + (U32)matchLength;
-                if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
-                    bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
-                if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
-                    if (dictMode == ZSTD_dictMatchState) {
-                        nbCompares = 0; /* in addition to avoiding checking any
-                                         * further in this loop, make sure we
-                                         * skip checking in the dictionary. */
-                    }
-                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
-                }
-            }
-
-            if (match[matchLength] < ip[matchLength]) {
-                /* match is smaller than current */
-                *smallerPtr = matchIndex;             /* update smaller idx */
-                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
-                if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
-                smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
-                matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
-            } else {
-                /* match is larger than current */
-                *largerPtr = matchIndex;
-                commonLengthLarger = matchLength;
-                if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
-                largerPtr = nextPtr;
-                matchIndex = nextPtr[0];
-        }   }
-
-        *smallerPtr = *largerPtr = 0;
-
-        if (dictMode == ZSTD_dictMatchState && nbCompares) {
-            bestLength = ZSTD_DUBT_findBetterDictMatch(
-                    ms, ip, iend,
-                    offsetPtr, bestLength, nbCompares,
-                    mls, dictMode);
-        }
-
-        assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */
-        ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
-        if (bestLength >= MINMATCH) {
-            U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
-            DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
-                        current, (U32)bestLength, (U32)*offsetPtr, mIndex);
-        }
-        return bestLength;
-    }
-}
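Before searching, `ZSTD_DUBT_findBestMatch()` walks the unsorted candidates and rewrites each candidate's mark slot to point back at the previously visited one, turning the forward chain into a reversed list so the batch sort can re-insert candidates furthest-first. A compact sketch of only that list-reversal step, over an illustrative two-slots-per-position layout (not the library's tables):

```c
typedef unsigned int U32;

#define UNSORTED_MARK 1   /* illustrative stand-in for ZSTD_DUBT_UNSORTED_MARK */

/* bt holds two U32 slots per position: slot 0 = next candidate, slot 1 = mark / back-pointer. */
static U32 reverse_unsorted_chain(U32* bt, U32 btMask, U32 head, U32 limit)
{
    U32 previous = 0;
    while (head > limit && bt[2 * (head & btMask) + 1] == UNSORTED_MARK) {
        U32 const next = bt[2 * (head & btMask)];
        bt[2 * (head & btMask) + 1] = previous;   /* the mark slot becomes a back-pointer */
        previous = head;
        head = next;
    }
    /* last candidate visited (furthest back); its back-pointers lead toward the chain head,
     * so the caller can re-insert candidates from oldest to newest */
    return previous;
}
```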
-
-
-/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
-                const BYTE* const ip, const BYTE* const iLimit,
-                      size_t* offsetPtr,
-                const U32 mls /* template */,
-                const ZSTD_dictMode_e dictMode)
-{
-    DEBUGLOG(7, "ZSTD_BtFindBestMatch");
-    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
-    ZSTD_updateDUBT(ms, ip, iLimit, mls);
-    return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
-}
-
-
-static size_t
-ZSTD_BtFindBestMatch_selectMLS (  ZSTD_matchState_t* ms,
-                            const BYTE* ip, const BYTE* const iLimit,
-                                  size_t* offsetPtr)
-{
-    switch(ms->cParams.minMatch)
-    {
-    default : /* includes case 3 */
-    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
-    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
-    case 7 :
-    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
-    }
-}
-
-
-static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
-                        ZSTD_matchState_t* ms,
-                        const BYTE* ip, const BYTE* const iLimit,
-                        size_t* offsetPtr)
-{
-    switch(ms->cParams.minMatch)
-    {
-    default : /* includes case 3 */
-    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
-    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
-    case 7 :
-    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
-    }
-}
-
-
-static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
-                        ZSTD_matchState_t* ms,
-                        const BYTE* ip, const BYTE* const iLimit,
-                        size_t* offsetPtr)
-{
-    switch(ms->cParams.minMatch)
-    {
-    default : /* includes case 3 */
-    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
-    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
-    case 7 :
-    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
-    }
-}
-
-
-
-/* *********************************
-*  Hash Chain
-***********************************/
-#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & (mask)]
-
-/* Update chains up to ip (excluded)
-   Assumption : always within prefix (i.e. not within extDict) */
-static U32 ZSTD_insertAndFindFirstIndex_internal(
-                        ZSTD_matchState_t* ms,
-                        const ZSTD_compressionParameters* const cParams,
-                        const BYTE* ip, U32 const mls)
-{
-    U32* const hashTable  = ms->hashTable;
-    const U32 hashLog = cParams->hashLog;
-    U32* const chainTable = ms->chainTable;
-    const U32 chainMask = (1 << cParams->chainLog) - 1;
-    const BYTE* const base = ms->window.base;
-    const U32 target = (U32)(ip - base);
-    U32 idx = ms->nextToUpdate;
-
-    while(idx < target) { /* catch up */
-        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
-        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
-        hashTable[h] = idx;
-        idx++;
-    }
-
-    ms->nextToUpdate = target;
-    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
-}
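The hash-chain finder keeps, for every inserted position, the previous position that hashed to the same bucket; walking the chain therefore yields candidate matches from newest to oldest. A minimal sketch of insertion and lookup under those assumptions (toy hash, fixed table sizes, not the library's code; the caller must guarantee 4 readable bytes at `base + pos`):

```c
#include <stddef.h>

typedef unsigned int U32;

#define HASH_LOG   16
#define CHAIN_LOG  16
#define CHAIN_MASK ((1u << CHAIN_LOG) - 1)

static U32 hashTable[1u << HASH_LOG];    /* bucket -> most recent position */
static U32 chainTable[1u << CHAIN_LOG];  /* position -> previous position with the same hash */

static U32 toy_hash(const unsigned char* p)   /* stand-in for ZSTD_hashPtr() */
{
    U32 const v = (U32)p[0] | ((U32)p[1] << 8) | ((U32)p[2] << 16) | ((U32)p[3] << 24);
    return (v * 2654435761u) >> (32 - HASH_LOG);
}

static U32 insert_and_get_first(const unsigned char* base, U32 pos)
{
    U32 const h = toy_hash(base + pos);
    U32 const head = hashTable[h];
    chainTable[pos & CHAIN_MASK] = head;   /* remember the previous occupant of this bucket */
    hashTable[h] = pos;                    /* the newest position becomes the bucket head */
    return head;                           /* first (most recent) candidate to compare against */
}
```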
-
-U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
-}
-
-
-/* inlining is important to hardwire a hot branch (template emulation) */
-FORCE_INLINE_TEMPLATE
-size_t ZSTD_HcFindBestMatch_generic (
-                        ZSTD_matchState_t* ms,
-                        const BYTE* const ip, const BYTE* const iLimit,
-                        size_t* offsetPtr,
-                        const U32 mls, const ZSTD_dictMode_e dictMode)
-{
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    U32* const chainTable = ms->chainTable;
-    const U32 chainSize = (1 << cParams->chainLog);
-    const U32 chainMask = chainSize-1;
-    const BYTE* const base = ms->window.base;
-    const BYTE* const dictBase = ms->window.dictBase;
-    const U32 dictLimit = ms->window.dictLimit;
-    const BYTE* const prefixStart = base + dictLimit;
-    const BYTE* const dictEnd = dictBase + dictLimit;
-    const U32 current = (U32)(ip-base);
-    const U32 maxDistance = 1U << cParams->windowLog;
-    const U32 lowValid = ms->window.lowLimit;
-    const U32 lowLimit = (current - lowValid > maxDistance) ? current - maxDistance : lowValid;
-    const U32 minChain = current > chainSize ? current - chainSize : 0;
-    U32 nbAttempts = 1U << cParams->searchLog;
-    size_t ml=4-1;
-
-    /* HC4 match finder */
-    U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
-
-    for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
-        size_t currentMl=0;
-        if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
-            const BYTE* const match = base + matchIndex;
-            assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */
-            if (match[ml] == ip[ml])   /* potentially better */
-                currentMl = ZSTD_count(ip, match, iLimit);
-        } else {
-            const BYTE* const match = dictBase + matchIndex;
-            assert(match+4 <= dictEnd);
-            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
-                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
-        }
-
-        /* save best solution */
-        if (currentMl > ml) {
-            ml = currentMl;
-            *offsetPtr = current - matchIndex + ZSTD_REP_MOVE;
-            if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
-        }
-
-        if (matchIndex <= minChain) break;
-        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
-    }
-
-    if (dictMode == ZSTD_dictMatchState) {
-        const ZSTD_matchState_t* const dms = ms->dictMatchState;
-        const U32* const dmsChainTable = dms->chainTable;
-        const U32 dmsChainSize         = (1 << dms->cParams.chainLog);
-        const U32 dmsChainMask         = dmsChainSize - 1;
-        const U32 dmsLowestIndex       = dms->window.dictLimit;
-        const BYTE* const dmsBase      = dms->window.base;
-        const BYTE* const dmsEnd       = dms->window.nextSrc;
-        const U32 dmsSize              = (U32)(dmsEnd - dmsBase);
-        const U32 dmsIndexDelta        = dictLimit - dmsSize;
-        const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;
-
-        matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
-
-        for ( ; (matchIndex>dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
-            size_t currentMl=0;
-            const BYTE* const match = dmsBase + matchIndex;
-            assert(match+4 <= dmsEnd);
-            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
-                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
-
-            /* save best solution */
-            if (currentMl > ml) {
-                ml = currentMl;
-                *offsetPtr = current - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
-                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
-            }
-
-            if (matchIndex <= dmsMinChain) break;
-            matchIndex = dmsChainTable[matchIndex & dmsChainMask];
-        }
-    }
-
-    return ml;
-}
-
-
-FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
-                        ZSTD_matchState_t* ms,
-                        const BYTE* ip, const BYTE* const iLimit,
-                        size_t* offsetPtr)
-{
-    switch(ms->cParams.minMatch)
-    {
-    default : /* includes case 3 */
-    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
-    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
-    case 7 :
-    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
-    }
-}
-
-
-static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
-                        ZSTD_matchState_t* ms,
-                        const BYTE* ip, const BYTE* const iLimit,
-                        size_t* offsetPtr)
-{
-    switch(ms->cParams.minMatch)
-    {
-    default : /* includes case 3 */
-    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
-    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
-    case 7 :
-    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
-    }
-}
-
-
-FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
-                        ZSTD_matchState_t* ms,
-                        const BYTE* ip, const BYTE* const iLimit,
-                        size_t* offsetPtr)
-{
-    switch(ms->cParams.minMatch)
-    {
-    default : /* includes case 3 */
-    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
-    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
-    case 7 :
-    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
-    }
-}
-
-
-/* *******************************
-*  Common parser - lazy strategy
-*********************************/
-FORCE_INLINE_TEMPLATE
-size_t ZSTD_compressBlock_lazy_generic(
-                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
-                        U32 rep[ZSTD_REP_NUM],
-                        const void* src, size_t srcSize,
-                        const U32 searchMethod, const U32 depth,
-                        ZSTD_dictMode_e const dictMode)
-{
-    const BYTE* const istart = (const BYTE*)src;
-    const BYTE* ip = istart;
-    const BYTE* anchor = istart;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* const ilimit = iend - 8;
-    const BYTE* const base = ms->window.base;
-    const U32 prefixLowestIndex = ms->window.dictLimit;
-    const BYTE* const prefixLowest = base + prefixLowestIndex;
-
-    typedef size_t (*searchMax_f)(
-                        ZSTD_matchState_t* ms,
-                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
-    searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ?
-        (searchMethod ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
-        (searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS);
-    U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
-
-    const ZSTD_matchState_t* const dms = ms->dictMatchState;
-    const U32 dictLowestIndex      = dictMode == ZSTD_dictMatchState ?
-                                     dms->window.dictLimit : 0;
-    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
-                                     dms->window.base : NULL;
-    const BYTE* const dictLowest   = dictMode == ZSTD_dictMatchState ?
-                                     dictBase + dictLowestIndex : NULL;
-    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
-                                     dms->window.nextSrc : NULL;
-    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
-                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
-                                     0;
-    const U32 dictAndPrefixLength = (U32)(ip - prefixLowest + dictEnd - dictLowest);
-
-    /* init */
-    ip += (dictAndPrefixLength == 0);
-    if (dictMode == ZSTD_noDict) {
-        U32 const maxRep = (U32)(ip - prefixLowest);
-        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
-        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
-    }
-    if (dictMode == ZSTD_dictMatchState) {
-        /* dictMatchState repCode checks don't currently handle repCode == 0
-         * disabling. */
-        assert(offset_1 <= dictAndPrefixLength);
-        assert(offset_2 <= dictAndPrefixLength);
-    }
-
-    /* Match Loop */
-    while (ip < ilimit) {
-        size_t matchLength=0;
-        size_t offset=0;
-        const BYTE* start=ip+1;
-
-        /* check repCode */
-        if (dictMode == ZSTD_dictMatchState) {
-            const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
-            const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
-                                && repIndex < prefixLowestIndex) ?
-                                   dictBase + (repIndex - dictIndexDelta) :
-                                   base + repIndex;
-            if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
-                && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
-                const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
-                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
-                if (depth==0) goto _storeSequence;
-            }
-        }
-        if ( dictMode == ZSTD_noDict
-          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
-            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
-            if (depth==0) goto _storeSequence;
-        }
-
-        /* first search (depth 0) */
-        {   size_t offsetFound = 999999999;
-            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
-            if (ml2 > matchLength)
-                matchLength = ml2, start = ip, offset=offsetFound;
-        }
-
-        if (matchLength < 4) {
-            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
-            continue;
-        }
-
-        /* let's try to find a better solution */
-        if (depth>=1)
-        while (ip<ilimit) {
-            ip ++;
-            if ( (dictMode == ZSTD_noDict)
-              && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
-                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
-                int const gain2 = (int)(mlRep * 3);
-                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
-                if ((mlRep >= 4) && (gain2 > gain1))
-                    matchLength = mlRep, offset = 0, start = ip;
-            }
-            if (dictMode == ZSTD_dictMatchState) {
-                const U32 repIndex = (U32)(ip - base) - offset_1;
-                const BYTE* repMatch = repIndex < prefixLowestIndex ?
-                               dictBase + (repIndex - dictIndexDelta) :
-                               base + repIndex;
-                if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
-                    && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
-                    const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
-                    size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
-                    int const gain2 = (int)(mlRep * 3);
-                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
-                    if ((mlRep >= 4) && (gain2 > gain1))
-                        matchLength = mlRep, offset = 0, start = ip;
-                }
-            }
-            {   size_t offset2=999999999;
-                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
-                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
-                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
-                if ((ml2 >= 4) && (gain2 > gain1)) {
-                    matchLength = ml2, offset = offset2, start = ip;
-                    continue;   /* search a better one */
-            }   }
-
-            /* let's find an even better one */
-            if ((depth==2) && (ip<ilimit)) {
-                ip ++;
-                if ( (dictMode == ZSTD_noDict)
-                  && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
-                    size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
-                    int const gain2 = (int)(mlRep * 4);
-                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
-                    if ((mlRep >= 4) && (gain2 > gain1))
-                        matchLength = mlRep, offset = 0, start = ip;
-                }
-                if (dictMode == ZSTD_dictMatchState) {
-                    const U32 repIndex = (U32)(ip - base) - offset_1;
-                    const BYTE* repMatch = repIndex < prefixLowestIndex ?
-                                   dictBase + (repIndex - dictIndexDelta) :
-                                   base + repIndex;
-                    if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
-                        && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
-                        const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
-                        size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
-                        int const gain2 = (int)(mlRep * 4);
-                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
-                        if ((mlRep >= 4) && (gain2 > gain1))
-                            matchLength = mlRep, offset = 0, start = ip;
-                    }
-                }
-                {   size_t offset2=999999999;
-                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
-                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
-                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
-                    if ((ml2 >= 4) && (gain2 > gain1)) {
-                        matchLength = ml2, offset = offset2, start = ip;
-                        continue;
-            }   }   }
-            break;  /* nothing found : store previous solution */
-        }
-
-        /* NOTE:
-         * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
-         * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
-         * overflows the pointer, which is undefined behavior.
-         */
-        /* catch up */
-        if (offset) {
-            if (dictMode == ZSTD_noDict) {
-                while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
-                     && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */
-                    { start--; matchLength++; }
-            }
-            if (dictMode == ZSTD_dictMatchState) {
-                U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
-                const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
-                const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
-                while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
-            }
-            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
-        }
-        /* store sequence */
-_storeSequence:
-        {   size_t const litLength = start - anchor;
-            ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
-            anchor = ip = start + matchLength;
-        }
-
-        /* check immediate repcode */
-        if (dictMode == ZSTD_dictMatchState) {
-            while (ip <= ilimit) {
-                U32 const current2 = (U32)(ip-base);
-                U32 const repIndex = current2 - offset_2;
-                const BYTE* repMatch = dictMode == ZSTD_dictMatchState
-                    && repIndex < prefixLowestIndex ?
-                        dictBase - dictIndexDelta + repIndex :
-                        base + repIndex;
-                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
-                   && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
-                    const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
-                    matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
-                    offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset_2 <=> offset_1 */
-                    ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
-                    ip += matchLength;
-                    anchor = ip;
-                    continue;
-                }
-                break;
-            }
-        }
-
-        if (dictMode == ZSTD_noDict) {
-            while ( ((ip <= ilimit) & (offset_2>0))
-                 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
-                /* store sequence */
-                matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
-                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
-                ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
-                ip += matchLength;
-                anchor = ip;
-                continue;   /* faster when present ... (?) */
-    }   }   }
-
-    /* Save reps for next block */
-    rep[0] = offset_1 ? offset_1 : savedOffset;
-    rep[1] = offset_2 ? offset_2 : savedOffset;
-
-    /* Return the last literals size */
-    return iend - anchor;
-}
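The lazy parser above keeps a pending match and only replaces it when a match found one byte later wins a cost comparison: roughly four "points" per matched byte minus the bit cost of the offset (`ZSTD_highbit32(offset+1)`), with a small bonus for the pending match so ties keep the earlier, cheaper choice. A toy version of that comparison; the constants mirror the `gain1`/`gain2` expressions above, but this is an illustration rather than the exact tuning:

```c
typedef unsigned int U32;

static int highbit(U32 v) { int r = 0; while (v >>= 1) r++; return r; }

/* Return non-zero if the newer candidate should replace the pending match. */
static int newer_match_wins(U32 pendingLen, U32 pendingOffset,
                            U32 newLen, U32 newOffset)
{
    int const gainNew     = (int)(newLen * 4)     - highbit(newOffset + 1);
    int const gainPending = (int)(pendingLen * 4) - highbit(pendingOffset + 1) + 4; /* bonus for keeping the earlier match */
    return (newLen >= 4) && (gainNew > gainPending);
}
```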
-
-
-size_t ZSTD_compressBlock_btlazy2(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_noDict);
-}
-
-size_t ZSTD_compressBlock_lazy2(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_noDict);
-}
-
-size_t ZSTD_compressBlock_lazy(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_noDict);
-}
-
-size_t ZSTD_compressBlock_greedy(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_noDict);
-}
-
-size_t ZSTD_compressBlock_btlazy2_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_dictMatchState);
-}
-
-size_t ZSTD_compressBlock_lazy2_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_dictMatchState);
-}
-
-size_t ZSTD_compressBlock_lazy_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_dictMatchState);
-}
-
-size_t ZSTD_compressBlock_greedy_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_dictMatchState);
-}
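These block compressors are not called directly by users; they are selected through the compression parameters. If one wanted to force a lazy strategy through the public advanced API (stable since zstd v1.4.0), a usage sketch might look like the following; error handling is abbreviated and the destination is assumed to be sized with `ZSTD_compressBound()`:

```c
#include <stddef.h>
#include <zstd.h>

static size_t compress_lazy2(void* dst, size_t dstCap, const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t written;
    if (cctx == NULL) return 0;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 7);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, ZSTD_lazy2);  /* routes block compression to the lazy2 parsers */
    written = ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return ZSTD_isError(written) ? 0 : written;
}
```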
-
-
-FORCE_INLINE_TEMPLATE
-size_t ZSTD_compressBlock_lazy_extDict_generic(
-                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
-                        U32 rep[ZSTD_REP_NUM],
-                        const void* src, size_t srcSize,
-                        const U32 searchMethod, const U32 depth)
-{
-    const BYTE* const istart = (const BYTE*)src;
-    const BYTE* ip = istart;
-    const BYTE* anchor = istart;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* const ilimit = iend - 8;
-    const BYTE* const base = ms->window.base;
-    const U32 dictLimit = ms->window.dictLimit;
-    const U32 lowestIndex = ms->window.lowLimit;
-    const BYTE* const prefixStart = base + dictLimit;
-    const BYTE* const dictBase = ms->window.dictBase;
-    const BYTE* const dictEnd  = dictBase + dictLimit;
-    const BYTE* const dictStart  = dictBase + lowestIndex;
-
-    typedef size_t (*searchMax_f)(
-                        ZSTD_matchState_t* ms,
-                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
-    searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
-
-    U32 offset_1 = rep[0], offset_2 = rep[1];
-
-    /* init */
-    ip += (ip == prefixStart);
-
-    /* Match Loop */
-    while (ip < ilimit) {
-        size_t matchLength=0;
-        size_t offset=0;
-        const BYTE* start=ip+1;
-        U32 current = (U32)(ip-base);
-
-        /* check repCode */
-        {   const U32 repIndex = (U32)(current+1 - offset_1);
-            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
-            const BYTE* const repMatch = repBase + repIndex;
-            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
-            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
-                /* repcode detected, we should take it */
-                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
-                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
-                if (depth==0) goto _storeSequence;
-        }   }
-
-        /* first search (depth 0) */
-        {   size_t offsetFound = 999999999;
-            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
-            if (ml2 > matchLength)
-                matchLength = ml2, start = ip, offset=offsetFound;
-        }
-
-         if (matchLength < 4) {
-            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
-            continue;
-        }
-
-        /* let's try to find a better solution */
-        if (depth>=1)
-        while (ip<ilimit) {
-            ip ++;
-            current++;
-            /* check repCode */
-            if (offset) {
-                const U32 repIndex = (U32)(current - offset_1);
-                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
-                const BYTE* const repMatch = repBase + repIndex;
-                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
-                if (MEM_read32(ip) == MEM_read32(repMatch)) {
-                    /* repcode detected */
-                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
-                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
-                    int const gain2 = (int)(repLength * 3);
-                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
-                    if ((repLength >= 4) && (gain2 > gain1))
-                        matchLength = repLength, offset = 0, start = ip;
-            }   }
-
-            /* search match, depth 1 */
-            {   size_t offset2=999999999;
-                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
-                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
-                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
-                if ((ml2 >= 4) && (gain2 > gain1)) {
-                    matchLength = ml2, offset = offset2, start = ip;
-                    continue;   /* search a better one */
-            }   }
-
-            /* let's find an even better one */
-            if ((depth==2) && (ip<ilimit)) {
-                ip ++;
-                current++;
-                /* check repCode */
-                if (offset) {
-                    const U32 repIndex = (U32)(current - offset_1);
-                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
-                    const BYTE* const repMatch = repBase + repIndex;
-                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
-                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
-                        /* repcode detected */
-                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
-                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
-                        int const gain2 = (int)(repLength * 4);
-                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
-                        if ((repLength >= 4) && (gain2 > gain1))
-                            matchLength = repLength, offset = 0, start = ip;
-                }   }
-
-                /* search match, depth 2 */
-                {   size_t offset2=999999999;
-                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
-                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
-                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
-                    if ((ml2 >= 4) && (gain2 > gain1)) {
-                        matchLength = ml2, offset = offset2, start = ip;
-                        continue;
-            }   }   }
-            break;  /* nothing found : store previous solution */
-        }
-
-        /* catch up */
-        if (offset) {
-            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
-            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
-            const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
-            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
-            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
-        }
-
-        /* store sequence */
-_storeSequence:
-        {   size_t const litLength = start - anchor;
-            ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
-            anchor = ip = start + matchLength;
-        }
-
-        /* check immediate repcode */
-        while (ip <= ilimit) {
-            const U32 repIndex = (U32)((ip-base) - offset_2);
-            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
-            const BYTE* const repMatch = repBase + repIndex;
-            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
-            if (MEM_read32(ip) == MEM_read32(repMatch)) {
-                /* repcode detected, we should take it */
-                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
-                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
-                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
-                ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
-                ip += matchLength;
-                anchor = ip;
-                continue;   /* faster when present ... (?) */
-            }
-            break;
-    }   }
-
-    /* Save reps for next block */
-    rep[0] = offset_1;
-    rep[1] = offset_2;
-
-    /* Return the last literals size */
-    return iend - anchor;
-}
-
-
-size_t ZSTD_compressBlock_greedy_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 0);
-}
-
-size_t ZSTD_compressBlock_lazy_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-
-{
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 1);
-}
-
-size_t ZSTD_compressBlock_lazy2_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-
-{
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 2);
-}
-
-size_t ZSTD_compressBlock_btlazy2_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-
-{
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 1, 2);
-}
diff --git a/vendor/github.com/DataDog/zstd/zstd_lazy.h b/vendor/github.com/DataDog/zstd/zstd_lazy.h
deleted file mode 100644
index bb17630..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_lazy.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_LAZY_H
-#define ZSTD_LAZY_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#include "zstd_compress_internal.h"
-
-U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
-
-void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! Used in ZSTD_reduceIndex(); preemptively increases the value of ZSTD_DUBT_UNSORTED_MARK */
-
-size_t ZSTD_compressBlock_btlazy2(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy2(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_greedy(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-
-size_t ZSTD_compressBlock_btlazy2_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy2_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_greedy_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-
-size_t ZSTD_compressBlock_greedy_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy2_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btlazy2_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_LAZY_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_ldm.c b/vendor/github.com/DataDog/zstd/zstd_ldm.c
deleted file mode 100644
index 3dcf86e..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_ldm.c
+++ /dev/null
@@ -1,597 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- */
-
-#include "zstd_ldm.h"
-
-#include "debug.h"
-#include "zstd_fast.h"          /* ZSTD_fillHashTable() */
-#include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */
-
-#define LDM_BUCKET_SIZE_LOG 3
-#define LDM_MIN_MATCH_LENGTH 64
-#define LDM_HASH_RLOG 7
-#define LDM_HASH_CHAR_OFFSET 10
-
-void ZSTD_ldm_adjustParameters(ldmParams_t* params,
-                               ZSTD_compressionParameters const* cParams)
-{
-    params->windowLog = cParams->windowLog;
-    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
-    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
-    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
-    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
-    if (cParams->strategy >= ZSTD_btopt) {
-      /* Get out of the way of the optimal parser */
-      U32 const minMatch = MAX(cParams->targetLength, params->minMatchLength);
-      assert(minMatch >= ZSTD_LDM_MINMATCH_MIN);
-      assert(minMatch <= ZSTD_LDM_MINMATCH_MAX);
-      params->minMatchLength = minMatch;
-    }
-    if (params->hashLog == 0) {
-        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
-        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
-    }
-    if (params->hashRateLog == 0) {
-        params->hashRateLog = params->windowLog < params->hashLog
-                                   ? 0
-                                   : params->windowLog - params->hashLog;
-    }
-    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
-}
-
-size_t ZSTD_ldm_getTableSize(ldmParams_t params)
-{
-    size_t const ldmHSize = ((size_t)1) << params.hashLog;
-    size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
-    size_t const ldmBucketSize =
-        ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
-    size_t const totalSize = ldmBucketSize + ldmHSize * sizeof(ldmEntry_t);
-    return params.enableLdm ? totalSize : 0;
-}
-
-size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
-{
-    return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
-}
-
-/** ZSTD_ldm_getSmallHash() :
- *  numBits should be <= 32
- *  If numBits==0, returns 0.
- *  @return : the most significant numBits of value. */
-static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)
-{
-    assert(numBits <= 32);
-    return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));
-}
-
-/** ZSTD_ldm_getChecksum() :
- *  numBitsToDiscard should be <= 32
- *  @return : the next most significant 32 bits after numBitsToDiscard */
-static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)
-{
-    assert(numBitsToDiscard <= 32);
-    return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;
-}
-
-/** ZSTD_ldm_getTag() :
- *  Given the hash, returns the most significant numTagBits bits
- *  after (32 + hbits) bits.
- *
- *  If there are not enough bits remaining, return the last
- *  numTagBits bits. */
-static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)
-{
-    assert(numTagBits < 32 && hbits <= 32);
-    if (32 - hbits < numTagBits) {
-        return hash & (((U32)1 << numTagBits) - 1);
-    } else {
-        return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);
-    }
-}
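
The three helpers above carve a 64-bit rolling hash into the fields the LDM table needs: the top `hBits` select a bucket, the next 32 bits are stored as a checksum, and `hashRateLog` further bits form a sampling tag. A standalone sketch of that partitioning (assuming hBits + tag bits <= 32, the common case); it is an illustration, not the vendored code.

```c
/* Split a 64-bit rolling hash into bucket index, checksum, and tag,
 * mirroring the three helpers above.  Standalone illustration. */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint32_t small_hash(uint64_t h, uint32_t hBits)
{
    assert(hBits <= 32);
    return hBits == 0 ? 0 : (uint32_t)(h >> (64 - hBits));
}

static uint32_t checksum32(uint64_t h, uint32_t hBits)
{
    assert(hBits <= 32);
    return (uint32_t)(h >> (64 - 32 - hBits));   /* next 32 bits below the small hash */
}

static uint32_t tag(uint64_t h, uint32_t hBits, uint32_t tagBits)
{
    uint32_t const mask = ((uint32_t)1 << tagBits) - 1;
    return (uint32_t)(h >> (32 - hBits - tagBits)) & mask;  /* assumes hBits + tagBits <= 32 */
}

int main(void)
{
    uint64_t const h = 0x0123456789ABCDEFULL;
    printf("bucket=%u checksum=%08x tag=%u\n",
           small_hash(h, 20), checksum32(h, 20), tag(h, 20, 7));
    return 0;
}
```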
-
-/** ZSTD_ldm_getBucket() :
- *  Returns a pointer to the start of the bucket associated with hash. */
-static ldmEntry_t* ZSTD_ldm_getBucket(
-        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
-{
-    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
-}
-
-/** ZSTD_ldm_insertEntry() :
- *  Insert the entry with corresponding hash into the hash table */
-static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
-                                 size_t const hash, const ldmEntry_t entry,
-                                 ldmParams_t const ldmParams)
-{
-    BYTE* const bucketOffsets = ldmState->bucketOffsets;
-    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry;
-    bucketOffsets[hash]++;
-    bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1;
-}
-
-/** ZSTD_ldm_makeEntryAndInsertByTag() :
- *
- *  Gets the small hash, checksum, and tag from the rollingHash.
- *
- *  If the tag matches (1 << ldmParams.hashRateLog)-1, then
- *  creates an ldmEntry from the offset, and inserts it into the hash table.
- *
- *  hBits is the length of the small hash, which is the most significant hBits
- *  of rollingHash. The checksum is the next 32 most significant bits, followed
- *  by ldmParams.hashRateLog bits that make up the tag. */
-static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
-                                             U64 const rollingHash,
-                                             U32 const hBits,
-                                             U32 const offset,
-                                             ldmParams_t const ldmParams)
-{
-    U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashRateLog);
-    U32 const tagMask = ((U32)1 << ldmParams.hashRateLog) - 1;
-    if (tag == tagMask) {
-        U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
-        U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
-        ldmEntry_t entry;
-        entry.offset = offset;
-        entry.checksum = checksum;
-        ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);
-    }
-}
-
-/** ZSTD_ldm_countBackwardsMatch() :
- *  Returns the number of bytes that match backwards before pIn and pMatch.
- *
- *  We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
-static size_t ZSTD_ldm_countBackwardsMatch(
-            const BYTE* pIn, const BYTE* pAnchor,
-            const BYTE* pMatch, const BYTE* pBase)
-{
-    size_t matchLength = 0;
-    while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) {
-        pIn--;
-        pMatch--;
-        matchLength++;
-    }
-    return matchLength;
-}
-
-/** ZSTD_ldm_fillFastTables() :
- *
- *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
- *  This is similar to ZSTD_loadDictionaryContent.
- *
- *  The tables for the other strategies are filled within their
- *  block compressors. */
-static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
-                                      void const* end)
-{
-    const BYTE* const iend = (const BYTE*)end;
-
-    switch(ms->cParams.strategy)
-    {
-    case ZSTD_fast:
-        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
-        break;
-
-    case ZSTD_dfast:
-        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
-        break;
-
-    case ZSTD_greedy:
-    case ZSTD_lazy:
-    case ZSTD_lazy2:
-    case ZSTD_btlazy2:
-    case ZSTD_btopt:
-    case ZSTD_btultra:
-    case ZSTD_btultra2:
-        break;
-    default:
-        assert(0);  /* not possible : not a valid strategy id */
-    }
-
-    return 0;
-}
-
-/** ZSTD_ldm_fillLdmHashTable() :
- *
- *  Fills hashTable from (lastHashed + 1) to iend (non-inclusive).
- *  lastHash is the rolling hash that corresponds to lastHashed.
- *
- *  Returns the rolling hash corresponding to position iend-1. */
-static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
-                                     U64 lastHash, const BYTE* lastHashed,
-                                     const BYTE* iend, const BYTE* base,
-                                     U32 hBits, ldmParams_t const ldmParams)
-{
-    U64 rollingHash = lastHash;
-    const BYTE* cur = lastHashed + 1;
-
-    while (cur < iend) {
-        rollingHash = ZSTD_rollingHash_rotate(rollingHash, cur[-1],
-                                              cur[ldmParams.minMatchLength-1],
-                                              state->hashPower);
-        ZSTD_ldm_makeEntryAndInsertByTag(state,
-                                         rollingHash, hBits,
-                                         (U32)(cur - base), ldmParams);
-        ++cur;
-    }
-    return rollingHash;
-}
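
`ZSTD_ldm_fillLdmHashTable` leans on the O(1) update property of a Rabin-Karp style rolling hash: sliding the window by one byte removes the oldest byte's contribution (scaled by the precomputed hash power) and mixes in the new byte. The sketch below demonstrates that idea with arbitrary constants; zstd's actual prime, character offset, and hashPower setup differ.

```c
/* Minimal polynomial rolling hash over a fixed window (window >= 1).
 * Constants are arbitrary, not the ones used by zstd. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PRIME 0x100000001B3ULL     /* arbitrary odd 64-bit multiplier */

static uint64_t hash_power(size_t window)        /* PRIME^(window-1) */
{
    uint64_t p = 1;
    while (--window) p *= PRIME;
    return p;
}

static uint64_t hash_init(const uint8_t* p, size_t window)
{
    uint64_t h = 0;
    for (size_t i = 0; i < window; i++) h = h * PRIME + p[i];
    return h;
}

static uint64_t hash_rotate(uint64_t h, uint8_t out, uint8_t in, uint64_t power)
{
    return (h - out * power) * PRIME + in;       /* drop oldest byte, add newest */
}

int main(void)
{
    const uint8_t data[] = "abcdefgh";
    size_t const window = 4;
    uint64_t const power = hash_power(window);
    uint64_t h = hash_init(data, window);
    for (size_t i = window; i < strlen((const char*)data); i++) {
        h = hash_rotate(h, data[i - window], data[i], power);
        /* the rolled hash matches a hash computed from scratch over the new window */
        printf("pos %zu: rolled=%016llx scratch=%016llx\n", i - window + 1,
               (unsigned long long)h,
               (unsigned long long)hash_init(data + i - window + 1, window));
    }
    return 0;
}
```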
-
-
-/** ZSTD_ldm_limitTableUpdate() :
- *
- *  Sets ms->nextToUpdate to a position closer to anchor
- *  if it is far away
- *  (after a long match, only update tables a limited amount). */
-static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
-{
-    U32 const current = (U32)(anchor - ms->window.base);
-    if (current > ms->nextToUpdate + 1024) {
-        ms->nextToUpdate =
-            current - MIN(512, current - ms->nextToUpdate - 1024);
-    }
-}
-
-static size_t ZSTD_ldm_generateSequences_internal(
-        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
-        ldmParams_t const* params, void const* src, size_t srcSize)
-{
-    /* LDM parameters */
-    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
-    U32 const minMatchLength = params->minMatchLength;
-    U64 const hashPower = ldmState->hashPower;
-    U32 const hBits = params->hashLog - params->bucketSizeLog;
-    U32 const ldmBucketSize = 1U << params->bucketSizeLog;
-    U32 const hashRateLog = params->hashRateLog;
-    U32 const ldmTagMask = (1U << params->hashRateLog) - 1;
-    /* Prefix and extDict parameters */
-    U32 const dictLimit = ldmState->window.dictLimit;
-    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
-    BYTE const* const base = ldmState->window.base;
-    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
-    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
-    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
-    BYTE const* const lowPrefixPtr = base + dictLimit;
-    /* Input bounds */
-    BYTE const* const istart = (BYTE const*)src;
-    BYTE const* const iend = istart + srcSize;
-    BYTE const* const ilimit = iend - MAX(minMatchLength, HASH_READ_SIZE);
-    /* Input positions */
-    BYTE const* anchor = istart;
-    BYTE const* ip = istart;
-    /* Rolling hash */
-    BYTE const* lastHashed = NULL;
-    U64 rollingHash = 0;
-
-    while (ip <= ilimit) {
-        size_t mLength;
-        U32 const current = (U32)(ip - base);
-        size_t forwardMatchLength = 0, backwardMatchLength = 0;
-        ldmEntry_t* bestEntry = NULL;
-        if (ip != istart) {
-            rollingHash = ZSTD_rollingHash_rotate(rollingHash, lastHashed[0],
-                                                  lastHashed[minMatchLength],
-                                                  hashPower);
-        } else {
-            rollingHash = ZSTD_rollingHash_compute(ip, minMatchLength);
-        }
-        lastHashed = ip;
-
-        /* Do not insert and do not look for a match */
-        if (ZSTD_ldm_getTag(rollingHash, hBits, hashRateLog) != ldmTagMask) {
-           ip++;
-           continue;
-        }
-
-        /* Get the best entry and compute the match lengths */
-        {
-            ldmEntry_t* const bucket =
-                ZSTD_ldm_getBucket(ldmState,
-                                   ZSTD_ldm_getSmallHash(rollingHash, hBits),
-                                   *params);
-            ldmEntry_t* cur;
-            size_t bestMatchLength = 0;
-            U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
-
-            for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
-                size_t curForwardMatchLength, curBackwardMatchLength,
-                       curTotalMatchLength;
-                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
-                    continue;
-                }
-                if (extDict) {
-                    BYTE const* const curMatchBase =
-                        cur->offset < dictLimit ? dictBase : base;
-                    BYTE const* const pMatch = curMatchBase + cur->offset;
-                    BYTE const* const matchEnd =
-                        cur->offset < dictLimit ? dictEnd : iend;
-                    BYTE const* const lowMatchPtr =
-                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;
-
-                    curForwardMatchLength = ZSTD_count_2segments(
-                                                ip, pMatch, iend,
-                                                matchEnd, lowPrefixPtr);
-                    if (curForwardMatchLength < minMatchLength) {
-                        continue;
-                    }
-                    curBackwardMatchLength =
-                        ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,
-                                                     lowMatchPtr);
-                    curTotalMatchLength = curForwardMatchLength +
-                                          curBackwardMatchLength;
-                } else { /* !extDict */
-                    BYTE const* const pMatch = base + cur->offset;
-                    curForwardMatchLength = ZSTD_count(ip, pMatch, iend);
-                    if (curForwardMatchLength < minMatchLength) {
-                        continue;
-                    }
-                    curBackwardMatchLength =
-                        ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,
-                                                     lowPrefixPtr);
-                    curTotalMatchLength = curForwardMatchLength +
-                                          curBackwardMatchLength;
-                }
-
-                if (curTotalMatchLength > bestMatchLength) {
-                    bestMatchLength = curTotalMatchLength;
-                    forwardMatchLength = curForwardMatchLength;
-                    backwardMatchLength = curBackwardMatchLength;
-                    bestEntry = cur;
-                }
-            }
-        }
-
-        /* No match found -- continue searching */
-        if (bestEntry == NULL) {
-            ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,
-                                             hBits, current,
-                                             *params);
-            ip++;
-            continue;
-        }
-
-        /* Match found */
-        mLength = forwardMatchLength + backwardMatchLength;
-        ip -= backwardMatchLength;
-
-        {
-            /* Store the sequence:
-             * ip = current - backwardMatchLength
-             * The match is at (bestEntry->offset - backwardMatchLength)
-             */
-            U32 const matchIndex = bestEntry->offset;
-            U32 const offset = current - matchIndex;
-            rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
-
-            /* Out of sequence storage */
-            if (rawSeqStore->size == rawSeqStore->capacity)
-                return ERROR(dstSize_tooSmall);
-            seq->litLength = (U32)(ip - anchor);
-            seq->matchLength = (U32)mLength;
-            seq->offset = offset;
-            rawSeqStore->size++;
-        }
-
-        /* Insert the current entry into the hash table */
-        ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
-                                         (U32)(lastHashed - base),
-                                         *params);
-
-        assert(ip + backwardMatchLength == lastHashed);
-
-        /* Fill the hash table from lastHashed+1 to ip+mLength */
-        /* Heuristic: don't need to fill the entire table at end of block */
-        if (ip + mLength <= ilimit) {
-            rollingHash = ZSTD_ldm_fillLdmHashTable(
-                              ldmState, rollingHash, lastHashed,
-                              ip + mLength, base, hBits, *params);
-            lastHashed = ip + mLength - 1;
-        }
-        ip += mLength;
-        anchor = ip;
-    }
-    return iend - anchor;
-}
-
-/*! ZSTD_ldm_reduceTable() :
- *  reduce table indexes by `reducerValue` */
-static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
-                                 U32 const reducerValue)
-{
-    U32 u;
-    for (u = 0; u < size; u++) {
-        if (table[u].offset < reducerValue) table[u].offset = 0;
-        else table[u].offset -= reducerValue;
-    }
-}
-
-size_t ZSTD_ldm_generateSequences(
-        ldmState_t* ldmState, rawSeqStore_t* sequences,
-        ldmParams_t const* params, void const* src, size_t srcSize)
-{
-    U32 const maxDist = 1U << params->windowLog;
-    BYTE const* const istart = (BYTE const*)src;
-    BYTE const* const iend = istart + srcSize;
-    size_t const kMaxChunkSize = 1 << 20;
-    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
-    size_t chunk;
-    size_t leftoverSize = 0;
-
-    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
-    /* Check that ZSTD_window_update() has been called for this chunk prior
-     * to passing it to this function.
-     */
-    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
-    /* The input could be very large (in zstdmt), so it must be broken up into
-     * chunks to enforce the maximum distance and handle overflow correction.
-     */
-    assert(sequences->pos <= sequences->size);
-    assert(sequences->size <= sequences->capacity);
-    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
-        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
-        size_t const remaining = (size_t)(iend - chunkStart);
-        BYTE const *const chunkEnd =
-            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
-        size_t const chunkSize = chunkEnd - chunkStart;
-        size_t newLeftoverSize;
-        size_t const prevSize = sequences->size;
-
-        assert(chunkStart < iend);
-        /* 1. Perform overflow correction if necessary. */
-        if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
-            U32 const ldmHSize = 1U << params->hashLog;
-            U32 const correction = ZSTD_window_correctOverflow(
-                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
-            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
-        }
-        /* 2. We enforce the maximum offset allowed.
-         *
-         * kMaxChunkSize should be small enough that we don't lose too much of
-         * the window through early invalidation.
-         * TODO: * Test the chunk size.
- *       * Try invalidation after the sequence generation and test
- *         the offset against maxDist directly.
-         */
-        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL, NULL);
-        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
-        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
-            ldmState, sequences, params, chunkStart, chunkSize);
-        if (ZSTD_isError(newLeftoverSize))
-            return newLeftoverSize;
-        /* 4. We add the leftover literals from previous iterations to the first
-         *    newly generated sequence, or add the `newLeftoverSize` if none are
-         *    generated.
-         */
-        /* Prepend the leftover literals from the last call */
-        if (prevSize < sequences->size) {
-            sequences->seq[prevSize].litLength += (U32)leftoverSize;
-            leftoverSize = newLeftoverSize;
-        } else {
-            assert(newLeftoverSize == chunkSize);
-            leftoverSize += chunkSize;
-        }
-    }
-    return 0;
-}
-
-void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
-    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
-        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
-        if (srcSize <= seq->litLength) {
-            /* Skip past srcSize literals */
-            seq->litLength -= (U32)srcSize;
-            return;
-        }
-        srcSize -= seq->litLength;
-        seq->litLength = 0;
-        if (srcSize < seq->matchLength) {
-            /* Skip past the first srcSize of the match */
-            seq->matchLength -= (U32)srcSize;
-            if (seq->matchLength < minMatch) {
-                /* The match is too short, omit it */
-                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
-                    seq[1].litLength += seq[0].matchLength;
-                }
-                rawSeqStore->pos++;
-            }
-            return;
-        }
-        srcSize -= seq->matchLength;
-        seq->matchLength = 0;
-        rawSeqStore->pos++;
-    }
-}
-
-/**
- * If the sequence length is longer than remaining then the sequence is split
- * between this block and the next.
- *
- * Returns the current sequence to handle, or if the rest of the block should
- * be literals, it returns a sequence with offset == 0.
- */
-static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
-                                 U32 const remaining, U32 const minMatch)
-{
-    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
-    assert(sequence.offset > 0);
-    /* Likely: No partial sequence */
-    if (remaining >= sequence.litLength + sequence.matchLength) {
-        rawSeqStore->pos++;
-        return sequence;
-    }
-    /* Cut the sequence short (offset == 0 ==> rest is literals). */
-    if (remaining <= sequence.litLength) {
-        sequence.offset = 0;
-    } else if (remaining < sequence.litLength + sequence.matchLength) {
-        sequence.matchLength = remaining - sequence.litLength;
-        if (sequence.matchLength < minMatch) {
-            sequence.offset = 0;
-        }
-    }
-    /* Skip past `remaining` bytes for the future sequences. */
-    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
-    return sequence;
-}
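
`maybeSplitSequence` handles the case where the next raw sequence straddles the end of the current block: the match is truncated to the bytes remaining, and if the truncated match drops below `minMatch` the sequence degrades to literals (offset == 0). A self-contained sketch with a stand-in struct (not the vendored `rawSeq` type):

```c
/* Standalone sketch of the block-boundary split performed above. */
#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t litLength, matchLength, offset; } raw_seq;

static raw_seq split_for_block(raw_seq s, uint32_t remaining, uint32_t minMatch)
{
    if (remaining >= s.litLength + s.matchLength)
        return s;                                   /* fits entirely */
    if (remaining <= s.litLength) {
        s.offset = 0;                               /* rest of block is literals */
    } else {
        s.matchLength = remaining - s.litLength;    /* truncate the match */
        if (s.matchLength < minMatch) s.offset = 0; /* too short: emit literals */
    }
    return s;
}

int main(void)
{
    raw_seq const seq = { 10, 40, 1234 };
    raw_seq const cut = split_for_block(seq, 25, 4);   /* only 25 bytes left in block */
    printf("lit=%u match=%u offset=%u\n", cut.litLength, cut.matchLength, cut.offset);
    /* prints lit=10 match=15 offset=1234 : 10 literals plus a 15-byte match */
    return 0;
}
```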
-
-size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
-    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-    void const* src, size_t srcSize)
-{
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    unsigned const minMatch = cParams->minMatch;
-    ZSTD_blockCompressor const blockCompressor =
-        ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
-    /* Input bounds */
-    BYTE const* const istart = (BYTE const*)src;
-    BYTE const* const iend = istart + srcSize;
-    /* Input positions */
-    BYTE const* ip = istart;
-
-    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
-    assert(rawSeqStore->pos <= rawSeqStore->size);
-    assert(rawSeqStore->size <= rawSeqStore->capacity);
-    /* Loop through each sequence and apply the block compressor to the lits */
-    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
-        /* maybeSplitSequence updates rawSeqStore->pos */
-        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
-                                                   (U32)(iend - ip), minMatch);
-        int i;
-        /* End signal */
-        if (sequence.offset == 0)
-            break;
-
-        assert(sequence.offset <= (1U << cParams->windowLog));
-        assert(ip + sequence.litLength + sequence.matchLength <= iend);
-
-        /* Fill tables for block compressor */
-        ZSTD_ldm_limitTableUpdate(ms, ip);
-        ZSTD_ldm_fillFastTables(ms, ip);
-        /* Run the block compressor */
-        DEBUGLOG(5, "calling block compressor on segment of size %u", sequence.litLength);
-        {
-            size_t const newLitLength =
-                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
-            ip += sequence.litLength;
-            /* Update the repcodes */
-            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
-                rep[i] = rep[i-1];
-            rep[0] = sequence.offset;
-            /* Store the sequence */
-            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength,
-                          sequence.offset + ZSTD_REP_MOVE,
-                          sequence.matchLength - MINMATCH);
-            ip += sequence.matchLength;
-        }
-    }
-    /* Fill the tables for the block compressor */
-    ZSTD_ldm_limitTableUpdate(ms, ip);
-    ZSTD_ldm_fillFastTables(ms, ip);
-    /* Compress the last literals */
-    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
-}
diff --git a/vendor/github.com/DataDog/zstd/zstd_ldm.h b/vendor/github.com/DataDog/zstd/zstd_ldm.h
deleted file mode 100644
index a478461..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_ldm.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- */
-
-#ifndef ZSTD_LDM_H
-#define ZSTD_LDM_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#include "zstd_compress_internal.h"   /* ldmParams_t, U32 */
-#include "zstd.h"   /* ZSTD_CCtx, size_t */
-
-/*-*************************************
-*  Long distance matching
-***************************************/
-
-#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT
-
-/**
- * ZSTD_ldm_generateSequences():
- *
- * Generates the sequences using the long distance match finder.
- * Generates long range matching sequences in `sequences`, which parse a prefix
- * of the source. `sequences` must be large enough to store every sequence,
- * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
- * @returns 0 or an error code.
- *
- * NOTE: The user must have called ZSTD_window_update() for all of the input
- * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
- * NOTE: This function returns an error if it runs out of space to store
- *       sequences.
- */
-size_t ZSTD_ldm_generateSequences(
-            ldmState_t* ldms, rawSeqStore_t* sequences,
-            ldmParams_t const* params, void const* src, size_t srcSize);
-
-/**
- * ZSTD_ldm_blockCompress():
- *
- * Compresses a block using the predefined sequences, along with a secondary
- * block compressor. The literals section of every sequence is passed to the
- * secondary block compressor, and those sequences are interspersed with the
- * predefined sequences. Returns the length of the last literals.
- * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
- * `rawSeqStore.seq` may also be updated to split the last sequence between two
- * blocks.
- * @return The length of the last literals.
- *
- * NOTE: The source must be at most the maximum block size, but the predefined
- * sequences can be any size, and may be longer than the block. In the case that
- * they are longer than the block, the last sequences may need to be split into
- * two. We handle that case correctly, and update `rawSeqStore` appropriately.
- * NOTE: This function does not return any errors.
- */
-size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
-            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-            void const* src, size_t srcSize);
-
-/**
- * ZSTD_ldm_skipSequences():
- *
- * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
- * Avoids emitting matches less than `minMatch` bytes.
- * Must be called for data which is not passed to ZSTD_ldm_blockCompress().
- */
-void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
-    U32 const minMatch);
-
-
-/** ZSTD_ldm_getTableSize() :
- *  Estimate the space needed for long distance matching tables or 0 if LDM is
- *  disabled.
- */
-size_t ZSTD_ldm_getTableSize(ldmParams_t params);
-
-/** ZSTD_ldm_getSeqSpace() :
- *  Return an upper bound on the number of sequences that can be produced by
- *  the long distance matcher, or 0 if LDM is disabled.
- */
-size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
-
-/** ZSTD_ldm_adjustParameters() :
- *  If the params->hashRateLog is not set, set it to its default value based on
- *  windowLog and params->hashLog.
- *
- *  Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
- *  params->hashLog if it is not).
- *
- *  Ensures that the minMatchLength >= targetLength during optimal parsing.
- */
-void ZSTD_ldm_adjustParameters(ldmParams_t* params,
-                               ZSTD_compressionParameters const* cParams);
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_LDM_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_legacy.h b/vendor/github.com/DataDog/zstd/zstd_legacy.h
deleted file mode 100644
index 0dbd3c7..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_legacy.h
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_LEGACY_H
-#define ZSTD_LEGACY_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* *************************************
-*  Includes
-***************************************/
-#include "mem.h"            /* MEM_STATIC */
-#include "error_private.h"  /* ERROR */
-#include "zstd_internal.h"  /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTD_frameSizeInfo */
-
-#if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0)
-#  undef ZSTD_LEGACY_SUPPORT
-#  define ZSTD_LEGACY_SUPPORT 8
-#endif
-
-#if (ZSTD_LEGACY_SUPPORT <= 1)
-#  include "zstd_v01.h"
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 2)
-#  include "zstd_v02.h"
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 3)
-#  include "zstd_v03.h"
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 4)
-#  include "zstd_v04.h"
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 5)
-#  include "zstd_v05.h"
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
-#  include "zstd_v06.h"
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
-#  include "zstd_v07.h"
-#endif
-
-/** ZSTD_isLegacy() :
-    @return : > 0 if supported by legacy decoder. 0 otherwise.
-              return value is the version.
-*/
-MEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize)
-{
-    U32 magicNumberLE;
-    if (srcSize<4) return 0;
-    magicNumberLE = MEM_readLE32(src);
-    switch(magicNumberLE)
-    {
-#if (ZSTD_LEGACY_SUPPORT <= 1)
-        case ZSTDv01_magicNumberLE:return 1;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 2)
-        case ZSTDv02_magicNumber : return 2;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 3)
-        case ZSTDv03_magicNumber : return 3;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 4)
-        case ZSTDv04_magicNumber : return 4;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 5)
-        case ZSTDv05_MAGICNUMBER : return 5;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
-        case ZSTDv06_MAGICNUMBER : return 6;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
-        case ZSTDv07_MAGICNUMBER : return 7;
-#endif
-        default : return 0;
-    }
-}
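
`ZSTD_isLegacy` dispatches purely on the first four bytes read as a little-endian magic number. The sketch below shows that detection idea against the current frame magic (0xFD2FB528); the per-version legacy constants come from the zstd_v0*.h headers and are deliberately not restated here.

```c
/* Little-endian magic-number probe, mirroring the dispatch above. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint32_t read_le32(const void* p)
{
    const uint8_t* b = (const uint8_t*)p;
    return (uint32_t)b[0] | ((uint32_t)b[1] << 8)
         | ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

/* returns 1 if the magic is not the current frame magic (possibly legacy),
 * 0 for a current-format frame or when there is too little data to tell */
static int looks_legacy(const void* src, size_t srcSize)
{
    if (srcSize < 4) return 0;
    return read_le32(src) == 0xFD2FB528U ? 0 : 1;
}

int main(void)
{
    const uint8_t current[4] = { 0x28, 0xB5, 0x2F, 0xFD };   /* current zstd frame */
    const uint8_t other[4]   = { 0x27, 0xB5, 0x2F, 0xFD };   /* not the current magic */
    printf("%d %d\n", looks_legacy(current, 4), looks_legacy(other, 4));  /* 0 1 */
    return 0;
}
```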
-
-
-MEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, size_t srcSize)
-{
-    U32 const version = ZSTD_isLegacy(src, srcSize);
-    if (version < 5) return 0;  /* no decompressed size in frame header, or not a legacy format */
-#if (ZSTD_LEGACY_SUPPORT <= 5)
-    if (version==5) {
-        ZSTDv05_parameters fParams;
-        size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize);
-        if (frResult != 0) return 0;
-        return fParams.srcSize;
-    }
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
-    if (version==6) {
-        ZSTDv06_frameParams fParams;
-        size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize);
-        if (frResult != 0) return 0;
-        return fParams.frameContentSize;
-    }
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
-    if (version==7) {
-        ZSTDv07_frameParams fParams;
-        size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize);
-        if (frResult != 0) return 0;
-        return fParams.frameContentSize;
-    }
-#endif
-    return 0;   /* should not be possible */
-}
-
-
-MEM_STATIC size_t ZSTD_decompressLegacy(
-                     void* dst, size_t dstCapacity,
-               const void* src, size_t compressedSize,
-               const void* dict,size_t dictSize)
-{
-    U32 const version = ZSTD_isLegacy(src, compressedSize);
-    (void)dst; (void)dstCapacity; (void)dict; (void)dictSize;  /* unused when ZSTD_LEGACY_SUPPORT >= 8 */
-    switch(version)
-    {
-#if (ZSTD_LEGACY_SUPPORT <= 1)
-        case 1 :
-            return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize);
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 2)
-        case 2 :
-            return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize);
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 3)
-        case 3 :
-            return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize);
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 4)
-        case 4 :
-            return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize);
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 5)
-        case 5 :
-            {   size_t result;
-                ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx();
-                if (zd==NULL) return ERROR(memory_allocation);
-                result = ZSTDv05_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
-                ZSTDv05_freeDCtx(zd);
-                return result;
-            }
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
-        case 6 :
-            {   size_t result;
-                ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx();
-                if (zd==NULL) return ERROR(memory_allocation);
-                result = ZSTDv06_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
-                ZSTDv06_freeDCtx(zd);
-                return result;
-            }
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
-        case 7 :
-            {   size_t result;
-                ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx();
-                if (zd==NULL) return ERROR(memory_allocation);
-                result = ZSTDv07_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
-                ZSTDv07_freeDCtx(zd);
-                return result;
-            }
-#endif
-        default :
-            return ERROR(prefix_unknown);
-    }
-}
-
-MEM_STATIC ZSTD_frameSizeInfo ZSTD_findFrameSizeInfoLegacy(const void *src, size_t srcSize)
-{
-    ZSTD_frameSizeInfo frameSizeInfo;
-    U32 const version = ZSTD_isLegacy(src, srcSize);
-    switch(version)
-    {
-#if (ZSTD_LEGACY_SUPPORT <= 1)
-        case 1 :
-            ZSTDv01_findFrameSizeInfoLegacy(src, srcSize,
-                &frameSizeInfo.compressedSize,
-                &frameSizeInfo.decompressedBound);
-            break;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 2)
-        case 2 :
-            ZSTDv02_findFrameSizeInfoLegacy(src, srcSize,
-                &frameSizeInfo.compressedSize,
-                &frameSizeInfo.decompressedBound);
-            break;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 3)
-        case 3 :
-            ZSTDv03_findFrameSizeInfoLegacy(src, srcSize,
-                &frameSizeInfo.compressedSize,
-                &frameSizeInfo.decompressedBound);
-            break;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 4)
-        case 4 :
-            ZSTDv04_findFrameSizeInfoLegacy(src, srcSize,
-                &frameSizeInfo.compressedSize,
-                &frameSizeInfo.decompressedBound);
-            break;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 5)
-        case 5 :
-            ZSTDv05_findFrameSizeInfoLegacy(src, srcSize,
-                &frameSizeInfo.compressedSize,
-                &frameSizeInfo.decompressedBound);
-            break;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
-        case 6 :
-            ZSTDv06_findFrameSizeInfoLegacy(src, srcSize,
-                &frameSizeInfo.compressedSize,
-                &frameSizeInfo.decompressedBound);
-            break;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
-        case 7 :
-            ZSTDv07_findFrameSizeInfoLegacy(src, srcSize,
-                &frameSizeInfo.compressedSize,
-                &frameSizeInfo.decompressedBound);
-            break;
-#endif
-        default :
-            frameSizeInfo.compressedSize = ERROR(prefix_unknown);
-            frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
-            break;
-    }
-    if (!ZSTD_isError(frameSizeInfo.compressedSize) && frameSizeInfo.compressedSize > srcSize) {
-        frameSizeInfo.compressedSize = ERROR(srcSize_wrong);
-        frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
-    }
-    return frameSizeInfo;
-}
-
-MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src, size_t srcSize)
-{
-    ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfoLegacy(src, srcSize);
-    return frameSizeInfo.compressedSize;
-}
-
-MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version)
-{
-    switch(version)
-    {
-        default :
-        case 1 :
-        case 2 :
-        case 3 :
-            (void)legacyContext;
-            return ERROR(version_unsupported);
-#if (ZSTD_LEGACY_SUPPORT <= 4)
-        case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext);
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 5)
-        case 5 : return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext);
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
-        case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext);
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
-        case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext);
-#endif
-    }
-}
-
-
-MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion,
-                                        const void* dict, size_t dictSize)
-{
-    DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion);
-    if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion);
-    switch(newVersion)
-    {
-        default :
-        case 1 :
-        case 2 :
-        case 3 :
-            (void)dict; (void)dictSize;
-            return 0;
-#if (ZSTD_LEGACY_SUPPORT <= 4)
-        case 4 :
-        {
-            ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext;
-            if (dctx==NULL) return ERROR(memory_allocation);
-            ZBUFFv04_decompressInit(dctx);
-            ZBUFFv04_decompressWithDictionary(dctx, dict, dictSize);
-            *legacyContext = dctx;
-            return 0;
-        }
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 5)
-        case 5 :
-        {
-            ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext;
-            if (dctx==NULL) return ERROR(memory_allocation);
-            ZBUFFv05_decompressInitDictionary(dctx, dict, dictSize);
-            *legacyContext = dctx;
-            return 0;
-        }
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
-        case 6 :
-        {
-            ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext;
-            if (dctx==NULL) return ERROR(memory_allocation);
-            ZBUFFv06_decompressInitDictionary(dctx, dict, dictSize);
-            *legacyContext = dctx;
-            return 0;
-        }
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
-        case 7 :
-        {
-            ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext;
-            if (dctx==NULL) return ERROR(memory_allocation);
-            ZBUFFv07_decompressInitDictionary(dctx, dict, dictSize);
-            *legacyContext = dctx;
-            return 0;
-        }
-#endif
-    }
-}
-
-
-
-MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
-                                              ZSTD_outBuffer* output, ZSTD_inBuffer* input)
-{
-    DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version);
-    switch(version)
-    {
-        default :
-        case 1 :
-        case 2 :
-        case 3 :
-            (void)legacyContext; (void)output; (void)input;
-            return ERROR(version_unsupported);
-#if (ZSTD_LEGACY_SUPPORT <= 4)
-        case 4 :
-            {
-                ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext;
-                const void* src = (const char*)input->src + input->pos;
-                size_t readSize = input->size - input->pos;
-                void* dst = (char*)output->dst + output->pos;
-                size_t decodedSize = output->size - output->pos;
-                size_t const hintSize = ZBUFFv04_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
-                output->pos += decodedSize;
-                input->pos += readSize;
-                return hintSize;
-            }
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 5)
-        case 5 :
-            {
-                ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext;
-                const void* src = (const char*)input->src + input->pos;
-                size_t readSize = input->size - input->pos;
-                void* dst = (char*)output->dst + output->pos;
-                size_t decodedSize = output->size - output->pos;
-                size_t const hintSize = ZBUFFv05_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
-                output->pos += decodedSize;
-                input->pos += readSize;
-                return hintSize;
-            }
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
-        case 6 :
-            {
-                ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext;
-                const void* src = (const char*)input->src + input->pos;
-                size_t readSize = input->size - input->pos;
-                void* dst = (char*)output->dst + output->pos;
-                size_t decodedSize = output->size - output->pos;
-                size_t const hintSize = ZBUFFv06_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
-                output->pos += decodedSize;
-                input->pos += readSize;
-                return hintSize;
-            }
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
-        case 7 :
-            {
-                ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext;
-                const void* src = (const char*)input->src + input->pos;
-                size_t readSize = input->size - input->pos;
-                void* dst = (char*)output->dst + output->pos;
-                size_t decodedSize = output->size - output->pos;
-                size_t const hintSize = ZBUFFv07_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
-                output->pos += decodedSize;
-                input->pos += readSize;
-                return hintSize;
-            }
-#endif
-    }
-}
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif   /* ZSTD_LEGACY_H */
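
Every legacy stream wrapper above follows the same buffer contract as the modern streaming API: the caller owns the in/out buffers, the library advances their `.pos` fields, and the return value is a size hint (0 once the frame is fully decoded). A minimal sketch of that loop against the public zstd.h API (link with -lzstd; error handling abbreviated):

```c
/* Round-trip a small message and decompress it with the streaming API,
 * illustrating the pos/size bookkeeping used by the legacy wrappers above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
    const char msg[] = "hello, long distance matching";
    size_t const bound = ZSTD_compressBound(sizeof msg);
    void* const cbuf = malloc(bound);
    size_t const csize = ZSTD_compress(cbuf, bound, msg, sizeof msg, 3);
    if (ZSTD_isError(csize)) return 1;

    char out[64];
    ZSTD_DStream* const ds = ZSTD_createDStream();
    ZSTD_initDStream(ds);
    ZSTD_inBuffer  in  = { cbuf, csize, 0 };
    ZSTD_outBuffer dst = { out, sizeof out, 0 };
    while (in.pos < in.size) {
        size_t const hint = ZSTD_decompressStream(ds, &dst, &in);  /* advances in.pos / dst.pos */
        if (ZSTD_isError(hint)) return 1;
        if (hint == 0) break;                                       /* frame fully decoded */
    }
    printf("%s\n", out);   /* msg was compressed with its NUL terminator */
    ZSTD_freeDStream(ds);
    free(cbuf);
    return 0;
}
```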
diff --git a/vendor/github.com/DataDog/zstd/zstd_opt.c b/vendor/github.com/DataDog/zstd/zstd_opt.c
deleted file mode 100644
index e32e542..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_opt.c
+++ /dev/null
@@ -1,1246 +0,0 @@
-/*
- * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#include "zstd_compress_internal.h"
-#include "hist.h"
-#include "zstd_opt.h"
-
-
-#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
-#define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
-#define ZSTD_MAX_PRICE     (1<<30)
-
-#define ZSTD_PREDEF_THRESHOLD 1024   /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
-
-
-/*-*************************************
-*  Price functions for optimal parser
-***************************************/
-
-#if 0    /* approximation at bit level */
-#  define BITCOST_ACCURACY 0
-#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
-#  define WEIGHT(stat)  ((void)opt, ZSTD_bitWeight(stat))
-#elif 0  /* fractional bit accuracy */
-#  define BITCOST_ACCURACY 8
-#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
-#  define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
-#else    /* opt==approx, ultra==accurate */
-#  define BITCOST_ACCURACY 8
-#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
-#  define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
-#endif
-
-MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
-{
-    return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
-}
-
-MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
-{
-    U32 const stat = rawStat + 1;
-    U32 const hb = ZSTD_highbit32(stat);
-    U32 const BWeight = hb * BITCOST_MULTIPLIER;
-    U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
-    U32 const weight = BWeight + FWeight;
-    assert(hb + BITCOST_ACCURACY < 31);
-    return weight;
-}
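
`ZSTD_bitWeight` and `ZSTD_fracWeight` express symbol weights in fixed-point "bits" (1/256ths of a bit with BITCOST_ACCURACY == 8); the fractional variant adds a linear correction below the leading bit. A worked standalone example: for rawStat = 5, the bit weight is 512 (2.00 bits) and the fractional weight is 896 (3.50).

```c
/* Re-statement of the two weight functions above for illustration;
 * highbit32 is re-implemented with a simple loop. */
#include <stdint.h>
#include <stdio.h>

#define BITCOST_ACCURACY   8
#define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)

static uint32_t highbit32(uint32_t v)   /* index of the highest set bit, v > 0 */
{
    uint32_t n = 0;
    while (v >>= 1) n++;
    return n;
}

static uint32_t bit_weight(uint32_t stat)
{
    return highbit32(stat + 1) * BITCOST_MULTIPLIER;
}

static uint32_t frac_weight(uint32_t rawStat)
{
    uint32_t const stat = rawStat + 1;
    uint32_t const hb = highbit32(stat);
    uint32_t const bw = hb * BITCOST_MULTIPLIER;
    uint32_t const fw = (stat << BITCOST_ACCURACY) >> hb;  /* fractional correction */
    return bw + fw;
}

int main(void)
{
    /* rawStat = 5 : stat = 6, highbit = 2, so bit_weight = 512 (2.00 bits)
     * and frac_weight = 512 + (6*256)/4 = 896, i.e. 3.50 "bits" of weight */
    printf("bit=%u frac=%u (%.2f)\n", bit_weight(5), frac_weight(5),
           (double)frac_weight(5) / BITCOST_MULTIPLIER);
    return 0;
}
```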
-
-#if (DEBUGLEVEL>=2)
-/* debugging function,
- * @return price in bytes as fractional value
- * for debug messages only */
-MEM_STATIC double ZSTD_fCost(U32 price)
-{
-    return (double)price / (BITCOST_MULTIPLIER*8);
-}
-#endif
-
-static int ZSTD_compressedLiterals(optState_t const* const optPtr)
-{
-    return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
-}
-
-static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
-{
-    if (ZSTD_compressedLiterals(optPtr))
-        optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
-    optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
-    optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
-    optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
-}
-
-
-/* ZSTD_downscaleStat() :
- * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
- * return the resulting sum of elements */
-static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
-{
-    U32 s, sum=0;
-    DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
-    assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
-    for (s=0; s<lastEltIndex+1; s++) {
-        table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
-        sum += table[s];
-    }
-    return sum;
-}
-
-/* ZSTD_rescaleFreqs() :
- * if first block (detected by optPtr->litLengthSum == 0) : init statistics
- *    take hints from dictionary if there is one
- *    or init from zero, using src for literals stats, or flat 1 for match symbols
- * otherwise downscale existing stats, to be used as seed for next block.
- */
-static void
-ZSTD_rescaleFreqs(optState_t* const optPtr,
-            const BYTE* const src, size_t const srcSize,
-                  int const optLevel)
-{
-    int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
-    DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
-    optPtr->priceType = zop_dynamic;
-
-    if (optPtr->litLengthSum == 0) {  /* first block : init */
-        if (srcSize <= ZSTD_PREDEF_THRESHOLD) {  /* heuristic */
-            DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
-            optPtr->priceType = zop_predef;
-        }
-
-        assert(optPtr->symbolCosts != NULL);
-        if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
-            /* huffman table presumed generated by dictionary */
-            optPtr->priceType = zop_dynamic;
-
-            if (compressedLiterals) {
-                unsigned lit;
-                assert(optPtr->litFreq != NULL);
-                optPtr->litSum = 0;
-                for (lit=0; lit<=MaxLit; lit++) {
-                    U32 const scaleLog = 11;   /* scale to 2K */
-                    U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
-                    assert(bitCost <= scaleLog);
-                    optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
-                    optPtr->litSum += optPtr->litFreq[lit];
-            }   }
-
-            {   unsigned ll;
-                FSE_CState_t llstate;
-                FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
-                optPtr->litLengthSum = 0;
-                for (ll=0; ll<=MaxLL; ll++) {
-                    U32 const scaleLog = 10;   /* scale to 1K */
-                    U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
-                    assert(bitCost < scaleLog);
-                    optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
-                    optPtr->litLengthSum += optPtr->litLengthFreq[ll];
-            }   }
-
-            {   unsigned ml;
-                FSE_CState_t mlstate;
-                FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
-                optPtr->matchLengthSum = 0;
-                for (ml=0; ml<=MaxML; ml++) {
-                    U32 const scaleLog = 10;
-                    U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
-                    assert(bitCost < scaleLog);
-                    optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
-                    optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
-            }   }
-
-            {   unsigned of;
-                FSE_CState_t ofstate;
-                FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
-                optPtr->offCodeSum = 0;
-                for (of=0; of<=MaxOff; of++) {
-                    U32 const scaleLog = 10;
-                    U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
-                    assert(bitCost < scaleLog);
-                    optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
-                    optPtr->offCodeSum += optPtr->offCodeFreq[of];
-            }   }
-
-        } else {  /* not a dictionary */
-
-            assert(optPtr->litFreq != NULL);
-            if (compressedLiterals) {
-                unsigned lit = MaxLit;
-                HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
-                optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
-            }
-
-            {   unsigned ll;
-                for (ll=0; ll<=MaxLL; ll++)
-                    optPtr->litLengthFreq[ll] = 1;
-            }
-            optPtr->litLengthSum = MaxLL+1;
-
-            {   unsigned ml;
-                for (ml=0; ml<=MaxML; ml++)
-                    optPtr->matchLengthFreq[ml] = 1;
-            }
-            optPtr->matchLengthSum = MaxML+1;
-
-            {   unsigned of;
-                for (of=0; of<=MaxOff; of++)
-                    optPtr->offCodeFreq[of] = 1;
-            }
-            optPtr->offCodeSum = MaxOff+1;
-
-        }
-
-    } else {   /* new block : re-use previous statistics, scaled down */
-
-        if (compressedLiterals)
-            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
-        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
-        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
-        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
-    }
-
-    ZSTD_setBasePrices(optPtr, optLevel);
-}
-
-/* ZSTD_rawLiteralsCost() :
- * price of literals (only) in specified segment (which length can be 0).
- * does not include price of literalLength symbol */
-static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
-                                const optState_t* const optPtr,
-                                int optLevel)
-{
-    if (litLength == 0) return 0;
-
-    if (!ZSTD_compressedLiterals(optPtr))
-        return (litLength << 3) * BITCOST_MULTIPLIER;  /* Uncompressed - 8 bytes per literal. */
-
-    if (optPtr->priceType == zop_predef)
-        return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bit per literal - no statistic used */
-
-    /* dynamic statistics */
-    {   U32 price = litLength * optPtr->litSumBasePrice;
-        U32 u;
-        for (u=0; u < litLength; u++) {
-            assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice);   /* literal cost should never be negative */
-            price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
-        }
-        return price;
-    }
-}
-
-/* ZSTD_litLengthPrice() :
- * cost of literalLength symbol */
-static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
-{
-    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
-
-    /* dynamic statistics */
-    {   U32 const llCode = ZSTD_LLcode(litLength);
-        return (LL_bits[llCode] * BITCOST_MULTIPLIER)
-             + optPtr->litLengthSumBasePrice
-             - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
-    }
-}
-
-/* ZSTD_litLengthContribution() :
- * @return ( cost(litlength) - cost(0) )
- * this value can then be added to rawLiteralsCost()
- * to provide a cost which is directly comparable to a match ending at same position */
-static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr, int optLevel)
-{
-    if (optPtr->priceType >= zop_predef) return (int)WEIGHT(litLength, optLevel);
-
-    /* dynamic statistics */
-    {   U32 const llCode = ZSTD_LLcode(litLength);
-        int const contribution = (int)(LL_bits[llCode] * BITCOST_MULTIPLIER)
-                               + (int)WEIGHT(optPtr->litLengthFreq[0], optLevel)   /* note: log2litLengthSum cancel out */
-                               - (int)WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
-#if 1
-        return contribution;
-#else
-        return MAX(0, contribution); /* sometimes better, sometimes not ... */
-#endif
-    }
-}
-
-/* ZSTD_literalsContribution() :
- * creates a fake cost for the literals part of a sequence
- * which can be compared to the ending cost of a match
- * should a new match start at this position */
-static int ZSTD_literalsContribution(const BYTE* const literals, U32 const litLength,
-                                     const optState_t* const optPtr,
-                                     int optLevel)
-{
-    int const contribution = (int)ZSTD_rawLiteralsCost(literals, litLength, optPtr, optLevel)
-                           + ZSTD_litLengthContribution(litLength, optPtr, optLevel);
-    return contribution;
-}
-
-/* ZSTD_getMatchPrice() :
- * Provides the cost of the match part (offset + matchLength) of a sequence
- * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
- * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
-FORCE_INLINE_TEMPLATE U32
-ZSTD_getMatchPrice(U32 const offset,
-                   U32 const matchLength,
-             const optState_t* const optPtr,
-                   int const optLevel)
-{
-    U32 price;
-    U32 const offCode = ZSTD_highbit32(offset+1);
-    U32 const mlBase = matchLength - MINMATCH;
-    assert(matchLength >= MINMATCH);
-
-    if (optPtr->priceType == zop_predef)  /* fixed scheme, do not use statistics */
-        return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
-
-    /* dynamic statistics */
-    price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
-    if ((optLevel<2) /*static*/ && offCode >= 20)
-        price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */
-
-    /* match Length */
-    {   U32 const mlCode = ZSTD_MLcode(mlBase);
-        price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
-    }
-
-    price += BITCOST_MULTIPLIER / 5;   /* heuristic : make matches a bit more costly to favor fewer sequences -> faster decompression speed */
-
-    DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
-    return price;
-}
-
-/* ZSTD_updateStats() :
- * assumption : literals + litLength <= iend */
-static void ZSTD_updateStats(optState_t* const optPtr,
-                             U32 litLength, const BYTE* literals,
-                             U32 offsetCode, U32 matchLength)
-{
-    /* literals */
-    if (ZSTD_compressedLiterals(optPtr)) {
-        U32 u;
-        for (u=0; u < litLength; u++)
-            optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
-        optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
-    }
-
-    /* literal Length */
-    {   U32 const llCode = ZSTD_LLcode(litLength);
-        optPtr->litLengthFreq[llCode]++;
-        optPtr->litLengthSum++;
-    }
-
-    /* match offset code (0-2=>repCode; 3+=>offset+2) */
-    {   U32 const offCode = ZSTD_highbit32(offsetCode+1);
-        assert(offCode <= MaxOff);
-        optPtr->offCodeFreq[offCode]++;
-        optPtr->offCodeSum++;
-    }
-
-    /* match Length */
-    {   U32 const mlBase = matchLength - MINMATCH;
-        U32 const mlCode = ZSTD_MLcode(mlBase);
-        optPtr->matchLengthFreq[mlCode]++;
-        optPtr->matchLengthSum++;
-    }
-}
-
-
-/* ZSTD_readMINMATCH() :
- * function safe only for comparisons
- * assumption : memPtr must be at least 4 bytes before end of buffer */
-MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
-{
-    switch (length)
-    {
-    default :
-    case 4 : return MEM_read32(memPtr);
-    case 3 : if (MEM_isLittleEndian())
-                return MEM_read32(memPtr)<<8;
-             else
-                return MEM_read32(memPtr)>>8;
-    }
-}
-
-
-/* Update hashTable3 up to ip (excluded)
-   Assumption : always within prefix (i.e. not within extDict) */
-static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
-                                              U32* nextToUpdate3,
-                                              const BYTE* const ip)
-{
-    U32* const hashTable3 = ms->hashTable3;
-    U32 const hashLog3 = ms->hashLog3;
-    const BYTE* const base = ms->window.base;
-    U32 idx = *nextToUpdate3;
-    U32 const target = (U32)(ip - base);
-    size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
-    assert(hashLog3 > 0);
-
-    while(idx < target) {
-        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
-        idx++;
-    }
-
-    *nextToUpdate3 = target;
-    return hashTable3[hash3];
-}
-
-
-/*-*************************************
-*  Binary Tree search
-***************************************/
-/** ZSTD_insertBt1() : add one or multiple positions to tree.
- *  ip : assumed <= iend-8 .
- * @return : nb of positions added */
-static U32 ZSTD_insertBt1(
-                ZSTD_matchState_t* ms,
-                const BYTE* const ip, const BYTE* const iend,
-                U32 const mls, const int extDict)
-{
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    U32*   const hashTable = ms->hashTable;
-    U32    const hashLog = cParams->hashLog;
-    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
-    U32*   const bt = ms->chainTable;
-    U32    const btLog  = cParams->chainLog - 1;
-    U32    const btMask = (1 << btLog) - 1;
-    U32 matchIndex = hashTable[h];
-    size_t commonLengthSmaller=0, commonLengthLarger=0;
-    const BYTE* const base = ms->window.base;
-    const BYTE* const dictBase = ms->window.dictBase;
-    const U32 dictLimit = ms->window.dictLimit;
-    const BYTE* const dictEnd = dictBase + dictLimit;
-    const BYTE* const prefixStart = base + dictLimit;
-    const BYTE* match;
-    const U32 current = (U32)(ip-base);
-    const U32 btLow = btMask >= current ? 0 : current - btMask;
-    U32* smallerPtr = bt + 2*(current&btMask);
-    U32* largerPtr  = smallerPtr + 1;
-    U32 dummy32;   /* to be nullified at the end */
-    U32 const windowLow = ms->window.lowLimit;
-    U32 matchEndIdx = current+8+1;
-    size_t bestLength = 8;
-    U32 nbCompares = 1U << cParams->searchLog;
-#ifdef ZSTD_C_PREDICT
-    U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
-    U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
-    predictedSmall += (predictedSmall>0);
-    predictedLarge += (predictedLarge>0);
-#endif /* ZSTD_C_PREDICT */
-
-    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current);
-
-    assert(ip <= iend-8);   /* required for h calculation */
-    hashTable[h] = current;   /* Update Hash Table */
-
-    assert(windowLow > 0);
-    while (nbCompares-- && (matchIndex >= windowLow)) {
-        U32* const nextPtr = bt + 2*(matchIndex & btMask);
-        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
-        assert(matchIndex < current);
-
-#ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog small <= 11 */
-        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
-        if (matchIndex == predictedSmall) {
-            /* no need to check length, result known */
-            *smallerPtr = matchIndex;
-            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
-            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
-            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
-            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
-            continue;
-        }
-        if (matchIndex == predictedLarge) {
-            *largerPtr = matchIndex;
-            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
-            largerPtr = nextPtr;
-            matchIndex = nextPtr[0];
-            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
-            continue;
-        }
-#endif
-
-        if (!extDict || (matchIndex+matchLength >= dictLimit)) {
-            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if actually extDict */
-            match = base + matchIndex;
-            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
-        } else {
-            match = dictBase + matchIndex;
-            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
-            if (matchIndex+matchLength >= dictLimit)
-                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
-        }
-
-        if (matchLength > bestLength) {
-            bestLength = matchLength;
-            if (matchLength > matchEndIdx - matchIndex)
-                matchEndIdx = matchIndex + (U32)matchLength;
-        }
-
-        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
-            break;   /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
-        }
-
-        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
-            /* match is smaller than current */
-            *smallerPtr = matchIndex;             /* update smaller idx */
-            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
-            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
-            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
-            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
-        } else {
-            /* match is larger than current */
-            *largerPtr = matchIndex;
-            commonLengthLarger = matchLength;
-            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
-            largerPtr = nextPtr;
-            matchIndex = nextPtr[0];
-    }   }
-
-    *smallerPtr = *largerPtr = 0;
-    {   U32 positions = 0;
-        if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384));   /* speed optimization */
-        assert(matchEndIdx > current + 8);
-        return MAX(positions, matchEndIdx - (current + 8));
-    }
-}
-
-FORCE_INLINE_TEMPLATE
-void ZSTD_updateTree_internal(
-                ZSTD_matchState_t* ms,
-                const BYTE* const ip, const BYTE* const iend,
-                const U32 mls, const ZSTD_dictMode_e dictMode)
-{
-    const BYTE* const base = ms->window.base;
-    U32 const target = (U32)(ip - base);
-    U32 idx = ms->nextToUpdate;
-    DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u  (dictMode:%u)",
-                idx, target, dictMode);
-
-    while(idx < target) {
-        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
-        assert(idx < (U32)(idx + forward));
-        idx += forward;
-    }
-    assert((size_t)(ip - base) <= (size_t)(U32)(-1));
-    assert((size_t)(iend - base) <= (size_t)(U32)(-1));
-    ms->nextToUpdate = target;
-}
-
-void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
-    ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
-}
-
-FORCE_INLINE_TEMPLATE
-U32 ZSTD_insertBtAndGetAllMatches (
-                    ZSTD_match_t* matches,   /* store result (found matches) in this table (presumed large enough) */
-                    ZSTD_matchState_t* ms,
-                    U32* nextToUpdate3,
-                    const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
-                    const U32 rep[ZSTD_REP_NUM],
-                    U32 const ll0,   /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
-                    const U32 lengthToBeat,
-                    U32 const mls /* template */)
-{
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
-    U32 const maxDistance = 1U << cParams->windowLog;
-    const BYTE* const base = ms->window.base;
-    U32 const current = (U32)(ip-base);
-    U32 const hashLog = cParams->hashLog;
-    U32 const minMatch = (mls==3) ? 3 : 4;
-    U32* const hashTable = ms->hashTable;
-    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
-    U32 matchIndex  = hashTable[h];
-    U32* const bt   = ms->chainTable;
-    U32 const btLog = cParams->chainLog - 1;
-    U32 const btMask= (1U << btLog) - 1;
-    size_t commonLengthSmaller=0, commonLengthLarger=0;
-    const BYTE* const dictBase = ms->window.dictBase;
-    U32 const dictLimit = ms->window.dictLimit;
-    const BYTE* const dictEnd = dictBase + dictLimit;
-    const BYTE* const prefixStart = base + dictLimit;
-    U32 const btLow = (btMask >= current) ? 0 : current - btMask;
-    U32 const windowValid = ms->window.lowLimit;
-    U32 const windowLow = ((current - windowValid) > maxDistance) ? current - maxDistance : windowValid;
-    U32 const matchLow = windowLow ? windowLow : 1;
-    U32* smallerPtr = bt + 2*(current&btMask);
-    U32* largerPtr  = bt + 2*(current&btMask) + 1;
-    U32 matchEndIdx = current+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
-    U32 dummy32;   /* to be nullified at the end */
-    U32 mnum = 0;
-    U32 nbCompares = 1U << cParams->searchLog;
-
-    const ZSTD_matchState_t* dms    = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
-    const ZSTD_compressionParameters* const dmsCParams =
-                                      dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
-    const BYTE* const dmsBase       = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
-    const BYTE* const dmsEnd        = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
-    U32         const dmsHighLimit  = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
-    U32         const dmsLowLimit   = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
-    U32         const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
-    U32         const dmsHashLog    = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
-    U32         const dmsBtLog      = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
-    U32         const dmsBtMask     = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
-    U32         const dmsBtLow      = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
-
-    size_t bestLength = lengthToBeat-1;
-    DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", current);
-
-    /* check repCode */
-    assert(ll0 <= 1);   /* necessarily 1 or 0 */
-    {   U32 const lastR = ZSTD_REP_NUM + ll0;
-        U32 repCode;
-        for (repCode = ll0; repCode < lastR; repCode++) {
-            U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
-            U32 const repIndex = current - repOffset;
-            U32 repLen = 0;
-            assert(current >= dictLimit);
-            if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < current-dictLimit) {  /* equivalent to `current > repIndex >= dictLimit` */
-                if (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch)) {
-                    repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
-                }
-            } else {  /* repIndex < dictLimit || repIndex >= current */
-                const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
-                                             dmsBase + repIndex - dmsIndexDelta :
-                                             dictBase + repIndex;
-                assert(current >= windowLow);
-                if ( dictMode == ZSTD_extDict
-                  && ( ((repOffset-1) /*intentional overflow*/ < current - windowLow)  /* equivalent to `current > repIndex >= windowLow` */
-                     & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
-                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
-                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
-                }
-                if (dictMode == ZSTD_dictMatchState
-                  && ( ((repOffset-1) /*intentional overflow*/ < current - (dmsLowLimit + dmsIndexDelta))  /* equivalent to `current > repIndex >= dmsLowLimit` */
-                     & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
-                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
-                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
-            }   }
-            /* save longer solution */
-            if (repLen > bestLength) {
-                DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
-                            repCode, ll0, repOffset, repLen);
-                bestLength = repLen;
-                matches[mnum].off = repCode - ll0;
-                matches[mnum].len = (U32)repLen;
-                mnum++;
-                if ( (repLen > sufficient_len)
-                   | (ip+repLen == iLimit) ) {  /* best possible */
-                    return mnum;
-    }   }   }   }
-
-    /* HC3 match finder */
-    if ((mls == 3) /*static*/ && (bestLength < mls)) {
-        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
-        if ((matchIndex3 >= matchLow)
-          & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
-            size_t mlen;
-            if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
-                const BYTE* const match = base + matchIndex3;
-                mlen = ZSTD_count(ip, match, iLimit);
-            } else {
-                const BYTE* const match = dictBase + matchIndex3;
-                mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
-            }
-
-            /* save best solution */
-            if (mlen >= mls /* == 3 > bestLength */) {
-                DEBUGLOG(8, "found small match with hlog3, of length %u",
-                            (U32)mlen);
-                bestLength = mlen;
-                assert(current > matchIndex3);
-                assert(mnum==0);  /* no prior solution */
-                matches[0].off = (current - matchIndex3) + ZSTD_REP_MOVE;
-                matches[0].len = (U32)mlen;
-                mnum = 1;
-                if ( (mlen > sufficient_len) |
-                     (ip+mlen == iLimit) ) {  /* best possible length */
-                    ms->nextToUpdate = current+1;  /* skip insertion */
-                    return 1;
-        }   }   }
-        /* no dictMatchState lookup: dicts don't have a populated HC3 table */
-    }
-
-    hashTable[h] = current;   /* Update Hash Table */
-
-    while (nbCompares-- && (matchIndex >= matchLow)) {
-        U32* const nextPtr = bt + 2*(matchIndex & btMask);
-        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
-        const BYTE* match;
-        assert(current > matchIndex);
-
-        if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
-            assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */
-            match = base + matchIndex;
-            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
-        } else {
-            match = dictBase + matchIndex;
-            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
-            if (matchIndex+matchLength >= dictLimit)
-                match = base + matchIndex;   /* prepare for match[matchLength] */
-        }
-
-        if (matchLength > bestLength) {
-            DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
-                    (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
-            assert(matchEndIdx > matchIndex);
-            if (matchLength > matchEndIdx - matchIndex)
-                matchEndIdx = matchIndex + (U32)matchLength;
-            bestLength = matchLength;
-            matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
-            matches[mnum].len = (U32)matchLength;
-            mnum++;
-            if ( (matchLength > ZSTD_OPT_NUM)
-               | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
-                if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
-                break; /* drop, to preserve bt consistency (miss a little bit of compression) */
-            }
-        }
-
-        if (match[matchLength] < ip[matchLength]) {
-            /* match smaller than current */
-            *smallerPtr = matchIndex;             /* update smaller idx */
-            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
-            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
-            smallerPtr = nextPtr+1;               /* new candidate => larger than match, which was smaller than current */
-            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous, closer to current */
-        } else {
-            *largerPtr = matchIndex;
-            commonLengthLarger = matchLength;
-            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
-            largerPtr = nextPtr;
-            matchIndex = nextPtr[0];
-    }   }
-
-    *smallerPtr = *largerPtr = 0;
-
-    if (dictMode == ZSTD_dictMatchState && nbCompares) {
-        size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
-        U32 dictMatchIndex = dms->hashTable[dmsH];
-        const U32* const dmsBt = dms->chainTable;
-        commonLengthSmaller = commonLengthLarger = 0;
-        while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) {
-            const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
-            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
-            const BYTE* match = dmsBase + dictMatchIndex;
-            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
-            if (dictMatchIndex+matchLength >= dmsHighLimit)
-                match = base + dictMatchIndex + dmsIndexDelta;   /* to prepare for next usage of match[matchLength] */
-
-            if (matchLength > bestLength) {
-                matchIndex = dictMatchIndex + dmsIndexDelta;
-                DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
-                        (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
-                if (matchLength > matchEndIdx - matchIndex)
-                    matchEndIdx = matchIndex + (U32)matchLength;
-                bestLength = matchLength;
-                matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
-                matches[mnum].len = (U32)matchLength;
-                mnum++;
-                if ( (matchLength > ZSTD_OPT_NUM)
-                   | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
-                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
-                }
-            }
-
-            if (dictMatchIndex <= dmsBtLow) { break; }   /* beyond tree size, stop the search */
-            if (match[matchLength] < ip[matchLength]) {
-                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
-                dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
-            } else {
-                /* match is larger than current */
-                commonLengthLarger = matchLength;
-                dictMatchIndex = nextPtr[0];
-            }
-        }
-    }
-
-    assert(matchEndIdx > current+8);
-    ms->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */
-    return mnum;
-}
-
-
-FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
-                        ZSTD_match_t* matches,   /* store result (match found, increasing size) in this table */
-                        ZSTD_matchState_t* ms,
-                        U32* nextToUpdate3,
-                        const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
-                        const U32 rep[ZSTD_REP_NUM],
-                        U32 const ll0,
-                        U32 const lengthToBeat)
-{
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-    U32 const matchLengthSearch = cParams->minMatch;
-    DEBUGLOG(8, "ZSTD_BtGetAllMatches");
-    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
-    ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
-    switch(matchLengthSearch)
-    {
-    case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);
-    default :
-    case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);
-    case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);
-    case 7 :
-    case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);
-    }
-}
-
-
-/*-*******************************
-*  Optimal parser
-*********************************/
-typedef struct repcodes_s {
-    U32 rep[3];
-} repcodes_t;
-
-static repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
-{
-    repcodes_t newReps;
-    if (offset >= ZSTD_REP_NUM) {  /* full offset */
-        newReps.rep[2] = rep[1];
-        newReps.rep[1] = rep[0];
-        newReps.rep[0] = offset - ZSTD_REP_MOVE;
-    } else {   /* repcode */
-        U32 const repCode = offset + ll0;
-        if (repCode > 0) {  /* note : if repCode==0, no change */
-            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
-            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
-            newReps.rep[1] = rep[0];
-            newReps.rep[0] = currentOffset;
-        } else {   /* repCode == 0 */
-            memcpy(&newReps, rep, sizeof(newReps));
-        }
-    }
-    return newReps;
-}
-
-
-static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
-{
-    return sol.litlen + sol.mlen;
-}
-
-#if 0 /* debug */
-
-static void
-listStats(const U32* table, int lastEltID)
-{
-    int const nbElts = lastEltID + 1;
-    int enb;
-    for (enb=0; enb < nbElts; enb++) {
-        (void)table;
-        //RAWLOG(2, "%3i:%3i,  ", enb, table[enb]);
-        RAWLOG(2, "%4i,", table[enb]);
-    }
-    RAWLOG(2, " \n");
-}
-
-#endif
-
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
-                               seqStore_t* seqStore,
-                               U32 rep[ZSTD_REP_NUM],
-                         const void* src, size_t srcSize,
-                         const int optLevel,
-                         const ZSTD_dictMode_e dictMode)
-{
-    optState_t* const optStatePtr = &ms->opt;
-    const BYTE* const istart = (const BYTE*)src;
-    const BYTE* ip = istart;
-    const BYTE* anchor = istart;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* const ilimit = iend - 8;
-    const BYTE* const base = ms->window.base;
-    const BYTE* const prefixStart = base + ms->window.dictLimit;
-    const ZSTD_compressionParameters* const cParams = &ms->cParams;
-
-    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
-    U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
-    U32 nextToUpdate3 = ms->nextToUpdate;
-
-    ZSTD_optimal_t* const opt = optStatePtr->priceTable;
-    ZSTD_match_t* const matches = optStatePtr->matchTable;
-    ZSTD_optimal_t lastSequence;
-
-    /* init */
-    DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
-                (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
-    assert(optLevel <= 2);
-    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
-    ip += (ip==prefixStart);
-
-    /* Match Loop */
-    while (ip < ilimit) {
-        U32 cur, last_pos = 0;
-
-        /* find first match */
-        {   U32 const litlen = (U32)(ip - anchor);
-            U32 const ll0 = !litlen;
-            U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
-            if (!nbMatches) { ip++; continue; }
-
-            /* initialize opt[0] */
-            { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
-            opt[0].mlen = 0;  /* means is_a_literal */
-            opt[0].litlen = litlen;
-            opt[0].price = ZSTD_literalsContribution(anchor, litlen, optStatePtr, optLevel);
-
-            /* large match -> immediate encoding */
-            {   U32 const maxML = matches[nbMatches-1].len;
-                U32 const maxOffset = matches[nbMatches-1].off;
-                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
-                            nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
-
-                if (maxML > sufficient_len) {
-                    lastSequence.litlen = litlen;
-                    lastSequence.mlen = maxML;
-                    lastSequence.off = maxOffset;
-                    DEBUGLOG(6, "large match (%u>%u), immediate encoding",
-                                maxML, sufficient_len);
-                    cur = 0;
-                    last_pos = ZSTD_totalLen(lastSequence);
-                    goto _shortestPath;
-            }   }
-
-            /* set prices for first matches starting position == 0 */
-            {   U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
-                U32 pos;
-                U32 matchNb;
-                for (pos = 1; pos < minMatch; pos++) {
-                    opt[pos].price = ZSTD_MAX_PRICE;   /* mlen, litlen and price will be fixed during forward scanning */
-                }
-                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
-                    U32 const offset = matches[matchNb].off;
-                    U32 const end = matches[matchNb].len;
-                    repcodes_t const repHistory = ZSTD_updateRep(rep, offset, ll0);
-                    for ( ; pos <= end ; pos++ ) {
-                        U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
-                        U32 const sequencePrice = literalsPrice + matchPrice;
-                        DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
-                                    pos, ZSTD_fCost(sequencePrice));
-                        opt[pos].mlen = pos;
-                        opt[pos].off = offset;
-                        opt[pos].litlen = litlen;
-                        opt[pos].price = sequencePrice;
-                        ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
-                        memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
-                }   }
-                last_pos = pos-1;
-            }
-        }
-
-        /* check further positions */
-        for (cur = 1; cur <= last_pos; cur++) {
-            const BYTE* const inr = ip + cur;
-            assert(cur < ZSTD_OPT_NUM);
-            DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
-
-            /* Fix current position with one literal if cheaper */
-            {   U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
-                int const price = opt[cur-1].price
-                                + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
-                                + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
-                                - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
-                assert(price < 1000000000); /* overflow check */
-                if (price <= opt[cur].price) {
-                    DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
-                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
-                                opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
-                    opt[cur].mlen = 0;
-                    opt[cur].off = 0;
-                    opt[cur].litlen = litlen;
-                    opt[cur].price = price;
-                    memcpy(opt[cur].rep, opt[cur-1].rep, sizeof(opt[cur].rep));
-                } else {
-                    DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
-                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
-                                opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
-                }
-            }
-
-            /* last match must start at a minimum distance of 8 from oend */
-            if (inr > ilimit) continue;
-
-            if (cur == last_pos) break;
-
-            if ( (optLevel==0) /*static_test*/
-              && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
-                DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
-                continue;  /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
-            }
-
-            {   U32 const ll0 = (opt[cur].mlen != 0);
-                U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
-                U32 const previousPrice = opt[cur].price;
-                U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
-                U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
-                U32 matchNb;
-                if (!nbMatches) {
-                    DEBUGLOG(7, "rPos:%u : no match found", cur);
-                    continue;
-                }
-
-                {   U32 const maxML = matches[nbMatches-1].len;
-                    DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
-                                inr-istart, cur, nbMatches, maxML);
-
-                    if ( (maxML > sufficient_len)
-                      || (cur + maxML >= ZSTD_OPT_NUM) ) {
-                        lastSequence.mlen = maxML;
-                        lastSequence.off = matches[nbMatches-1].off;
-                        lastSequence.litlen = litlen;
-                        cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0;  /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
-                        last_pos = cur + ZSTD_totalLen(lastSequence);
-                        if (cur > ZSTD_OPT_NUM) cur = 0;   /* underflow => first match */
-                        goto _shortestPath;
-                }   }
-
-                /* set prices using matches found at position == cur */
-                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
-                    U32 const offset = matches[matchNb].off;
-                    repcodes_t const repHistory = ZSTD_updateRep(opt[cur].rep, offset, ll0);
-                    U32 const lastML = matches[matchNb].len;
-                    U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
-                    U32 mlen;
-
-                    DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
-                                matchNb, matches[matchNb].off, lastML, litlen);
-
-                    for (mlen = lastML; mlen >= startML; mlen--) {  /* scan downward */
-                        U32 const pos = cur + mlen;
-                        int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
-
-                        if ((pos > last_pos) || (price < opt[pos].price)) {
-                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
-                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
-                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }   /* fill empty positions */
-                            opt[pos].mlen = mlen;
-                            opt[pos].off = offset;
-                            opt[pos].litlen = litlen;
-                            opt[pos].price = price;
-                            ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
-                            memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
-                        } else {
-                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
-                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
-                            if (optLevel==0) break;  /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
-                        }
-            }   }   }
-        }  /* for (cur = 1; cur <= last_pos; cur++) */
-
-        lastSequence = opt[last_pos];
-        cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0;  /* single sequence, and it starts before `ip` */
-        assert(cur < ZSTD_OPT_NUM);  /* control overflow*/
-
-_shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
-        assert(opt[0].mlen == 0);
-
-        {   U32 const storeEnd = cur + 1;
-            U32 storeStart = storeEnd;
-            U32 seqPos = cur;
-
-            DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
-                        last_pos, cur); (void)last_pos;
-            assert(storeEnd < ZSTD_OPT_NUM);
-            DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
-                        storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
-            opt[storeEnd] = lastSequence;
-            while (seqPos > 0) {
-                U32 const backDist = ZSTD_totalLen(opt[seqPos]);
-                storeStart--;
-                DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
-                            seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
-                opt[storeStart] = opt[seqPos];
-                seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
-            }
-
-            /* save sequences */
-            DEBUGLOG(6, "sending selected sequences into seqStore")
-            {   U32 storePos;
-                for (storePos=storeStart; storePos <= storeEnd; storePos++) {
-                    U32 const llen = opt[storePos].litlen;
-                    U32 const mlen = opt[storePos].mlen;
-                    U32 const offCode = opt[storePos].off;
-                    U32 const advance = llen + mlen;
-                    DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
-                                anchor - istart, (unsigned)llen, (unsigned)mlen);
-
-                    if (mlen==0) {  /* only literals => must be last "sequence", actually starting a new stream of sequences */
-                        assert(storePos == storeEnd);   /* must be last sequence */
-                        ip = anchor + llen;     /* last "sequence" is a bunch of literals => don't progress anchor */
-                        continue;   /* will finish */
-                    }
-
-                    /* repcodes update : like ZSTD_updateRep(), but update in place */
-                    if (offCode >= ZSTD_REP_NUM) {  /* full offset */
-                        rep[2] = rep[1];
-                        rep[1] = rep[0];
-                        rep[0] = offCode - ZSTD_REP_MOVE;
-                    } else {   /* repcode */
-                        U32 const repCode = offCode + (llen==0);
-                        if (repCode) {  /* note : if repCode==0, no change */
-                            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
-                            if (repCode >= 2) rep[2] = rep[1];
-                            rep[1] = rep[0];
-                            rep[0] = currentOffset;
-                    }   }
-
-                    assert(anchor + llen <= iend);
-                    ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
-                    ZSTD_storeSeq(seqStore, llen, anchor, offCode, mlen-MINMATCH);
-                    anchor += advance;
-                    ip = anchor;
-            }   }
-            ZSTD_setBasePrices(optStatePtr, optLevel);
-        }
-
-    }   /* while (ip < ilimit) */
-
-    /* Return the last literals size */
-    return (size_t)(iend - anchor);
-}
-
-
-size_t ZSTD_compressBlock_btopt(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        const void* src, size_t srcSize)
-{
-    DEBUGLOG(5, "ZSTD_compressBlock_btopt");
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
-}
-
-
-/* used in 2-pass strategy */
-static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
-{
-    U32 s, sum=0;
-    assert(ZSTD_FREQ_DIV+bonus >= 0);
-    for (s=0; s<lastEltIndex+1; s++) {
-        table[s] <<= ZSTD_FREQ_DIV+bonus;
-        table[s]--;
-        sum += table[s];
-    }
-    return sum;
-}
-
-/* used in 2-pass strategy */
-MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
-{
-    if (ZSTD_compressedLiterals(optPtr))
-        optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
-    optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
-    optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
-    optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
-}
-
-/* ZSTD_initStats_ultra():
- * make a first compression pass, just to seed stats with more accurate starting values.
- * only works on first block, with no dictionary and no ldm.
- * this function cannot error, hence its contract must be respected.
- */
-static void
-ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
-                     seqStore_t* seqStore,
-                     U32 rep[ZSTD_REP_NUM],
-               const void* src, size_t srcSize)
-{
-    U32 tmpRep[ZSTD_REP_NUM];  /* updated rep codes will sink here */
-    memcpy(tmpRep, rep, sizeof(tmpRep));
-
-    DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
-    assert(ms->opt.litLengthSum == 0);    /* first block */
-    assert(seqStore->sequences == seqStore->sequencesStart);   /* no ldm */
-    assert(ms->window.dictLimit == ms->window.lowLimit);   /* no dictionary */
-    assert(ms->window.dictLimit - ms->nextToUpdate <= 1);  /* no prefix (note: intentional overflow, defined as 2-complement) */
-
-    ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);   /* generate stats into ms->opt*/
-
-    /* invalidate first scan from history */
-    ZSTD_resetSeqStore(seqStore);
-    ms->window.base -= srcSize;
-    ms->window.dictLimit += (U32)srcSize;
-    ms->window.lowLimit = ms->window.dictLimit;
-    ms->nextToUpdate = ms->window.dictLimit;
-
-    /* reinforce weight of collected statistics */
-    ZSTD_upscaleStats(&ms->opt);
-}
-
-size_t ZSTD_compressBlock_btultra(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        const void* src, size_t srcSize)
-{
-    DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
-}
-
-size_t ZSTD_compressBlock_btultra2(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        const void* src, size_t srcSize)
-{
-    U32 const current = (U32)((const BYTE*)src - ms->window.base);
-    DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
-
-    /* 2-pass strategy:
-     * this strategy makes a first pass over first block to collect statistics
-     * and seed next round's statistics with it.
-     * After 1st pass, function forgets everything, and starts a new block.
-     * Consequently, this can only work if no data has been previously loaded in tables,
-     * aka, no dictionary, no prefix, no ldm preprocessing.
-     * The compression ratio gain is generally small (~0.5% on first block),
-     * the cost is 2x cpu time on first block. */
-    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
-    if ( (ms->opt.litLengthSum==0)   /* first block */
-      && (seqStore->sequences == seqStore->sequencesStart)  /* no ldm */
-      && (ms->window.dictLimit == ms->window.lowLimit)   /* no dictionary */
-      && (current == ms->window.dictLimit)   /* start of frame, nothing already loaded nor skipped */
-      && (srcSize > ZSTD_PREDEF_THRESHOLD)
-      ) {
-        ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
-    }
-
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
-}
-
-size_t ZSTD_compressBlock_btopt_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        const void* src, size_t srcSize)
-{
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
-}
-
-size_t ZSTD_compressBlock_btultra_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        const void* src, size_t srcSize)
-{
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
-}
-
-size_t ZSTD_compressBlock_btopt_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        const void* src, size_t srcSize)
-{
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
-}
-
-size_t ZSTD_compressBlock_btultra_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        const void* src, size_t srcSize)
-{
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
-}
-
-/* note : no btultra2 variant for extDict nor dictMatchState,
- * because btultra2 is not meant to work with dictionaries
- * and applies only to the first block (no prefix) */
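The removed zstd_opt.c above is the optimal parser; a central piece is the repcode history, which ZSTD_updateRep() and the in-place update in the store loop rotate after every stored sequence, with a virtual code meaning rep[0]-1 when the literal length is zero. As a reading aid only, not part of the vendored sources, here is a minimal Go sketch of that rule; the names updateRep, zstdRepNum and zstdRepMove are invented for illustration.

```go
package main

import "fmt"

const (
	zstdRepNum  = 3              // number of tracked repeat offsets (ZSTD_REP_NUM)
	zstdRepMove = zstdRepNum - 1 // bias added to "full" offsets (ZSTD_REP_MOVE)
)

// updateRep mirrors the repcode-history rule of the removed optimal parser:
// a full offset (>= zstdRepNum, already biased by zstdRepMove) pushes a new
// front entry; a repcode rotates the history; repCode 0 leaves it unchanged;
// the virtual code equal to zstdRepNum (reachable only when ll0 is true)
// stands for rep[0]-1.
func updateRep(rep [zstdRepNum]uint32, offset uint32, ll0 bool) [zstdRepNum]uint32 {
	newRep := rep
	if offset >= zstdRepNum { // full offset
		newRep[2] = rep[1]
		newRep[1] = rep[0]
		newRep[0] = offset - zstdRepMove
		return newRep
	}
	repCode := offset
	if ll0 {
		repCode++
	}
	if repCode == 0 { // repcode 0 preceded by literals: history unchanged
		return newRep
	}
	var cur uint32
	if repCode == zstdRepNum { // virtual code: rep[0] - 1
		cur = rep[0] - 1
	} else {
		cur = rep[repCode]
	}
	if repCode >= 2 {
		newRep[2] = rep[1]
	}
	newRep[1] = rep[0]
	newRep[0] = cur
	return newRep
}

func main() {
	rep := [zstdRepNum]uint32{11, 22, 33}
	fmt.Println(updateRep(rep, 1, false)) // repcode 1 -> [22 11 33]
	fmt.Println(updateRep(rep, 7, false)) // full offset -> [5 11 22]
}
```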
diff --git a/vendor/github.com/DataDog/zstd/zstd_opt.h b/vendor/github.com/DataDog/zstd/zstd_opt.h
deleted file mode 100644
index 094f747..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_opt.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_OPT_H
-#define ZSTD_OPT_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#include "zstd_compress_internal.h"
-
-/* used in ZSTD_loadDictionaryContent() */
-void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
-
-size_t ZSTD_compressBlock_btopt(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btultra(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btultra2(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-
-
-size_t ZSTD_compressBlock_btopt_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btultra_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-
-size_t ZSTD_compressBlock_btopt_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btultra_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize);
-
-        /* note : no btultra2 variant for extDict nor dictMatchState,
-         * because btultra2 is not meant to work with dictionaries
-         * and applies only to the first block (no prefix) */
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_OPT_H */
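The block compressors declared in this removed header all price candidate sequences in fractional bits: with dynamic statistics, a symbol's cost is roughly log2(totalCount/symbolCount), scaled by BITCOST_MULTIPLIER so the parser can stay in integer arithmetic. The Go snippet below only illustrates that idea under those assumptions; it does not reproduce zstd's fixed-point WEIGHT() implementation, and bitCostMultiplier and symbolPrice are invented names.

```go
package main

import (
	"fmt"
	"math"
)

const bitCostMultiplier = 256 // fixed-point scale: 1 bit == 256 price units (illustrative)

// symbolPrice returns an approximate cost, in scaled fractional bits, of
// emitting a symbol observed freq times out of total occurrences, i.e.
// roughly log2(total/freq) * bitCostMultiplier.
func symbolPrice(freq, total uint32) uint32 {
	if freq == 0 || total == 0 {
		return 30 * bitCostMultiplier // arbitrary large price for unseen symbols (assumption)
	}
	bits := math.Log2(float64(total)) - math.Log2(float64(freq))
	return uint32(bits * bitCostMultiplier)
}

func main() {
	// A symbol seen 1 time out of 1024 costs about 10 bits (2560 units);
	// one seen 512 times out of 1024 costs about 1 bit (256 units).
	fmt.Println(symbolPrice(1, 1024), symbolPrice(512, 1024))
}
```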
diff --git a/vendor/github.com/DataDog/zstd/zstd_stream.go b/vendor/github.com/DataDog/zstd/zstd_stream.go
deleted file mode 100644
index 2330353..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_stream.go
+++ /dev/null
@@ -1,294 +0,0 @@
-package zstd
-
-/*
-#define ZSTD_STATIC_LINKING_ONLY
-#define ZBUFF_DISABLE_DEPRECATE_WARNINGS
-#include "zstd.h"
-#include "zbuff.h"
-*/
-import "C"
-import (
-	"errors"
-	"fmt"
-	"io"
-	"runtime"
-	"unsafe"
-)
-
-var errShortRead = errors.New("short read")
-
-// Writer is an io.WriteCloser that zstd-compresses its input.
-type Writer struct {
-	CompressionLevel int
-
-	ctx              *C.ZSTD_CCtx
-	dict             []byte
-	dstBuffer        []byte
-	firstError       error
-	underlyingWriter io.Writer
-}
-
-func resize(in []byte, newSize int) []byte {
-	if in == nil {
-		return make([]byte, newSize)
-	}
-	if newSize <= cap(in) {
-		return in[:newSize]
-	}
-	toAdd := newSize - len(in)
-	return append(in, make([]byte, toAdd)...)
-}
-
-// NewWriter creates a new Writer with default compression options.  Writes to
-// the writer will be written in compressed form to w.
-func NewWriter(w io.Writer) *Writer {
-	return NewWriterLevelDict(w, DefaultCompression, nil)
-}
-
-// NewWriterLevel is like NewWriter but specifies the compression level instead
-// of assuming default compression.
-//
-// The level can be DefaultCompression or any integer value between BestSpeed
-// and BestCompression inclusive.
-func NewWriterLevel(w io.Writer, level int) *Writer {
-	return NewWriterLevelDict(w, level, nil)
-
-}
-
-// NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to
-// compress with.  If the dictionary is empty or nil it is ignored. The dictionary
-// should not be modified until the writer is closed.
-func NewWriterLevelDict(w io.Writer, level int, dict []byte) *Writer {
-	var err error
-	ctx := C.ZSTD_createCCtx()
-
-	if dict == nil {
-		err = getError(int(C.ZSTD_compressBegin(ctx,
-			C.int(level))))
-	} else {
-		err = getError(int(C.ZSTD_compressBegin_usingDict(
-			ctx,
-			unsafe.Pointer(&dict[0]),
-			C.size_t(len(dict)),
-			C.int(level))))
-	}
-
-	return &Writer{
-		CompressionLevel: level,
-		ctx:              ctx,
-		dict:             dict,
-		dstBuffer:        make([]byte, CompressBound(1024)),
-		firstError:       err,
-		underlyingWriter: w,
-	}
-}
-
-// Write writes a compressed form of p to the underlying io.Writer.
-func (w *Writer) Write(p []byte) (int, error) {
-	if w.firstError != nil {
-		return 0, w.firstError
-	}
-	if len(p) == 0 {
-		return 0, nil
-	}
-	// Check if dstBuffer is large enough
-	if len(w.dstBuffer) < CompressBound(len(p)) {
-		w.dstBuffer = make([]byte, CompressBound(len(p)))
-	}
-
-	retCode := C.ZSTD_compressContinue(
-		w.ctx,
-		unsafe.Pointer(&w.dstBuffer[0]),
-		C.size_t(len(w.dstBuffer)),
-		unsafe.Pointer(&p[0]),
-		C.size_t(len(p)))
-
-	if err := getError(int(retCode)); err != nil {
-		return 0, err
-	}
-	written := int(retCode)
-
-	// Write to underlying buffer
-	_, err := w.underlyingWriter.Write(w.dstBuffer[:written])
-
-	// Same behaviour as zlib: we can't know how much data was written, only
-	// whether there was an error
-	if err != nil {
-		return 0, err
-	}
-	return len(p), err
-}
-
-// Close closes the Writer, flushing any unwritten data to the underlying
-// io.Writer and freeing objects, but does not close the underlying io.Writer.
-func (w *Writer) Close() error {
-	retCode := C.ZSTD_compressEnd(
-		w.ctx,
-		unsafe.Pointer(&w.dstBuffer[0]),
-		C.size_t(len(w.dstBuffer)),
-		unsafe.Pointer(nil),
-		C.size_t(0))
-
-	if err := getError(int(retCode)); err != nil {
-		return err
-	}
-	written := int(retCode)
-	retCode = C.ZSTD_freeCCtx(w.ctx) // Safely close buffer before writing the end
-
-	if err := getError(int(retCode)); err != nil {
-		return err
-	}
-
-	_, err := w.underlyingWriter.Write(w.dstBuffer[:written])
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// reader is an io.ReadCloser that decompresses when read from.
-type reader struct {
-	ctx                 *C.ZBUFF_DCtx
-	compressionBuffer   []byte
-	compressionLeft     int
-	decompressionBuffer []byte
-	decompOff           int
-	decompSize          int
-	dict                []byte
-	firstError          error
-	recommendedSrcSize  int
-	underlyingReader    io.Reader
-}
-
-// NewReader creates a new io.ReadCloser.  Reads from the returned ReadCloser
-// read and decompress data from r.  It is the caller's responsibility to call
-// Close on the ReadCloser when done.  If this is not done, underlying objects
-// in the zstd library will not be freed.
-func NewReader(r io.Reader) io.ReadCloser {
-	return NewReaderDict(r, nil)
-}
-
-// NewReaderDict is like NewReader but uses a preset dictionary.  NewReaderDict
-// ignores the dictionary if it is nil.
-func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
-	var err error
-	ctx := C.ZBUFF_createDCtx()
-	if len(dict) == 0 {
-		err = getError(int(C.ZBUFF_decompressInit(ctx)))
-	} else {
-		err = getError(int(C.ZBUFF_decompressInitDictionary(
-			ctx,
-			unsafe.Pointer(&dict[0]),
-			C.size_t(len(dict)))))
-	}
-	cSize := int(C.ZBUFF_recommendedDInSize())
-	dSize := int(C.ZBUFF_recommendedDOutSize())
-	if cSize <= 0 {
-		panic(fmt.Errorf("ZBUFF_recommendedDInSize() returned invalid size: %v", cSize))
-	}
-	if dSize <= 0 {
-		panic(fmt.Errorf("ZBUFF_recommendedDOutSize() returned invalid size: %v", dSize))
-	}
-
-	compressionBuffer := make([]byte, cSize)
-	decompressionBuffer := make([]byte, dSize)
-	return &reader{
-		ctx:                 ctx,
-		dict:                dict,
-		compressionBuffer:   compressionBuffer,
-		decompressionBuffer: decompressionBuffer,
-		firstError:          err,
-		recommendedSrcSize:  cSize,
-		underlyingReader:    r,
-	}
-}
-
-// Close frees the allocated C objects
-func (r *reader) Close() error {
-	return getError(int(C.ZBUFF_freeDCtx(r.ctx)))
-}
-
-func (r *reader) Read(p []byte) (int, error) {
-
-	// If we already have enough bytes, return
-	if r.decompSize-r.decompOff >= len(p) {
-		copy(p, r.decompressionBuffer[r.decompOff:])
-		r.decompOff += len(p)
-		return len(p), nil
-	}
-
-	copy(p, r.decompressionBuffer[r.decompOff:r.decompSize])
-	got := r.decompSize - r.decompOff
-	r.decompSize = 0
-	r.decompOff = 0
-
-	for got < len(p) {
-		// Populate src
-		src := r.compressionBuffer
-		reader := r.underlyingReader
-		n, err := TryReadFull(reader, src[r.compressionLeft:])
-		if err != nil && err != errShortRead { // Handle underlying reader errors first
-			return 0, fmt.Errorf("failed to read from underlying reader: %s", err)
-		} else if n == 0 && r.compressionLeft == 0 {
-			return got, io.EOF
-		}
-		src = src[:r.compressionLeft+n]
-
-		// C code
-		cSrcSize := C.size_t(len(src))
-		cDstSize := C.size_t(len(r.decompressionBuffer))
-		retCode := int(C.ZBUFF_decompressContinue(
-			r.ctx,
-			unsafe.Pointer(&r.decompressionBuffer[0]),
-			&cDstSize,
-			unsafe.Pointer(&src[0]),
-			&cSrcSize))
-
-		// Keep src alive here: even though it is reused later, that code might be deleted at some point
-		runtime.KeepAlive(src)
-		if err = getError(retCode); err != nil {
-			return 0, fmt.Errorf("failed to decompress: %s", err)
-		}
-
-		// Put everything in buffer
-		if int(cSrcSize) < len(src) {
-			left := src[int(cSrcSize):]
-			copy(r.compressionBuffer, left)
-		}
-		r.compressionLeft = len(src) - int(cSrcSize)
-		r.decompSize = int(cDstSize)
-		r.decompOff = copy(p[got:], r.decompressionBuffer[:r.decompSize])
-		got += r.decompOff
-
-		// Resize buffers
-		nsize := retCode // Hint for next src buffer size
-		if nsize <= 0 {
-			// Reset to recommended size
-			nsize = r.recommendedSrcSize
-		}
-		if nsize < r.compressionLeft {
-			nsize = r.compressionLeft
-		}
-		r.compressionBuffer = resize(r.compressionBuffer, nsize)
-	}
-	return got, nil
-}
-
-// TryReadFull reads into buf just as io.ReadFull does, but it expects that the
-// stream may end early, and does not return ErrUnexpectedEOF as ReadAtLeast does.
-// It returns errShortRead instead, to distinguish short reads from failures.
-// We cannot use ReadFull/ReadAtLeast because they mask Reader errors, such as
-// network failures, and cause a panic instead of returning an error.
-func TryReadFull(r io.Reader, buf []byte) (n int, err error) {
-	for n < len(buf) && err == nil {
-		var nn int
-		nn, err = r.Read(buf[n:])
-		n += nn
-	}
-	if n == len(buf) && err == io.EOF {
-		err = nil // EOF at the end is somewhat expected
-	} else if err == io.EOF {
-		err = errShortRead
-	}
-	return
-}
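The removed zstd_stream.go provided the streaming Writer/Reader wrappers around the C library. For reference, a typical round trip through that API looked like the sketch below, assuming the package is still imported as github.com/DataDog/zstd as in this vendor tree; error handling is kept minimal.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/DataDog/zstd"
)

func main() {
	// Compress with the streaming Writer; Close flushes the frame epilogue.
	var compressed bytes.Buffer
	zw := zstd.NewWriter(&compressed)
	if _, err := zw.Write([]byte("hello, zstd streaming")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Decompress with the streaming Reader; Close frees the C decompression context.
	zr := zstd.NewReader(&compressed)
	defer zr.Close()
	plain, err := io.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(plain))
}
```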
diff --git a/vendor/github.com/DataDog/zstd/zstd_v01.c b/vendor/github.com/DataDog/zstd/zstd_v01.c
deleted file mode 100644
index ae8cba2..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_v01.c
+++ /dev/null
@@ -1,2152 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-/******************************************
-*  Includes
-******************************************/
-#include <stddef.h>    /* size_t, ptrdiff_t */
-#include "zstd_v01.h"
-#include "error_private.h"
-
-
-/******************************************
-*  Static allocation
-******************************************/
-/* You can statically allocate an FSE CTable/DTable as a table of unsigned using the macro below */
-#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
-
-/* You can statically allocate a Huff0 DTable as a table of unsigned short using the macro below */
-#define HUF_DTABLE_SIZE_U16(maxTableLog)   (1 + (1<<maxTableLog))
-#define HUF_CREATE_STATIC_DTABLE(DTable, maxTableLog) \
-        unsigned short DTable[HUF_DTABLE_SIZE_U16(maxTableLog)] = { maxTableLog }
-
-
-/******************************************
-*  Error Management
-******************************************/
-#define FSE_LIST_ERRORS(ITEM) \
-        ITEM(FSE_OK_NoError) ITEM(FSE_ERROR_GENERIC) \
-        ITEM(FSE_ERROR_tableLog_tooLarge) ITEM(FSE_ERROR_maxSymbolValue_tooLarge) ITEM(FSE_ERROR_maxSymbolValue_tooSmall) \
-        ITEM(FSE_ERROR_dstSize_tooSmall) ITEM(FSE_ERROR_srcSize_wrong)\
-        ITEM(FSE_ERROR_corruptionDetected) \
-        ITEM(FSE_ERROR_maxCode)
-
-#define FSE_GENERATE_ENUM(ENUM) ENUM,
-typedef enum { FSE_LIST_ERRORS(FSE_GENERATE_ENUM) } FSE_errorCodes;  /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
-
-
-/******************************************
-*  FSE symbol compression API
-******************************************/
-/*
-   This API consists of small unitary functions, which highly benefit from being inlined.
-   You will want to enable link-time-optimization to ensure these functions are properly inlined in your binary.
-   Visual seems to do it automatically.
-   For gcc or clang, you'll need to add -flto flag at compilation and linking stages.
-   If none of these solutions is applicable, include "fse.c" directly.
-*/
-
-typedef unsigned FSE_CTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
-typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
-
-typedef struct
-{
-    size_t bitContainer;
-    int    bitPos;
-    char*  startPtr;
-    char*  ptr;
-    char*  endPtr;
-} FSE_CStream_t;
-
-typedef struct
-{
-    ptrdiff_t   value;
-    const void* stateTable;
-    const void* symbolTT;
-    unsigned    stateLog;
-} FSE_CState_t;
-
-typedef struct
-{
-    size_t   bitContainer;
-    unsigned bitsConsumed;
-    const char* ptr;
-    const char* start;
-} FSE_DStream_t;
-
-typedef struct
-{
-    size_t      state;
-    const void* table;   /* precise table may vary, depending on U16 */
-} FSE_DState_t;
-
-typedef enum { FSE_DStream_unfinished = 0,
-               FSE_DStream_endOfBuffer = 1,
-               FSE_DStream_completed = 2,
-               FSE_DStream_tooFar = 3 } FSE_DStream_status;  /* result of FSE_reloadDStream() */
-               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... ?! */
-
-
-/****************************************************************
-*  Tuning parameters
-****************************************************************/
-/* MEMORY_USAGE :
-*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
-*  Increasing memory usage improves compression ratio
-*  Reduced memory usage can improve speed, due to cache effect
-*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
-#define FSE_MAX_MEMORY_USAGE 14
-#define FSE_DEFAULT_MEMORY_USAGE 13
-
-/* FSE_MAX_SYMBOL_VALUE :
-*  Maximum symbol value authorized.
-*  Required for proper stack allocation */
-#define FSE_MAX_SYMBOL_VALUE 255
-
-
-/****************************************************************
-*  template functions type & suffix
-****************************************************************/
-#define FSE_FUNCTION_TYPE BYTE
-#define FSE_FUNCTION_EXTENSION
-
-
-/****************************************************************
-*  Byte symbol type
-****************************************************************/
-typedef struct
-{
-    unsigned short newState;
-    unsigned char  symbol;
-    unsigned char  nbBits;
-} FSE_decode_t;   /* size == U32 */
-
-
-
-/****************************************************************
-*  Compiler specifics
-****************************************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  define FORCE_INLINE static __forceinline
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
-#else
-#  define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#    ifdef __GNUC__
-#      define FORCE_INLINE static inline __attribute__((always_inline))
-#    else
-#      define FORCE_INLINE static inline
-#    endif
-#  else
-#    define FORCE_INLINE static
-#  endif /* __STDC_VERSION__ */
-#endif
-
-
-/****************************************************************
-*  Includes
-****************************************************************/
-#include <stdlib.h>     /* malloc, free, qsort */
-#include <string.h>     /* memcpy, memset */
-#include <stdio.h>      /* printf (debug) */
-
-
-#ifndef MEM_ACCESS_MODULE
-#define MEM_ACCESS_MODULE
-/****************************************************************
-*  Basic Types
-*****************************************************************/
-#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-# include <stdint.h>
-typedef  uint8_t BYTE;
-typedef uint16_t U16;
-typedef  int16_t S16;
-typedef uint32_t U32;
-typedef  int32_t S32;
-typedef uint64_t U64;
-typedef  int64_t S64;
-#else
-typedef unsigned char       BYTE;
-typedef unsigned short      U16;
-typedef   signed short      S16;
-typedef unsigned int        U32;
-typedef   signed int        S32;
-typedef unsigned long long  U64;
-typedef   signed long long  S64;
-#endif
-
-#endif   /* MEM_ACCESS_MODULE */
-
-/****************************************************************
-*  Memory I/O
-*****************************************************************/
-/* FSE_FORCE_MEMORY_ACCESS
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The switch below allows selecting a different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
- *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method is portable but violates the C standard.
- *            It can generate buggy code on targets where the generated assembly depends on alignment.
- *            But in some circumstances, it's the only known way to get the best performance (i.e. GCC + ARMv6)
- * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#ifndef FSE_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-#    define FSE_FORCE_MEMORY_ACCESS 2
-#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
-  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
-#    define FSE_FORCE_MEMORY_ACCESS 1
-#  endif
-#endif
-
-
-static unsigned FSE_32bits(void)
-{
-    return sizeof(void*)==4;
-}
-
-static unsigned FSE_isLittleEndian(void)
-{
-    const union { U32 i; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
-    return one.c[0];
-}
-
-#if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2)
-
-static U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; }
-static U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; }
-static U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; }
-
-#elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1)
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
-
-static U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
-static U32 FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-static U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
-
-#else
-
-static U16 FSE_read16(const void* memPtr)
-{
-    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-static U32 FSE_read32(const void* memPtr)
-{
-    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-static U64 FSE_read64(const void* memPtr)
-{
-    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-#endif // FSE_FORCE_MEMORY_ACCESS
-
-static U16 FSE_readLE16(const void* memPtr)
-{
-    if (FSE_isLittleEndian())
-        return FSE_read16(memPtr);
-    else
-    {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U16)(p[0] + (p[1]<<8));
-    }
-}
-
-static U32 FSE_readLE32(const void* memPtr)
-{
-    if (FSE_isLittleEndian())
-        return FSE_read32(memPtr);
-    else
-    {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
-    }
-}
-
-
-static U64 FSE_readLE64(const void* memPtr)
-{
-    if (FSE_isLittleEndian())
-        return FSE_read64(memPtr);
-    else
-    {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
-                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
-    }
-}
-
-static size_t FSE_readLEST(const void* memPtr)
-{
-    if (FSE_32bits())
-        return (size_t)FSE_readLE32(memPtr);
-    else
-        return (size_t)FSE_readLE64(memPtr);
-}
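
FSE_readLE16/32/64 fall back to assembling the value byte by byte, so the result is a little-endian read on any host. For comparison only, the same guarantee in Go comes from encoding/binary:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// The byte sequence 0x01 0x02 0x03 0x04 decoded as a little-endian U32,
	// which is what FSE_readLE32 returns regardless of host endianness.
	b := []byte{0x01, 0x02, 0x03, 0x04}
	fmt.Printf("%#x\n", binary.LittleEndian.Uint32(b)) // 0x4030201
}
```
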
-
-
-
-/****************************************************************
-*  Constants
-*****************************************************************/
-#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
-#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
-#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
-#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
-#define FSE_MIN_TABLELOG 5
-
-#define FSE_TABLELOG_ABSOLUTE_MAX 15
-#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
-#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
-#endif
-
-
-/****************************************************************
-*  Error Management
-****************************************************************/
-#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
-
-
-/****************************************************************
-*  Complex types
-****************************************************************/
-typedef struct
-{
-    int deltaFindState;
-    U32 deltaNbBits;
-} FSE_symbolCompressionTransform; /* total 8 bytes */
-
-typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
-
-/****************************************************************
-*  Internal functions
-****************************************************************/
-FORCE_INLINE unsigned FSE_highbit32 (U32 val)
-{
-#   if defined(_MSC_VER)   /* Visual */
-    unsigned long r;
-    _BitScanReverse ( &r, val );
-    return (unsigned) r;
-#   elif defined(__GNUC__) && (GCC_VERSION >= 304)   /* GCC Intrinsic */
-    return 31 - __builtin_clz (val);
-#   else   /* Software version */
-    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
-    U32 v = val;
-    unsigned r;
-    v |= v >> 1;
-    v |= v >> 2;
-    v |= v >> 4;
-    v |= v >> 8;
-    v |= v >> 16;
-    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
-    return r;
-#   endif
-}
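
FSE_highbit32 returns the position of the highest set bit, i.e. floor(log2(val)) for val > 0. For reference, a Go equivalent using math/bits:

```go
package main

import (
	"fmt"
	"math/bits"
)

// highbit32 returns the position of the highest set bit, matching
// FSE_highbit32 above for v > 0.
func highbit32(v uint32) uint {
	return uint(bits.Len32(v)) - 1
}

func main() {
	fmt.Println(highbit32(1), highbit32(0x8000)) // 0 15
}
```
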
-
-
-/****************************************************************
-*  Templates
-****************************************************************/
-/*
-  designed to be included
-  for type-specific functions (template emulation in C)
-  Objective is to write these functions only once, for improved maintenance
-*/
-
-/* safety checks */
-#ifndef FSE_FUNCTION_EXTENSION
-#  error "FSE_FUNCTION_EXTENSION must be defined"
-#endif
-#ifndef FSE_FUNCTION_TYPE
-#  error "FSE_FUNCTION_TYPE must be defined"
-#endif
-
-/* Function names */
-#define FSE_CAT(X,Y) X##Y
-#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
-#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
-
-
-
-static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
-
-#define FSE_DECODE_TYPE FSE_decode_t
-
-
-typedef struct {
-    U16 tableLog;
-    U16 fastMode;
-} FSE_DTableHeader;   /* sizeof U32 */
-
-static size_t FSE_buildDTable
-(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
-    void* ptr = dt;
-    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
-    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)(ptr) + 1;   /* because dt is unsigned, 32-bits aligned on 32-bits */
-    const U32 tableSize = 1 << tableLog;
-    const U32 tableMask = tableSize-1;
-    const U32 step = FSE_tableStep(tableSize);
-    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
-    U32 position = 0;
-    U32 highThreshold = tableSize-1;
-    const S16 largeLimit= (S16)(1 << (tableLog-1));
-    U32 noLarge = 1;
-    U32 s;
-
-    /* Sanity Checks */
-    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return (size_t)-FSE_ERROR_maxSymbolValue_tooLarge;
-    if (tableLog > FSE_MAX_TABLELOG) return (size_t)-FSE_ERROR_tableLog_tooLarge;
-
-    /* Init, lay down lowprob symbols */
-    DTableH[0].tableLog = (U16)tableLog;
-    for (s=0; s<=maxSymbolValue; s++)
-    {
-        if (normalizedCounter[s]==-1)
-        {
-            tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
-            symbolNext[s] = 1;
-        }
-        else
-        {
-            if (normalizedCounter[s] >= largeLimit) noLarge=0;
-            symbolNext[s] = normalizedCounter[s];
-        }
-    }
-
-    /* Spread symbols */
-    for (s=0; s<=maxSymbolValue; s++)
-    {
-        int i;
-        for (i=0; i<normalizedCounter[s]; i++)
-        {
-            tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
-            position = (position + step) & tableMask;
-            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
-        }
-    }
-
-    if (position!=0) return (size_t)-FSE_ERROR_GENERIC;   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
-
-    /* Build Decoding table */
-    {
-        U32 i;
-        for (i=0; i<tableSize; i++)
-        {
-            FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
-            U16 nextState = symbolNext[symbol]++;
-            tableDecode[i].nbBits = (BYTE) (tableLog - FSE_highbit32 ((U32)nextState) );
-            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
-        }
-    }
-
-    DTableH->fastMode = (U16)noLarge;
-    return 0;
-}
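
The spreading loop above relies on FSE_tableStep returning a value that is coprime with the power-of-two table size, so repeated stepping visits every cell exactly once and ends back at position 0 (which is why a non-zero final position signals a corrupt normalizedCounter). An illustrative Go check of that property, not part of the decoder:

```go
package main

import "fmt"

// tableStep mirrors FSE_tableStep above.
func tableStep(tableSize uint32) uint32 {
	return (tableSize >> 1) + (tableSize >> 3) + 3
}

func main() {
	// For a power-of-two tableSize >= 32 the step is odd, so stepping
	// modulo tableSize covers every cell once before wrapping to 0.
	const tableSize = 32
	step := tableStep(tableSize)
	seen := make(map[uint32]bool)
	pos := uint32(0)
	for i := 0; i < tableSize; i++ {
		seen[pos] = true
		pos = (pos + step) & (tableSize - 1)
	}
	fmt.Println(len(seen) == tableSize, pos == 0) // true true
}
```
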
-
-
-/******************************************
-*  FSE byte symbol
-******************************************/
-#ifndef FSE_COMMONDEFS_ONLY
-
-static unsigned FSE_isError(size_t code) { return (code > (size_t)(-FSE_ERROR_maxCode)); }
-
-static short FSE_abs(short a)
-{
-    return a<0? -a : a;
-}
-
-
-/****************************************************************
-*  Header bitstream management
-****************************************************************/
-static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
-                 const void* headerBuffer, size_t hbSize)
-{
-    const BYTE* const istart = (const BYTE*) headerBuffer;
-    const BYTE* const iend = istart + hbSize;
-    const BYTE* ip = istart;
-    int nbBits;
-    int remaining;
-    int threshold;
-    U32 bitStream;
-    int bitCount;
-    unsigned charnum = 0;
-    int previous0 = 0;
-
-    if (hbSize < 4) return (size_t)-FSE_ERROR_srcSize_wrong;
-    bitStream = FSE_readLE32(ip);
-    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
-    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return (size_t)-FSE_ERROR_tableLog_tooLarge;
-    bitStream >>= 4;
-    bitCount = 4;
-    *tableLogPtr = nbBits;
-    remaining = (1<<nbBits)+1;
-    threshold = 1<<nbBits;
-    nbBits++;
-
-    while ((remaining>1) && (charnum<=*maxSVPtr))
-    {
-        if (previous0)
-        {
-            unsigned n0 = charnum;
-            while ((bitStream & 0xFFFF) == 0xFFFF)
-            {
-                n0+=24;
-                if (ip < iend-5)
-                {
-                    ip+=2;
-                    bitStream = FSE_readLE32(ip) >> bitCount;
-                }
-                else
-                {
-                    bitStream >>= 16;
-                    bitCount+=16;
-                }
-            }
-            while ((bitStream & 3) == 3)
-            {
-                n0+=3;
-                bitStream>>=2;
-                bitCount+=2;
-            }
-            n0 += bitStream & 3;
-            bitCount += 2;
-            if (n0 > *maxSVPtr) return (size_t)-FSE_ERROR_maxSymbolValue_tooSmall;
-            while (charnum < n0) normalizedCounter[charnum++] = 0;
-            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
-            {
-                ip += bitCount>>3;
-                bitCount &= 7;
-                bitStream = FSE_readLE32(ip) >> bitCount;
-            }
-            else
-                bitStream >>= 2;
-        }
-        {
-            const short max = (short)((2*threshold-1)-remaining);
-            short count;
-
-            if ((bitStream & (threshold-1)) < (U32)max)
-            {
-                count = (short)(bitStream & (threshold-1));
-                bitCount   += nbBits-1;
-            }
-            else
-            {
-                count = (short)(bitStream & (2*threshold-1));
-                if (count >= threshold) count -= max;
-                bitCount   += nbBits;
-            }
-
-            count--;   /* extra accuracy */
-            remaining -= FSE_abs(count);
-            normalizedCounter[charnum++] = count;
-            previous0 = !count;
-            while (remaining < threshold)
-            {
-                nbBits--;
-                threshold >>= 1;
-            }
-
-            {
-                if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
-                {
-                    ip += bitCount>>3;
-                    bitCount &= 7;
-                }
-                else
-                {
-                    bitCount -= (int)(8 * (iend - 4 - ip));
-                    ip = iend - 4;
-                }
-                bitStream = FSE_readLE32(ip) >> (bitCount & 31);
-            }
-        }
-    }
-    if (remaining != 1) return (size_t)-FSE_ERROR_GENERIC;
-    *maxSVPtr = charnum-1;
-
-    ip += (bitCount+7)>>3;
-    if ((size_t)(ip-istart) > hbSize) return (size_t)-FSE_ERROR_srcSize_wrong;
-    return ip-istart;
-}
-
-
-/*********************************************************
-*  Decompression (Byte symbols)
-*********************************************************/
-static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
-{
-    void* ptr = dt;
-    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
-    FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1;   /* because dt is unsigned */
-
-    DTableH->tableLog = 0;
-    DTableH->fastMode = 0;
-
-    cell->newState = 0;
-    cell->symbol = symbolValue;
-    cell->nbBits = 0;
-
-    return 0;
-}
-
-
-static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
-{
-    void* ptr = dt;
-    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
-    FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1;   /* because dt is unsigned */
-    const unsigned tableSize = 1 << nbBits;
-    const unsigned tableMask = tableSize - 1;
-    const unsigned maxSymbolValue = tableMask;
-    unsigned s;
-
-    /* Sanity checks */
-    if (nbBits < 1) return (size_t)-FSE_ERROR_GENERIC;             /* min size */
-
-    /* Build Decoding Table */
-    DTableH->tableLog = (U16)nbBits;
-    DTableH->fastMode = 1;
-    for (s=0; s<=maxSymbolValue; s++)
-    {
-        dinfo[s].newState = 0;
-        dinfo[s].symbol = (BYTE)s;
-        dinfo[s].nbBits = (BYTE)nbBits;
-    }
-
-    return 0;
-}
-
-
-/* FSE_initDStream
- * Initialize a FSE_DStream_t.
- * srcBuffer must point at the beginning of an FSE block.
- * The function result is the size of the FSE_block (== srcSize).
- * If srcSize is too small, the function will return an errorCode;
- */
-static size_t FSE_initDStream(FSE_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
-{
-    if (srcSize < 1) return (size_t)-FSE_ERROR_srcSize_wrong;
-
-    if (srcSize >=  sizeof(size_t))
-    {
-        U32 contain32;
-        bitD->start = (const char*)srcBuffer;
-        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);
-        bitD->bitContainer = FSE_readLEST(bitD->ptr);
-        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
-        if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC;   /* stop bit not present */
-        bitD->bitsConsumed = 8 - FSE_highbit32(contain32);
-    }
-    else
-    {
-        U32 contain32;
-        bitD->start = (const char*)srcBuffer;
-        bitD->ptr   = bitD->start;
-        bitD->bitContainer = *(const BYTE*)(bitD->start);
-        switch(srcSize)
-        {
-            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
-                    /* fallthrough */
-            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
-                    /* fallthrough */
-            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
-                    /* fallthrough */
-            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
-                    /* fallthrough */
-            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
-                    /* fallthrough */
-            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8;
-                    /* fallthrough */
-            default:;
-        }
-        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
-        if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC;   /* stop bit not present */
-        bitD->bitsConsumed = 8 - FSE_highbit32(contain32);
-        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
-    }
-
-    return srcSize;
-}
-
-
-/*!FSE_lookBits
- * Provides next n bits from the bitContainer.
- * bitContainer is not modified (bits are still present for next read/look)
- * On 32-bits, maxNbBits==25
- * On 64-bits, maxNbBits==57
- * return : value extracted.
- */
-static size_t FSE_lookBits(FSE_DStream_t* bitD, U32 nbBits)
-{
-    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
-    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
-}
-
-static size_t FSE_lookBitsFast(FSE_DStream_t* bitD, U32 nbBits)   /* only if nbBits >= 1 !! */
-{
-    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
-    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
-}
-
-static void FSE_skipBits(FSE_DStream_t* bitD, U32 nbBits)
-{
-    bitD->bitsConsumed += nbBits;
-}
-
-
-/*!FSE_readBits
- * Read next n bits from the bitContainer.
- * On 32-bits, don't read more than maxNbBits==25
- * On 64-bits, don't read more than maxNbBits==57
- * Use the fast variant *only* if n >= 1.
- * return : value extracted.
- */
-static size_t FSE_readBits(FSE_DStream_t* bitD, U32 nbBits)
-{
-    size_t value = FSE_lookBits(bitD, nbBits);
-    FSE_skipBits(bitD, nbBits);
-    return value;
-}
-
-static size_t FSE_readBitsFast(FSE_DStream_t* bitD, U32 nbBits)   /* only if nbBits >= 1 !! */
-{
-    size_t value = FSE_lookBitsFast(bitD, nbBits);
-    FSE_skipBits(bitD, nbBits);
-    return value;
-}
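
FSE_lookBits extracts the next nbBits from the top of the bit container without consuming them; FSE_readBits is the same read followed by FSE_skipBits. A Go transcription of the 64-bit case, for reference:

```go
package main

import "fmt"

// lookBits mirrors FSE_lookBits above for a 64-bit container: it returns
// the next nbBits bits (most-significant first) without consuming them.
func lookBits(container uint64, consumed, nbBits uint) uint64 {
	const bitMask = 63
	return ((container << (consumed & bitMask)) >> 1) >> ((bitMask - nbBits) & bitMask)
}

func main() {
	// Container holding 0xAB in its top byte: the first 4 bits are 0xA,
	// the next 4 are 0xB.
	c := uint64(0xAB) << 56
	fmt.Printf("%#x %#x\n", lookBits(c, 0, 4), lookBits(c, 4, 4)) // 0xa 0xb
}
```
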
-
-static unsigned FSE_reloadDStream(FSE_DStream_t* bitD)
-{
-    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
-        return FSE_DStream_tooFar;
-
-    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
-    {
-        bitD->ptr -= bitD->bitsConsumed >> 3;
-        bitD->bitsConsumed &= 7;
-        bitD->bitContainer = FSE_readLEST(bitD->ptr);
-        return FSE_DStream_unfinished;
-    }
-    if (bitD->ptr == bitD->start)
-    {
-        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return FSE_DStream_endOfBuffer;
-        return FSE_DStream_completed;
-    }
-    {
-        U32 nbBytes = bitD->bitsConsumed >> 3;
-        U32 result = FSE_DStream_unfinished;
-        if (bitD->ptr - nbBytes < bitD->start)
-        {
-            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
-            result = FSE_DStream_endOfBuffer;
-        }
-        bitD->ptr -= nbBytes;
-        bitD->bitsConsumed -= nbBytes*8;
-        bitD->bitContainer = FSE_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
-        return result;
-    }
-}
-
-
-static void FSE_initDState(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD, const FSE_DTable* dt)
-{
-    const void* ptr = dt;
-    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
-    DStatePtr->state = FSE_readBits(bitD, DTableH->tableLog);
-    FSE_reloadDStream(bitD);
-    DStatePtr->table = dt + 1;
-}
-
-static BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD)
-{
-    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    const U32  nbBits = DInfo.nbBits;
-    BYTE symbol = DInfo.symbol;
-    size_t lowBits = FSE_readBits(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-static BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD)
-{
-    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    const U32 nbBits = DInfo.nbBits;
-    BYTE symbol = DInfo.symbol;
-    size_t lowBits = FSE_readBitsFast(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-/* FSE_endOfDStream
-   Tells if bitD has reached end of bitStream or not */
-
-static unsigned FSE_endOfDStream(const FSE_DStream_t* bitD)
-{
-    return ((bitD->ptr == bitD->start) && (bitD->bitsConsumed == sizeof(bitD->bitContainer)*8));
-}
-
-static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
-{
-    return DStatePtr->state == 0;
-}
-
-
-FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
-          void* dst, size_t maxDstSize,
-    const void* cSrc, size_t cSrcSize,
-    const FSE_DTable* dt, const unsigned fast)
-{
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* op = ostart;
-    BYTE* const omax = op + maxDstSize;
-    BYTE* const olimit = omax-3;
-
-    FSE_DStream_t bitD;
-    FSE_DState_t state1;
-    FSE_DState_t state2;
-    size_t errorCode;
-
-    /* Init */
-    errorCode = FSE_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressed Size */
-    if (FSE_isError(errorCode)) return errorCode;
-
-    FSE_initDState(&state1, &bitD, dt);
-    FSE_initDState(&state2, &bitD, dt);
-
-#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
-
-    /* 4 symbols per loop */
-    for ( ; (FSE_reloadDStream(&bitD)==FSE_DStream_unfinished) && (op<olimit) ; op+=4)
-    {
-        op[0] = FSE_GETSYMBOL(&state1);
-
-        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            FSE_reloadDStream(&bitD);
-
-        op[1] = FSE_GETSYMBOL(&state2);
-
-        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            { if (FSE_reloadDStream(&bitD) > FSE_DStream_unfinished) { op+=2; break; } }
-
-        op[2] = FSE_GETSYMBOL(&state1);
-
-        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            FSE_reloadDStream(&bitD);
-
-        op[3] = FSE_GETSYMBOL(&state2);
-    }
-
-    /* tail */
-    /* note : FSE_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly FSE_DStream_completed */
-    while (1)
-    {
-        if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
-            break;
-
-        *op++ = FSE_GETSYMBOL(&state1);
-
-        if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
-            break;
-
-        *op++ = FSE_GETSYMBOL(&state2);
-    }
-
-    /* end ? */
-    if (FSE_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
-        return op-ostart;
-
-    if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall;   /* dst buffer is full, but cSrc unfinished */
-
-    return (size_t)-FSE_ERROR_corruptionDetected;
-}
-
-
-static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
-                            const void* cSrc, size_t cSrcSize,
-                            const FSE_DTable* dt)
-{
-    FSE_DTableHeader DTableH;
-    memcpy(&DTableH, dt, sizeof(DTableH));   /* memcpy() into local variable, to avoid strict aliasing warning */
-
-    /* select fast mode (static) */
-    if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
-    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
-}
-
-
-static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
-{
-    const BYTE* const istart = (const BYTE*)cSrc;
-    const BYTE* ip = istart;
-    short counting[FSE_MAX_SYMBOL_VALUE+1];
-    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
-    unsigned tableLog;
-    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
-    size_t errorCode;
-
-    if (cSrcSize<2) return (size_t)-FSE_ERROR_srcSize_wrong;   /* too small input size */
-
-    /* normal FSE decoding mode */
-    errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
-    if (FSE_isError(errorCode)) return errorCode;
-    if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;   /* too small input size */
-    ip += errorCode;
-    cSrcSize -= errorCode;
-
-    errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
-    if (FSE_isError(errorCode)) return errorCode;
-
-    /* always return, even if it is an error code */
-    return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
-}
-
-
-
-/* *******************************************************
-*  Huff0 : Huffman block compression
-*********************************************************/
-#define HUF_MAX_SYMBOL_VALUE 255
-#define HUF_DEFAULT_TABLELOG  12       /* used by default, when not specified */
-#define HUF_MAX_TABLELOG  12           /* max possible tableLog; for allocation purpose; can be modified */
-#define HUF_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
-#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
-#  error "HUF_MAX_TABLELOG is too large !"
-#endif
-
-typedef struct HUF_CElt_s {
-  U16  val;
-  BYTE nbBits;
-} HUF_CElt ;
-
-typedef struct nodeElt_s {
-    U32 count;
-    U16 parent;
-    BYTE byte;
-    BYTE nbBits;
-} nodeElt;
-
-
-/* *******************************************************
-*  Huff0 : Huffman block decompression
-*********************************************************/
-typedef struct {
-    BYTE byte;
-    BYTE nbBits;
-} HUF_DElt;
-
-static size_t HUF_readDTable (U16* DTable, const void* src, size_t srcSize)
-{
-    BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
-    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];  /* large enough for values from 0 to 16 */
-    U32 weightTotal;
-    U32 maxBits;
-    const BYTE* ip = (const BYTE*) src;
-    size_t iSize;
-    size_t oSize;
-    U32 n;
-    U32 nextRankStart;
-    void* ptr = DTable+1;
-    HUF_DElt* const dt = (HUF_DElt*)ptr;
-
-    if (!srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
-    iSize = ip[0];
-
-    FSE_STATIC_ASSERT(sizeof(HUF_DElt) == sizeof(U16));   /* if compilation fails here, assertion is false */
-    //memset(huffWeight, 0, sizeof(huffWeight));   /* should not be necessary, but some analyzer complain ... */
-    if (iSize >= 128)  /* special header */
-    {
-        if (iSize >= (242))   /* RLE */
-        {
-            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
-            oSize = l[iSize-242];
-            memset(huffWeight, 1, sizeof(huffWeight));
-            iSize = 0;
-        }
-        else   /* Incompressible */
-        {
-            oSize = iSize - 127;
-            iSize = ((oSize+1)/2);
-            if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
-            ip += 1;
-            for (n=0; n<oSize; n+=2)
-            {
-                huffWeight[n]   = ip[n/2] >> 4;
-                huffWeight[n+1] = ip[n/2] & 15;
-            }
-        }
-    }
-    else  /* header compressed with FSE (normal case) */
-    {
-        if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
-        oSize = FSE_decompress(huffWeight, HUF_MAX_SYMBOL_VALUE, ip+1, iSize);   /* max 255 values decoded, last one is implied */
-        if (FSE_isError(oSize)) return oSize;
-    }
-
-    /* collect weight stats */
-    memset(rankVal, 0, sizeof(rankVal));
-    weightTotal = 0;
-    for (n=0; n<oSize; n++)
-    {
-        if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return (size_t)-FSE_ERROR_corruptionDetected;
-        rankVal[huffWeight[n]]++;
-        weightTotal += (1 << huffWeight[n]) >> 1;
-    }
-    if (weightTotal == 0) return (size_t)-FSE_ERROR_corruptionDetected;
-
-    /* get last non-null symbol weight (implied, total must be 2^n) */
-    maxBits = FSE_highbit32(weightTotal) + 1;
-    if (maxBits > DTable[0]) return (size_t)-FSE_ERROR_tableLog_tooLarge;   /* DTable is too small */
-    DTable[0] = (U16)maxBits;
-    {
-        U32 total = 1 << maxBits;
-        U32 rest = total - weightTotal;
-        U32 verif = 1 << FSE_highbit32(rest);
-        U32 lastWeight = FSE_highbit32(rest) + 1;
-        if (verif != rest) return (size_t)-FSE_ERROR_corruptionDetected;    /* last value must be a clean power of 2 */
-        huffWeight[oSize] = (BYTE)lastWeight;
-        rankVal[lastWeight]++;
-    }
-
-    /* check tree construction validity */
-    if ((rankVal[1] < 2) || (rankVal[1] & 1)) return (size_t)-FSE_ERROR_corruptionDetected;   /* by construction : at least 2 elts of rank 1, must be even */
-
-    /* Prepare ranks */
-    nextRankStart = 0;
-    for (n=1; n<=maxBits; n++)
-    {
-        U32 current = nextRankStart;
-        nextRankStart += (rankVal[n] << (n-1));
-        rankVal[n] = current;
-    }
-
-    /* fill DTable */
-    for (n=0; n<=oSize; n++)
-    {
-        const U32 w = huffWeight[n];
-        const U32 length = (1 << w) >> 1;
-        U32 i;
-        HUF_DElt D;
-        D.byte = (BYTE)n; D.nbBits = (BYTE)(maxBits + 1 - w);
-        for (i = rankVal[w]; i < rankVal[w] + length; i++)
-            dt[i] = D;
-        rankVal[w] += length;
-    }
-
-    return iSize+1;
-}
-
-
-static BYTE HUF_decodeSymbol(FSE_DStream_t* Dstream, const HUF_DElt* dt, const U32 dtLog)
-{
-        const size_t val = FSE_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
-        const BYTE c = dt[val].byte;
-        FSE_skipBits(Dstream, dt[val].nbBits);
-        return c;
-}
-
-static size_t HUF_decompress_usingDTable(   /* -3% slower when non static */
-          void* dst, size_t maxDstSize,
-    const void* cSrc, size_t cSrcSize,
-    const U16* DTable)
-{
-    if (cSrcSize < 6) return (size_t)-FSE_ERROR_srcSize_wrong;
-    {
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* op = ostart;
-        BYTE* const omax = op + maxDstSize;
-        BYTE* const olimit = omax-15;
-
-        const void* ptr = DTable;
-        const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1;
-        const U32 dtLog = DTable[0];
-        size_t errorCode;
-        U32 reloadStatus;
-
-        /* Init */
-
-        const U16* jumpTable = (const U16*)cSrc;
-        const size_t length1 = FSE_readLE16(jumpTable);
-        const size_t length2 = FSE_readLE16(jumpTable+1);
-        const size_t length3 = FSE_readLE16(jumpTable+2);
-        const size_t length4 = cSrcSize - 6 - length1 - length2 - length3;   // check coherency !!
-        const char* const start1 = (const char*)(cSrc) + 6;
-        const char* const start2 = start1 + length1;
-        const char* const start3 = start2 + length2;
-        const char* const start4 = start3 + length3;
-        FSE_DStream_t bitD1, bitD2, bitD3, bitD4;
-
-        if (length1+length2+length3+6 >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
-
-        errorCode = FSE_initDStream(&bitD1, start1, length1);
-        if (FSE_isError(errorCode)) return errorCode;
-        errorCode = FSE_initDStream(&bitD2, start2, length2);
-        if (FSE_isError(errorCode)) return errorCode;
-        errorCode = FSE_initDStream(&bitD3, start3, length3);
-        if (FSE_isError(errorCode)) return errorCode;
-        errorCode = FSE_initDStream(&bitD4, start4, length4);
-        if (FSE_isError(errorCode)) return errorCode;
-
-        reloadStatus=FSE_reloadDStream(&bitD2);
-
-        /* 16 symbols per loop */
-        for ( ; (reloadStatus<FSE_DStream_completed) && (op<olimit);  /* D2-3-4 are supposed to be synchronized and finish together */
-            op+=16, reloadStatus = FSE_reloadDStream(&bitD2) | FSE_reloadDStream(&bitD3) | FSE_reloadDStream(&bitD4), FSE_reloadDStream(&bitD1))
-        {
-    #define HUF_DECODE_SYMBOL_0(n, Dstream) \
-            op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog);
-
-    #define HUF_DECODE_SYMBOL_1(n, Dstream) \
-            op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \
-            if (FSE_32bits() && (HUF_MAX_TABLELOG>12)) FSE_reloadDStream(&Dstream)
-
-    #define HUF_DECODE_SYMBOL_2(n, Dstream) \
-            op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \
-            if (FSE_32bits()) FSE_reloadDStream(&Dstream)
-
-            HUF_DECODE_SYMBOL_1( 0, bitD1);
-            HUF_DECODE_SYMBOL_1( 1, bitD2);
-            HUF_DECODE_SYMBOL_1( 2, bitD3);
-            HUF_DECODE_SYMBOL_1( 3, bitD4);
-            HUF_DECODE_SYMBOL_2( 4, bitD1);
-            HUF_DECODE_SYMBOL_2( 5, bitD2);
-            HUF_DECODE_SYMBOL_2( 6, bitD3);
-            HUF_DECODE_SYMBOL_2( 7, bitD4);
-            HUF_DECODE_SYMBOL_1( 8, bitD1);
-            HUF_DECODE_SYMBOL_1( 9, bitD2);
-            HUF_DECODE_SYMBOL_1(10, bitD3);
-            HUF_DECODE_SYMBOL_1(11, bitD4);
-            HUF_DECODE_SYMBOL_0(12, bitD1);
-            HUF_DECODE_SYMBOL_0(13, bitD2);
-            HUF_DECODE_SYMBOL_0(14, bitD3);
-            HUF_DECODE_SYMBOL_0(15, bitD4);
-        }
-
-        if (reloadStatus!=FSE_DStream_completed)   /* not complete : some bitStream might be FSE_DStream_unfinished */
-            return (size_t)-FSE_ERROR_corruptionDetected;
-
-        /* tail */
-        {
-            // bitTail = bitD1;   // *much* slower : -20% !??!
-            FSE_DStream_t bitTail;
-            bitTail.ptr = bitD1.ptr;
-            bitTail.bitsConsumed = bitD1.bitsConsumed;
-            bitTail.bitContainer = bitD1.bitContainer;   // required in case of FSE_DStream_endOfBuffer
-            bitTail.start = start1;
-            for ( ; (FSE_reloadDStream(&bitTail) < FSE_DStream_completed) && (op<omax) ; op++)
-            {
-                HUF_DECODE_SYMBOL_0(0, bitTail);
-            }
-
-            if (FSE_endOfDStream(&bitTail))
-                return op-ostart;
-        }
-
-        if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall;   /* dst buffer is full, but cSrc unfinished */
-
-        return (size_t)-FSE_ERROR_corruptionDetected;
-    }
-}
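
The four-stream layout parsed above stores three little-endian 16-bit lengths in the first 6 bytes; the fourth stream's length is whatever remains of the block. A Go sketch of that split (splitStreams is an illustrative helper, not an API of this package):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// splitStreams mirrors the jump-table layout parsed above: three LE16
// lengths in the first 6 bytes, four streams following, the fourth one
// taking whatever remains.
func splitStreams(src []byte) (s1, s2, s3, s4 []byte, err error) {
	if len(src) < 6 {
		return nil, nil, nil, nil, fmt.Errorf("src too small")
	}
	l1 := int(binary.LittleEndian.Uint16(src[0:2]))
	l2 := int(binary.LittleEndian.Uint16(src[2:4]))
	l3 := int(binary.LittleEndian.Uint16(src[4:6]))
	if 6+l1+l2+l3 >= len(src) {
		return nil, nil, nil, nil, fmt.Errorf("srcSize wrong")
	}
	body := src[6:]
	return body[:l1], body[l1 : l1+l2], body[l1+l2 : l1+l2+l3], body[l1+l2+l3:], nil
}

func main() {
	// 6-byte jump table (lengths 1, 2, 1) followed by the four streams.
	src := []byte{1, 0, 2, 0, 1, 0, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF}
	s1, s2, s3, s4, _ := splitStreams(src)
	fmt.Println(s1, s2, s3, s4) // [10] [11 12] [13] [14 15]
}
```
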
-
-
-static size_t HUF_decompress (void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUF_CREATE_STATIC_DTABLE(DTable, HUF_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-    size_t errorCode;
-
-    errorCode = HUF_readDTable (DTable, cSrc, cSrcSize);
-    if (FSE_isError(errorCode)) return errorCode;
-    if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
-    ip += errorCode;
-    cSrcSize -= errorCode;
-
-    return HUF_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, DTable);
-}
-
-
-#endif   /* FSE_COMMONDEFS_ONLY */
-
-/*
-    zstd - standard compression library
-    Copyright (C) 2014-2015, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd source repository : https://github.com/Cyan4973/zstd
-    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-/****************************************************************
-*  Tuning parameters
-*****************************************************************/
-/* MEMORY_USAGE :
-*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
-*  Increasing memory usage improves compression ratio
-*  Reduced memory usage can improve speed, due to cache effect */
-#define ZSTD_MEMORY_USAGE 17
-
-
-/**************************************
-   CPU Feature Detection
-**************************************/
-/*
- * Automated efficient unaligned memory access detection
- * Based on known hardware architectures
- * This list will be updated thanks to feedback
- */
-#if defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS) \
-    || defined(__ARM_FEATURE_UNALIGNED) \
-    || defined(__i386__) || defined(__x86_64__) \
-    || defined(_M_IX86) || defined(_M_X64) \
-    || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_8__) \
-    || (defined(_M_ARM) && (_M_ARM >= 7))
-#  define ZSTD_UNALIGNED_ACCESS 1
-#else
-#  define ZSTD_UNALIGNED_ACCESS 0
-#endif
-
-
-/********************************************************
-*  Includes
-*********************************************************/
-#include <stdlib.h>      /* calloc */
-#include <string.h>      /* memcpy, memmove */
-#include <stdio.h>       /* debug : printf */
-
-
-/********************************************************
-*  Compiler specifics
-*********************************************************/
-#ifdef __AVX2__
-#  include <immintrin.h>   /* AVX2 intrinsics */
-#endif
-
-#ifdef _MSC_VER    /* Visual Studio */
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
-#endif
-
-
-#ifndef MEM_ACCESS_MODULE
-#define MEM_ACCESS_MODULE
-/********************************************************
-*  Basic Types
-*********************************************************/
-#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-# include <stdint.h>
-typedef  uint8_t BYTE;
-typedef uint16_t U16;
-typedef  int16_t S16;
-typedef uint32_t U32;
-typedef  int32_t S32;
-typedef uint64_t U64;
-#else
-typedef unsigned char       BYTE;
-typedef unsigned short      U16;
-typedef   signed short      S16;
-typedef unsigned int        U32;
-typedef   signed int        S32;
-typedef unsigned long long  U64;
-#endif
-
-#endif   /* MEM_ACCESS_MODULE */
-
-
-/********************************************************
-*  Constants
-*********************************************************/
-static const U32 ZSTD_magicNumber = 0xFD2FB51E;   /* 3rd version : seqNb header */
-
-#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
-#define HASH_TABLESIZE (1 << HASH_LOG)
-#define HASH_MASK (HASH_TABLESIZE - 1)
-
-#define KNUTH 2654435761
-
-#define BIT7 128
-#define BIT6  64
-#define BIT5  32
-#define BIT4  16
-
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
-
-#define BLOCKSIZE (128 KB)                 /* define, for static allocation */
-
-#define WORKPLACESIZE (BLOCKSIZE*3)
-#define MINMATCH 4
-#define MLbits   7
-#define LLbits   6
-#define Offbits  5
-#define MaxML  ((1<<MLbits )-1)
-#define MaxLL  ((1<<LLbits )-1)
-#define MaxOff ((1<<Offbits)-1)
-#define LitFSELog  11
-#define MLFSELog   10
-#define LLFSELog   10
-#define OffFSELog   9
-#define MAX(a,b) ((a)<(b)?(b):(a))
-#define MaxSeq MAX(MaxLL, MaxML)
-
-#define LITERAL_NOENTROPY 63
-#define COMMAND_NOENTROPY 7   /* to remove */
-
-#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
-
-static const size_t ZSTD_blockHeaderSize = 3;
-static const size_t ZSTD_frameHeaderSize = 4;
-
-
-/********************************************************
-*  Memory operations
-*********************************************************/
-static unsigned ZSTD_32bits(void) { return sizeof(void*)==4; }
-
-static unsigned ZSTD_isLittleEndian(void)
-{
-    const union { U32 i; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
-    return one.c[0];
-}
-
-static U16    ZSTD_read16(const void* p) { U16 r; memcpy(&r, p, sizeof(r)); return r; }
-
-static void   ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
-
-static void   ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
-
-#define COPY8(d,s)    { ZSTD_copy8(d,s); d+=8; s+=8; }
-
-static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
-{
-    const BYTE* ip = (const BYTE*)src;
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + length;
-    while (op < oend) COPY8(op, ip);
-}
-
-static U16 ZSTD_readLE16(const void* memPtr)
-{
-    if (ZSTD_isLittleEndian()) return ZSTD_read16(memPtr);
-    else
-    {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U16)((U16)p[0] + ((U16)p[1]<<8));
-    }
-}
-
-static U32 ZSTD_readLE24(const void* memPtr)
-{
-    return ZSTD_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
-}
-
-static U32 ZSTD_readBE32(const void* memPtr)
-{
-    const BYTE* p = (const BYTE*)memPtr;
-    return (U32)(((U32)p[0]<<24) + ((U32)p[1]<<16) + ((U32)p[2]<<8) + ((U32)p[3]<<0));
-}
-
-
-/**************************************
-*  Local structures
-***************************************/
-typedef struct ZSTD_Cctx_s ZSTD_Cctx;
-
-typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
-
-typedef struct
-{
-    blockType_t blockType;
-    U32 origSize;
-} blockProperties_t;
-
-typedef struct {
-    void* buffer;
-    U32*  offsetStart;
-    U32*  offset;
-    BYTE* offCodeStart;
-    BYTE* offCode;
-    BYTE* litStart;
-    BYTE* lit;
-    BYTE* litLengthStart;
-    BYTE* litLength;
-    BYTE* matchLengthStart;
-    BYTE* matchLength;
-    BYTE* dumpsStart;
-    BYTE* dumps;
-} seqStore_t;
-
-
-typedef struct ZSTD_Cctx_s
-{
-    const BYTE* base;
-    U32 current;
-    U32 nextUpdate;
-    seqStore_t seqStore;
-#ifdef __AVX2__
-    __m256i hashTable[HASH_TABLESIZE>>3];
-#else
-    U32 hashTable[HASH_TABLESIZE];
-#endif
-    BYTE buffer[WORKPLACESIZE];
-} cctxi_t;
-
-
-
-
-/**************************************
-*  Error Management
-**************************************/
-/* published entry point */
-unsigned ZSTDv01_isError(size_t code) { return ERR_isError(code); }
-
-
-/**************************************
-*  Tool functions
-**************************************/
-#define ZSTD_VERSION_MAJOR    0    /* for breaking interface changes  */
-#define ZSTD_VERSION_MINOR    1    /* for new (non-breaking) interface capabilities */
-#define ZSTD_VERSION_RELEASE  3    /* for tweaks, bug-fixes, or development */
-#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
-
-/**************************************************************
-*   Decompression code
-**************************************************************/
-
-static size_t ZSTDv01_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
-{
-    const BYTE* const in = (const BYTE* const)src;
-    BYTE headerFlags;
-    U32 cSize;
-
-    if (srcSize < 3) return ERROR(srcSize_wrong);
-
-    headerFlags = *in;
-    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
-
-    bpPtr->blockType = (blockType_t)(headerFlags >> 6);
-    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
-
-    if (bpPtr->blockType == bt_end) return 0;
-    if (bpPtr->blockType == bt_rle) return 1;
-    return cSize;
-}
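
The 3-byte block header decoded above packs the block type into the top two bits of the first byte, while the low three bits of that byte plus the next two bytes form the compressed size. A Go sketch of the same decoding, for reference:

```go
package main

import "fmt"

type blockType int

const (
	btCompressed blockType = iota
	btRaw
	btRLE
	btEnd
)

// parseBlockHeader mirrors ZSTDv01_getcBlockSize above: the top 2 bits
// of the first byte carry the block type, and the low 3 bits of the
// first byte plus the next two bytes carry the size.
func parseBlockHeader(in []byte) (blockType, uint32, error) {
	if len(in) < 3 {
		return 0, 0, fmt.Errorf("srcSize wrong")
	}
	bt := blockType(in[0] >> 6)
	cSize := uint32(in[2]) + uint32(in[1])<<8 + uint32(in[0]&7)<<16
	return bt, cSize, nil
}

func main() {
	// First byte 0x42: type = 0x42>>6 = 1 (raw), low 3 bits = 2.
	bt, cSize, _ := parseBlockHeader([]byte{0x42, 0x01, 0x34})
	fmt.Println(bt, cSize) // 1 131380
}
```
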
-
-
-static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
-    memcpy(dst, src, srcSize);
-    return srcSize;
-}
-
-
-static size_t ZSTD_decompressLiterals(void* ctx,
-                                      void* dst, size_t maxDstSize,
-                                const void* src, size_t srcSize)
-{
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + maxDstSize;
-    const BYTE* ip = (const BYTE*)src;
-    size_t errorCode;
-    size_t litSize;
-
-    /* check : minimum 2, for litSize, +1, for content */
-    if (srcSize <= 3) return ERROR(corruption_detected);
-
-    litSize = ip[1] + (ip[0]<<8);
-    litSize += ((ip[-3] >> 3) & 7) << 16;   // mmmmh....
-    op = oend - litSize;
-
-    (void)ctx;
-    if (litSize > maxDstSize) return ERROR(dstSize_tooSmall);
-    errorCode = HUF_decompress(op, litSize, ip+2, srcSize-2);
-    if (FSE_isError(errorCode)) return ERROR(GENERIC);
-    return litSize;
-}
-
-
-static size_t ZSTDv01_decodeLiteralsBlock(void* ctx,
-                                void* dst, size_t maxDstSize,
-                          const BYTE** litStart, size_t* litSize,
-                          const void* src, size_t srcSize)
-{
-    const BYTE* const istart = (const BYTE* const)src;
-    const BYTE* ip = istart;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* const oend = ostart + maxDstSize;
-    blockProperties_t litbp;
-
-    size_t litcSize = ZSTDv01_getcBlockSize(src, srcSize, &litbp);
-    if (ZSTDv01_isError(litcSize)) return litcSize;
-    if (litcSize > srcSize - ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
-    ip += ZSTD_blockHeaderSize;
-
-    switch(litbp.blockType)
-    {
-    case bt_raw:
-        *litStart = ip;
-        ip += litcSize;
-        *litSize = litcSize;
-        break;
-    case bt_rle:
-        {
-            size_t rleSize = litbp.origSize;
-            if (rleSize>maxDstSize) return ERROR(dstSize_tooSmall);
-            if (!srcSize) return ERROR(srcSize_wrong);
-            memset(oend - rleSize, *ip, rleSize);
-            *litStart = oend - rleSize;
-            *litSize = rleSize;
-            ip++;
-            break;
-        }
-    case bt_compressed:
-        {
-            size_t decodedLitSize = ZSTD_decompressLiterals(ctx, dst, maxDstSize, ip, litcSize);
-            if (ZSTDv01_isError(decodedLitSize)) return decodedLitSize;
-            *litStart = oend - decodedLitSize;
-            *litSize = decodedLitSize;
-            ip += litcSize;
-            break;
-        }
-    case bt_end:
-    default:
-        return ERROR(GENERIC);
-    }
-
-    return ip-istart;
-}
-
-
-static size_t ZSTDv01_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
-                         FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
-                         const void* src, size_t srcSize)
-{
-    const BYTE* const istart = (const BYTE* const)src;
-    const BYTE* ip = istart;
-    const BYTE* const iend = istart + srcSize;
-    U32 LLtype, Offtype, MLtype;
-    U32 LLlog, Offlog, MLlog;
-    size_t dumpsLength;
-
-    /* check */
-    if (srcSize < 5) return ERROR(srcSize_wrong);
-
-    /* SeqHead */
-    *nbSeq = ZSTD_readLE16(ip); ip+=2;
-    LLtype  = *ip >> 6;
-    Offtype = (*ip >> 4) & 3;
-    MLtype  = (*ip >> 2) & 3;
-    if (*ip & 2)
-    {
-        dumpsLength  = ip[2];
-        dumpsLength += ip[1] << 8;
-        ip += 3;
-    }
-    else
-    {
-        dumpsLength  = ip[1];
-        dumpsLength += (ip[0] & 1) << 8;
-        ip += 2;
-    }
-    *dumpsPtr = ip;
-    ip += dumpsLength;
-    *dumpsLengthPtr = dumpsLength;
-
-    /* check */
-    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
-
-    /* sequences */
-    {
-        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL and MaxOff */
-        size_t headerSize;
-
-        /* Build DTables */
-        switch(LLtype)
-        {
-        case bt_rle :
-            LLlog = 0;
-            FSE_buildDTable_rle(DTableLL, *ip++); break;
-        case bt_raw :
-            LLlog = LLbits;
-            FSE_buildDTable_raw(DTableLL, LLbits); break;
-        default :
-            {   U32 max = MaxLL;
-                headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
-                if (FSE_isError(headerSize)) return ERROR(GENERIC);
-                if (LLlog > LLFSELog) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSE_buildDTable(DTableLL, norm, max, LLlog);
-        }   }
-
-        switch(Offtype)
-        {
-        case bt_rle :
-            Offlog = 0;
-            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
-            FSE_buildDTable_rle(DTableOffb, *ip++); break;
-        case bt_raw :
-            Offlog = Offbits;
-            FSE_buildDTable_raw(DTableOffb, Offbits); break;
-        default :
-            {   U32 max = MaxOff;
-                headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
-                if (FSE_isError(headerSize)) return ERROR(GENERIC);
-                if (Offlog > OffFSELog) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSE_buildDTable(DTableOffb, norm, max, Offlog);
-        }   }
-
-        switch(MLtype)
-        {
-        case bt_rle :
-            MLlog = 0;
-            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
-            FSE_buildDTable_rle(DTableML, *ip++); break;
-        case bt_raw :
-            MLlog = MLbits;
-            FSE_buildDTable_raw(DTableML, MLbits); break;
-        default :
-            {   U32 max = MaxML;
-                headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
-                if (FSE_isError(headerSize)) return ERROR(GENERIC);
-                if (MLlog > MLFSELog) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSE_buildDTable(DTableML, norm, max, MLlog);
-    }   }   }
-
-    return ip-istart;
-}
-
-
-typedef struct {
-    size_t litLength;
-    size_t offset;
-    size_t matchLength;
-} seq_t;
-
-typedef struct {
-    FSE_DStream_t DStream;
-    FSE_DState_t stateLL;
-    FSE_DState_t stateOffb;
-    FSE_DState_t stateML;
-    size_t prevOffset;
-    const BYTE* dumps;
-    const BYTE* dumpsEnd;
-} seqState_t;
-
-
-static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
-{
-    size_t litLength;
-    size_t prevOffset;
-    size_t offset;
-    size_t matchLength;
-    const BYTE* dumps = seqState->dumps;
-    const BYTE* const de = seqState->dumpsEnd;
-
-    /* Literal length */
-    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
-    prevOffset = litLength ? seq->offset : seqState->prevOffset;
-    seqState->prevOffset = seq->offset;
-    if (litLength == MaxLL)
-    {
-        const U32 add = dumps<de ? *dumps++ : 0;
-        if (add < 255) litLength += add;
-        else
-        {
-            if (dumps<=(de-3))
-            {
-                litLength = ZSTD_readLE24(dumps);
-                dumps += 3;
-            }
-        }
-    }
-
-    /* Offset */
-    {
-        U32 offsetCode, nbBits;
-        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));
-        if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream));
-        nbBits = offsetCode - 1;
-        if (offsetCode==0) nbBits = 0;   /* cmove */
-        offset = ((size_t)1 << (nbBits & ((sizeof(offset)*8)-1))) + FSE_readBits(&(seqState->DStream), nbBits);
-        if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream));
-        if (offsetCode==0) offset = prevOffset;
-    }
-
-    /* MatchLength */
-    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
-    if (matchLength == MaxML)
-    {
-        const U32 add = dumps<de ? *dumps++ : 0;
-        if (add < 255) matchLength += add;
-        else
-        {
-            if (dumps<=(de-3))
-            {
-                matchLength = ZSTD_readLE24(dumps);
-                dumps += 3;
-            }
-        }
-    }
-    matchLength += MINMATCH;
-
-    /* save result */
-    seq->litLength = litLength;
-    seq->offset = offset;
-    seq->matchLength = matchLength;
-    seqState->dumps = dumps;
-}
-
-
-static size_t ZSTD_execSequence(BYTE* op,
-                                seq_t sequence,
-                                const BYTE** litPtr, const BYTE* const litLimit,
-                                BYTE* const base, BYTE* const oend)
-{
-    static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
-    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */
-    const BYTE* const ostart = op;
-    const size_t litLength = sequence.litLength;
-    BYTE* const endMatch = op + litLength + sequence.matchLength;    /* risk : address space overflow (32-bits) */
-    const BYTE* const litEnd = *litPtr + litLength;
-
-    /* check */
-    if (endMatch > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
-    if (litEnd > litLimit) return ERROR(corruption_detected);
-    if (sequence.matchLength > (size_t)(*litPtr-op))  return ERROR(dstSize_tooSmall);    /* overwrite literal segment */
-
-    /* copy Literals */
-    if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8))
-        memmove(op, *litPtr, litLength);   /* overwrite risk */
-    else
-        ZSTD_wildcopy(op, *litPtr, litLength);
-    op += litLength;
-    *litPtr = litEnd;   /* update for next sequence */
-
-    /* check : last match must be at a minimum distance of 8 from end of dest buffer */
-    if (oend-op < 8) return ERROR(dstSize_tooSmall);
-
-    /* copy Match */
-    {
-        const U32 overlapRisk = (((size_t)(litEnd - endMatch)) < 12);
-        const BYTE* match = op - sequence.offset;            /* possible underflow at op - offset ? */
-        size_t qutt = 12;
-        U64 saved[2];
-
-        /* check */
-        if (match < base) return ERROR(corruption_detected);
-        if (sequence.offset > (size_t)base) return ERROR(corruption_detected);
-
-        /* save beginning of literal sequence, in case of write overlap */
-        if (overlapRisk)
-        {
-            if ((endMatch + qutt) > oend) qutt = oend-endMatch;
-            memcpy(saved, endMatch, qutt);
-        }
-
-        if (sequence.offset < 8)
-        {
-            const int dec64 = dec64table[sequence.offset];
-            op[0] = match[0];
-            op[1] = match[1];
-            op[2] = match[2];
-            op[3] = match[3];
-            match += dec32table[sequence.offset];
-            ZSTD_copy4(op+4, match);
-            match -= dec64;
-        } else { ZSTD_copy8(op, match); }
-        op += 8; match += 8;
-
-        if (endMatch > oend-(16-MINMATCH))
-        {
-            if (op < oend-8)
-            {
-                ZSTD_wildcopy(op, match, (oend-8) - op);
-                match += (oend-8) - op;
-                op = oend-8;
-            }
-            while (op<endMatch) *op++ = *match++;
-        }
-        else
-            ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
-
-        /* restore, in case of overlap */
-        if (overlapRisk) memcpy(endMatch, saved, qutt);
-    }
-
-    return endMatch-ostart;
-}
-
-typedef struct ZSTDv01_Dctx_s
-{
-    U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
-    U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
-    U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
-    void* previousDstEnd;
-    void* base;
-    size_t expected;
-    blockType_t bType;
-    U32 phase;
-} dctx_t;
-
-
-static size_t ZSTD_decompressSequences(
-                               void* ctx,
-                               void* dst, size_t maxDstSize,
-                         const void* seqStart, size_t seqSize,
-                         const BYTE* litStart, size_t litSize)
-{
-    dctx_t* dctx = (dctx_t*)ctx;
-    const BYTE* ip = (const BYTE*)seqStart;
-    const BYTE* const iend = ip + seqSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* op = ostart;
-    BYTE* const oend = ostart + maxDstSize;
-    size_t errorCode, dumpsLength;
-    const BYTE* litPtr = litStart;
-    const BYTE* const litEnd = litStart + litSize;
-    int nbSeq;
-    const BYTE* dumps;
-    U32* DTableLL = dctx->LLTable;
-    U32* DTableML = dctx->MLTable;
-    U32* DTableOffb = dctx->OffTable;
-    BYTE* const base = (BYTE*) (dctx->base);
-
-    /* Build Decoding Tables */
-    errorCode = ZSTDv01_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
-                                      DTableLL, DTableML, DTableOffb,
-                                      ip, iend-ip);
-    if (ZSTDv01_isError(errorCode)) return errorCode;
-    ip += errorCode;
-
-    /* Regen sequences */
-    {
-        seq_t sequence;
-        seqState_t seqState;
-
-        memset(&sequence, 0, sizeof(sequence));
-        seqState.dumps = dumps;
-        seqState.dumpsEnd = dumps + dumpsLength;
-        seqState.prevOffset = 1;
-        errorCode = FSE_initDStream(&(seqState.DStream), ip, iend-ip);
-        if (FSE_isError(errorCode)) return ERROR(corruption_detected);
-        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
-        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
-        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
-
-        for ( ; (FSE_reloadDStream(&(seqState.DStream)) <= FSE_DStream_completed) && (nbSeq>0) ; )
-        {
-            size_t oneSeqSize;
-            nbSeq--;
-            ZSTD_decodeSequence(&sequence, &seqState);
-            oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
-            if (ZSTDv01_isError(oneSeqSize)) return oneSeqSize;
-            op += oneSeqSize;
-        }
-
-        /* check if reached exact end */
-        if ( !FSE_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* requested too much : data is corrupted */
-        if (nbSeq<0) return ERROR(corruption_detected);   /* requested too many sequences : data is corrupted */
-
-        /* last literal segment */
-        {
-            size_t lastLLSize = litEnd - litPtr;
-            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
-            if (op != litPtr) memmove(op, litPtr, lastLLSize);
-            op += lastLLSize;
-        }
-    }
-
-    return op-ostart;
-}
-
-
-static size_t ZSTD_decompressBlock(
-                            void* ctx,
-                            void* dst, size_t maxDstSize,
-                      const void* src, size_t srcSize)
-{
-    /* blockType == blockCompressed, srcSize is trusted */
-    const BYTE* ip = (const BYTE*)src;
-    const BYTE* litPtr = NULL;
-    size_t litSize = 0;
-    size_t errorCode;
-
-    /* Decode literals sub-block */
-    errorCode = ZSTDv01_decodeLiteralsBlock(ctx, dst, maxDstSize, &litPtr, &litSize, src, srcSize);
-    if (ZSTDv01_isError(errorCode)) return errorCode;
-    ip += errorCode;
-    srcSize -= errorCode;
-
-    return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize, litPtr, litSize);
-}
-
-
-size_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-    const BYTE* iend = ip + srcSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* op = ostart;
-    BYTE* const oend = ostart + maxDstSize;
-    size_t remainingSize = srcSize;
-    U32 magicNumber;
-    size_t errorCode=0;
-    blockProperties_t blockProperties;
-
-    /* Frame Header */
-    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
-    magicNumber = ZSTD_readBE32(src);
-    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
-    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
-
-    /* Loop on each block */
-    while (1)
-    {
-        size_t blockSize = ZSTDv01_getcBlockSize(ip, iend-ip, &blockProperties);
-        if (ZSTDv01_isError(blockSize)) return blockSize;
-
-        ip += ZSTD_blockHeaderSize;
-        remainingSize -= ZSTD_blockHeaderSize;
-        if (blockSize > remainingSize) return ERROR(srcSize_wrong);
-
-        switch(blockProperties.blockType)
-        {
-        case bt_compressed:
-            errorCode = ZSTD_decompressBlock(ctx, op, oend-op, ip, blockSize);
-            break;
-        case bt_raw :
-            errorCode = ZSTD_copyUncompressedBlock(op, oend-op, ip, blockSize);
-            break;
-        case bt_rle :
-            return ERROR(GENERIC);   /* not yet supported */
-            break;
-        case bt_end :
-            /* end of frame */
-            if (remainingSize) return ERROR(srcSize_wrong);
-            break;
-        default:
-            return ERROR(GENERIC);
-        }
-        if (blockSize == 0) break;   /* bt_end */
-
-        if (ZSTDv01_isError(errorCode)) return errorCode;
-        op += errorCode;
-        ip += blockSize;
-        remainingSize -= blockSize;
-    }
-
-    return op-ostart;
-}
-
-size_t ZSTDv01_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    dctx_t ctx;
-    ctx.base = dst;
-    return ZSTDv01_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
-}
-
-/* ZSTD_errorFrameSizeInfoLegacy() :
-   assumes `cSize` and `dBound` are _not_ NULL */
-static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
-{
-    *cSize = ret;
-    *dBound = ZSTD_CONTENTSIZE_ERROR;
-}
-
-void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
-{
-    const BYTE* ip = (const BYTE*)src;
-    size_t remainingSize = srcSize;
-    size_t nbBlocks = 0;
-    U32 magicNumber;
-    blockProperties_t blockProperties;
-
-    /* Frame Header */
-    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
-        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-        return;
-    }
-    magicNumber = ZSTD_readBE32(src);
-    if (magicNumber != ZSTD_magicNumber) {
-        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
-        return;
-    }
-    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
-
-    /* Loop on each block */
-    while (1)
-    {
-        size_t blockSize = ZSTDv01_getcBlockSize(ip, remainingSize, &blockProperties);
-        if (ZSTDv01_isError(blockSize)) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, blockSize);
-            return;
-        }
-
-        ip += ZSTD_blockHeaderSize;
-        remainingSize -= ZSTD_blockHeaderSize;
-        if (blockSize > remainingSize) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-            return;
-        }
-
-        if (blockSize == 0) break;   /* bt_end */
-
-        ip += blockSize;
-        remainingSize -= blockSize;
-        nbBlocks++;
-    }
-
-    *cSize = ip - (const BYTE*)src;
-    *dBound = nbBlocks * BLOCKSIZE;
-}
-
-/*******************************
-*  Streaming Decompression API
-*******************************/
-
-size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx)
-{
-    dctx->expected = ZSTD_frameHeaderSize;
-    dctx->phase = 0;
-    dctx->previousDstEnd = NULL;
-    dctx->base = NULL;
-    return 0;
-}
-
-ZSTDv01_Dctx* ZSTDv01_createDCtx(void)
-{
-    ZSTDv01_Dctx* dctx = (ZSTDv01_Dctx*)malloc(sizeof(ZSTDv01_Dctx));
-    if (dctx==NULL) return NULL;
-    ZSTDv01_resetDCtx(dctx);
-    return dctx;
-}
-
-size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx)
-{
-    free(dctx);
-    return 0;
-}
-
-size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx)
-{
-    return ((dctx_t*)dctx)->expected;
-}
-
-size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    dctx_t* ctx = (dctx_t*)dctx;
-
-    /* Sanity check */
-    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
-    if (dst != ctx->previousDstEnd)  /* not contiguous */
-        ctx->base = dst;
-
-    /* Decompress : frame header */
-    if (ctx->phase == 0)
-    {
-        /* Check frame magic header */
-        U32 magicNumber = ZSTD_readBE32(src);
-        if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
-        ctx->phase = 1;
-        ctx->expected = ZSTD_blockHeaderSize;
-        return 0;
-    }
-
-    /* Decompress : block header */
-    if (ctx->phase == 1)
-    {
-        blockProperties_t bp;
-        size_t blockSize = ZSTDv01_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
-        if (ZSTDv01_isError(blockSize)) return blockSize;
-        if (bp.blockType == bt_end)
-        {
-            ctx->expected = 0;
-            ctx->phase = 0;
-        }
-        else
-        {
-            ctx->expected = blockSize;
-            ctx->bType = bp.blockType;
-            ctx->phase = 2;
-        }
-
-        return 0;
-    }
-
-    /* Decompress : block content */
-    {
-        size_t rSize;
-        switch(ctx->bType)
-        {
-        case bt_compressed:
-            rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
-            break;
-        case bt_raw :
-            rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
-            break;
-        case bt_rle :
-            return ERROR(GENERIC);   /* not yet handled */
-            break;
-        case bt_end :   /* should never happen (filtered at phase 1) */
-            rSize = 0;
-            break;
-        default:
-            return ERROR(GENERIC);
-        }
-        ctx->phase = 1;
-        ctx->expected = ZSTD_blockHeaderSize;
-        ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
-        return rSize;
-    }
-
-}
diff --git a/vendor/github.com/DataDog/zstd/zstd_v01.h b/vendor/github.com/DataDog/zstd/zstd_v01.h
deleted file mode 100644
index 245f9dd..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_v01.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_V01_H_28739879432
-#define ZSTD_V01_H_28739879432
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* *************************************
-*  Includes
-***************************************/
-#include <stddef.h>   /* size_t */
-
-
-/* *************************************
-*  Simple one-step function
-***************************************/
-/**
-ZSTDv01_decompress() : decompress ZSTD frames compliant with v0.1.x format
-    compressedSize : is the exact source size
-    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
-                      It must be equal or larger than originalSize, otherwise decompression will fail.
-    return : the number of bytes decompressed into destination buffer (originalSize)
-             or an errorCode if it fails (which can be tested using ZSTDv01_isError())
-*/
-size_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize,
-                     const void* src, size_t compressedSize);
-
- /**
- ZSTDv01_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.1.x format
-     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
-     cSize (output parameter)  : the number of bytes that would be read to decompress this frame
-                                 or an error code if it fails (which can be tested using ZSTDv01_isError())
-     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
-                                 or ZSTD_CONTENTSIZE_ERROR if an error occurs
-
-     note : assumes `cSize` and `dBound` are _not_ NULL.
- */
-void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
-                                     size_t* cSize, unsigned long long* dBound);
-
-/**
-ZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error
-*/
-unsigned ZSTDv01_isError(size_t code);
-
-
-/* *************************************
-*  Advanced functions
-***************************************/
-typedef struct ZSTDv01_Dctx_s ZSTDv01_Dctx;
-ZSTDv01_Dctx* ZSTDv01_createDCtx(void);
-size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx);
-
-size_t ZSTDv01_decompressDCtx(void* ctx,
-                              void* dst, size_t maxOriginalSize,
-                        const void* src, size_t compressedSize);
-
-/* *************************************
-*  Streaming functions
-***************************************/
-size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx);
-
-size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx);
-size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
-/**
-  Use above functions alternatively.
-  ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue().
-  ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block.
-  Result is the number of bytes regenerated within 'dst'.
-  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
-*/
-
-/* *************************************
-*  Prefix - version detection
-***************************************/
-#define ZSTDv01_magicNumber   0xFD2FB51E   /* Big Endian version */
-#define ZSTDv01_magicNumberLE 0x1EB52FFD   /* Little Endian version */
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_V01_H_28739879432 */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v02.c b/vendor/github.com/DataDog/zstd/zstd_v02.c
deleted file mode 100644
index 793df60..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_v02.c
+++ /dev/null
@@ -1,3513 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-#include <stddef.h>    /* size_t, ptrdiff_t */
-#include "zstd_v02.h"
-#include "error_private.h"
-
-
-/******************************************
-*  Compiler-specific
-******************************************/
-#if defined(_MSC_VER)   /* Visual Studio */
-#   include <stdlib.h>  /* _byteswap_ulong */
-#   include <intrin.h>  /* _byteswap_* */
-#endif
-
-
-/* ******************************************************************
-   mem.h
-   low-level memory access routines
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef MEM_H_MODULE
-#define MEM_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/******************************************
-*  Includes
-******************************************/
-#include <stddef.h>    /* size_t, ptrdiff_t */
-#include <string.h>    /* memcpy */
-
-
-/******************************************
-*  Compiler-specific
-******************************************/
-#if defined(__GNUC__)
-#  define MEM_STATIC static __attribute__((unused))
-#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#  define MEM_STATIC static inline
-#elif defined(_MSC_VER)
-#  define MEM_STATIC static __inline
-#else
-#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
-#endif
-
-
-/****************************************************************
-*  Basic Types
-*****************************************************************/
-#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-# include <stdint.h>
-  typedef  uint8_t BYTE;
-  typedef uint16_t U16;
-  typedef  int16_t S16;
-  typedef uint32_t U32;
-  typedef  int32_t S32;
-  typedef uint64_t U64;
-  typedef  int64_t S64;
-#else
-  typedef unsigned char       BYTE;
-  typedef unsigned short      U16;
-  typedef   signed short      S16;
-  typedef unsigned int        U32;
-  typedef   signed int        S32;
-  typedef unsigned long long  U64;
-  typedef   signed long long  S64;
-#endif
-
-
-/****************************************************************
-*  Memory I/O
-*****************************************************************/
-/* MEM_FORCE_MEMORY_ACCESS
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The below switch allow to select different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
- *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method is portable but violate C standard.
- *            It can generate buggy code on targets generating assembly depending on alignment.
- *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
- * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-#    define MEM_FORCE_MEMORY_ACCESS 2
-#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
-  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
-#    define MEM_FORCE_MEMORY_ACCESS 1
-#  endif
-#endif
-
-MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
-MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
-
-MEM_STATIC unsigned MEM_isLittleEndian(void)
-{
-    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
-    return one.c[0];
-}
-
-#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
-
-/* violates C standard on structure alignment.
-Only use if no other choice to achieve best performance on target platform */
-MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
-MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
-MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
-
-#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
-
-MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
-MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
-
-#else
-
-/* default method, safe and standard.
-   can sometimes prove slower */
-
-MEM_STATIC U16 MEM_read16(const void* memPtr)
-{
-    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC U32 MEM_read32(const void* memPtr)
-{
-    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC U64 MEM_read64(const void* memPtr)
-{
-    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value)
-{
-    memcpy(memPtr, &value, sizeof(value));
-}
-
-#endif // MEM_FORCE_MEMORY_ACCESS
-
-
-MEM_STATIC U16 MEM_readLE16(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read16(memPtr);
-    else
-    {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U16)(p[0] + (p[1]<<8));
-    }
-}
-
-MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
-{
-    if (MEM_isLittleEndian())
-    {
-        MEM_write16(memPtr, val);
-    }
-    else
-    {
-        BYTE* p = (BYTE*)memPtr;
-        p[0] = (BYTE)val;
-        p[1] = (BYTE)(val>>8);
-    }
-}
-
-MEM_STATIC U32 MEM_readLE24(const void* memPtr)
-{
-    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
-}
-
-MEM_STATIC U32 MEM_readLE32(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read32(memPtr);
-    else
-    {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
-    }
-}
-
-
-MEM_STATIC U64 MEM_readLE64(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read64(memPtr);
-    else
-    {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
-                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
-    }
-}
-
-
-MEM_STATIC size_t MEM_readLEST(const void* memPtr)
-{
-    if (MEM_32bits())
-        return (size_t)MEM_readLE32(memPtr);
-    else
-        return (size_t)MEM_readLE64(memPtr);
-}
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* MEM_H_MODULE */
-
-
-/* ******************************************************************
-   bitstream
-   Part of NewGen Entropy library
-   header file (to include)
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef BITSTREAM_H_MODULE
-#define BITSTREAM_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/*
-*  This API consists of small unitary functions, which highly benefit from being inlined.
-*  Since link-time-optimization is not available for all compilers,
-*  these functions are defined into a .h to be included.
-*/
-
-
-/**********************************************
-*  bitStream decompression API (read backward)
-**********************************************/
-typedef struct
-{
-    size_t   bitContainer;
-    unsigned bitsConsumed;
-    const char* ptr;
-    const char* start;
-} BIT_DStream_t;
-
-typedef enum { BIT_DStream_unfinished = 0,
-               BIT_DStream_endOfBuffer = 1,
-               BIT_DStream_completed = 2,
-               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
-               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
-
-MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
-MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
-MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
-MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
-
-
-/******************************************
-*  unsafe API
-******************************************/
-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
-/* faster, but works only if nbBits >= 1 */
-
-
-
-/****************************************************************
-*  Helper functions
-****************************************************************/
-MEM_STATIC unsigned BIT_highbit32 (U32 val)
-{
-#   if defined(_MSC_VER)   /* Visual */
-    unsigned long r=0;
-    _BitScanReverse ( &r, val );
-    return (unsigned) r;
-#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
-    return 31 - __builtin_clz (val);
-#   else   /* Software version */
-    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
-    U32 v = val;
-    unsigned r;
-    v |= v >> 1;
-    v |= v >> 2;
-    v |= v >> 4;
-    v |= v >> 8;
-    v |= v >> 16;
-    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
-    return r;
-#   endif
-}
-
-
-
-/**********************************************************
-* bitStream decoding
-**********************************************************/
-
-/*!BIT_initDStream
-*  Initialize a BIT_DStream_t.
-*  @bitD : a pointer to an already allocated BIT_DStream_t structure
-*  @srcBuffer must point at the beginning of a bitStream
-*  @srcSize must be the exact size of the bitStream
-*  @result : size of stream (== srcSize) or an errorCode if a problem is detected
-*/
-MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
-{
-    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
-
-    if (srcSize >=  sizeof(size_t))   /* normal case */
-    {
-        U32 contain32;
-        bitD->start = (const char*)srcBuffer;
-        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);
-        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
-        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
-        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
-    }
-    else
-    {
-        U32 contain32;
-        bitD->start = (const char*)srcBuffer;
-        bitD->ptr   = bitD->start;
-        bitD->bitContainer = *(const BYTE*)(bitD->start);
-        switch(srcSize)
-        {
-            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
-                    /* fallthrough */
-            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
-                    /* fallthrough */
-            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
-                    /* fallthrough */
-            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
-                    /* fallthrough */
-            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
-                    /* fallthrough */
-            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8;
-                    /* fallthrough */
-            default:;
-        }
-        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
-        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
-        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
-        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
-    }
-
-    return srcSize;
-}
-
-MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
-{
-    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
-    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
-}
-
-/*! BIT_lookBitsFast :
-*   unsafe version; only works only if nbBits >= 1 */
-MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
-{
-    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
-    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
-}
-
-MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
-{
-    bitD->bitsConsumed += nbBits;
-}
-
-MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
-{
-    size_t value = BIT_lookBits(bitD, nbBits);
-    BIT_skipBits(bitD, nbBits);
-    return value;
-}
-
-/*!BIT_readBitsFast :
-*  unsafe version; only works only if nbBits >= 1 */
-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
-{
-    size_t value = BIT_lookBitsFast(bitD, nbBits);
-    BIT_skipBits(bitD, nbBits);
-    return value;
-}
-
-MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
-{
-    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
-        return BIT_DStream_overflow;
-
-    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
-    {
-        bitD->ptr -= bitD->bitsConsumed >> 3;
-        bitD->bitsConsumed &= 7;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);
-        return BIT_DStream_unfinished;
-    }
-    if (bitD->ptr == bitD->start)
-    {
-        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
-        return BIT_DStream_completed;
-    }
-    {
-        U32 nbBytes = bitD->bitsConsumed >> 3;
-        BIT_DStream_status result = BIT_DStream_unfinished;
-        if (bitD->ptr - nbBytes < bitD->start)
-        {
-            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
-            result = BIT_DStream_endOfBuffer;
-        }
-        bitD->ptr -= nbBytes;
-        bitD->bitsConsumed -= nbBytes*8;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
-        return result;
-    }
-}
-
-/*! BIT_endOfDStream
-*   @return Tells if DStream has reached its exact end
-*/
-MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
-{
-    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
-}
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* BITSTREAM_H_MODULE */
-/* ******************************************************************
-   Error codes and messages
-   Copyright (C) 2013-2015, Yann Collet
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef ERROR_H_MODULE
-#define ERROR_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/******************************************
-*  Compiler-specific
-******************************************/
-#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#  define ERR_STATIC static inline
-#elif defined(_MSC_VER)
-#  define ERR_STATIC static __inline
-#elif defined(__GNUC__)
-#  define ERR_STATIC static __attribute__((unused))
-#else
-#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
-#endif
-
-
-/******************************************
-*  Error Management
-******************************************/
-#define PREFIX(name) ZSTD_error_##name
-
-#define ERROR(name) (size_t)-PREFIX(name)
-
-#define ERROR_LIST(ITEM) \
-        ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \
-        ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \
-        ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \
-        ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \
-        ITEM(PREFIX(maxCode))
-
-#define ERROR_GENERATE_ENUM(ENUM) ENUM,
-typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes;  /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
-
-#define ERROR_CONVERTTOSTRING(STRING) #STRING,
-#define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR)
-static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) };
-
-ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
-
-ERR_STATIC const char* ERR_getErrorName(size_t code)
-{
-    static const char* codeError = "Unspecified error code";
-    if (ERR_isError(code)) return ERR_strings[-(int)(code)];
-    return codeError;
-}
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ERROR_H_MODULE */
-/*
-Constructor and Destructor of type FSE_CTable
-    Note that its size depends on 'tableLog' and 'maxSymbolValue' */
-typedef unsigned FSE_CTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
-typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
-
-
-/* ******************************************************************
-   FSE : Finite State Entropy coder
-   header file for static linking (only)
-   Copyright (C) 2013-2015, Yann Collet
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/******************************************
-*  Static allocation
-******************************************/
-/* FSE buffer bounds */
-#define FSE_NCOUNTBOUND 512
-#define FSE_BLOCKBOUND(size) (size + (size>>7))
-#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
-
-/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */
-#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
-#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
-
-
-/******************************************
-*  FSE advanced API
-******************************************/
-static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
-/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
-
-static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
-/* build a fake FSE_DTable, designed to always generate the same symbolValue */
-
-
-/******************************************
-*  FSE symbol decompression API
-******************************************/
-typedef struct
-{
-    size_t      state;
-    const void* table;   /* precise table may vary, depending on U16 */
-} FSE_DState_t;
-
-
-static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
-
-static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
-
-static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
-
-
-/******************************************
-*  FSE unsafe API
-******************************************/
-static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
-/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
-
-
-/******************************************
-*  Implementation of inline functions
-******************************************/
-
-/* decompression */
-
-typedef struct {
-    U16 tableLog;
-    U16 fastMode;
-} FSE_DTableHeader;   /* sizeof U32 */
-
-typedef struct
-{
-    unsigned short newState;
-    unsigned char  symbol;
-    unsigned char  nbBits;
-} FSE_decode_t;   /* size == U32 */
-
-MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
-{
-    FSE_DTableHeader DTableH;
-    memcpy(&DTableH, dt, sizeof(DTableH));
-    DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
-    BIT_reloadDStream(bitD);
-    DStatePtr->table = dt + 1;
-}
-
-MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
-{
-    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    const U32  nbBits = DInfo.nbBits;
-    BYTE symbol = DInfo.symbol;
-    size_t lowBits = BIT_readBits(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
-{
-    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    const U32 nbBits = DInfo.nbBits;
-    BYTE symbol = DInfo.symbol;
-    size_t lowBits = BIT_readBitsFast(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
-{
-    return DStatePtr->state == 0;
-}
-
-
-#if defined (__cplusplus)
-}
-#endif
-/* ******************************************************************
-   Huff0 : Huffman coder, part of New Generation Entropy library
-   header file for static linking (only)
-   Copyright (C) 2013-2015, Yann Collet
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/******************************************
-*  Static allocation macros
-******************************************/
-/* Huff0 buffer bounds */
-#define HUF_CTABLEBOUND 129
-#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true if incompressible pre-filtered with fast heuristic */
-#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
-
-/* static allocation of Huff0's DTable */
-#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))  /* nb Cells; use unsigned short for X2, unsigned int for X4 */
-#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
-        unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
-#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
-        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
-#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
-        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
-
-
-/******************************************
-*  Advanced functions
-******************************************/
-static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
-static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */
-static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* quad-symbols decoder */
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-/*
-    zstd - standard compression library
-    Header File
-    Copyright (C) 2014-2015, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd source repository : https://github.com/Cyan4973/zstd
-    - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* *************************************
-*  Includes
-***************************************/
-#include <stddef.h>   /* size_t */
-
-
-/* *************************************
-*  Version
-***************************************/
-#define ZSTD_VERSION_MAJOR    0    /* for breaking interface changes  */
-#define ZSTD_VERSION_MINOR    2    /* for new (non-breaking) interface capabilities */
-#define ZSTD_VERSION_RELEASE  2    /* for tweaks, bug-fixes, or development */
-#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
-
-
-/* *************************************
-*  Advanced functions
-***************************************/
-typedef struct ZSTD_CCtx_s ZSTD_CCtx;   /* incomplete type */
-
-#if defined (__cplusplus)
-}
-#endif
-/*
-    zstd - standard compression library
-    Header File for static linking only
-    Copyright (C) 2014-2015, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd source repository : https://github.com/Cyan4973/zstd
-    - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-/* The objects defined in this file should be considered experimental.
- * They are not labelled stable, as their prototypes may change in the future.
- * You can use them for tests or to provide feedback, if you can accept the risk of future changes.
- */
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* *************************************
-*  Streaming functions
-***************************************/
-
-typedef struct ZSTD_DCtx_s ZSTD_DCtx;
-
-/*
-  Call the above functions alternately.
-  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
-  ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to the current block.
-  The result is the number of bytes regenerated within 'dst'.
-  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
-*/
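
The comment above describes an alternating call pattern, but the excerpt shows no caller. Purely as an editorial illustration (not part of the vendored header): a minimal sketch of such a loop. The prototypes of ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() are assumed from later zstd releases, the helper name is hypothetical, and error checking of the returned size is elided.

```c
/* Hypothetical sketch: drive the two calls described above in alternation.
 * Prototypes and the size_t return convention are assumed; a real caller
 * must test each return value with the library's error check. */
static size_t decodeFrame_sketch(ZSTD_DCtx* dctx,
                                 void* dst, size_t dstCapacity,
                                 const unsigned char* src, size_t srcSize)
{
    const unsigned char* ip = src;
    size_t produced = 0;
    while (1)
    {
        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);      /* bytes wanted next */
        size_t regenerated;
        if (toRead == 0) break;                                        /* frame fully decoded */
        if (toRead > srcSize - (size_t)(ip - src)) return (size_t)-1;  /* truncated input */
        regenerated = ZSTD_decompressContinue(dctx, (char*)dst + produced,
                                              dstCapacity - produced, ip, toRead);
        /* zero is legal here: it just means a header was decoded */
        produced += regenerated;
        ip += toRead;
    }
    return produced;
}
```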
-
-/* *************************************
-*  Prefix - version detection
-***************************************/
-#define ZSTD_magicNumber 0xFD2FB522   /* v0.2 (current)*/
-
-
-#if defined (__cplusplus)
-}
-#endif
-/* ******************************************************************
-   FSE : Finite State Entropy coder
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-#ifndef FSE_COMMONDEFS_ONLY
-
-/****************************************************************
-*  Tuning parameters
-****************************************************************/
-/* MEMORY_USAGE :
-*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
-*  Increasing memory usage improves compression ratio
-*  Reduced memory usage can improve speed, due to cache effect
-*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
-#define FSE_MAX_MEMORY_USAGE 14
-#define FSE_DEFAULT_MEMORY_USAGE 13
-
-/* FSE_MAX_SYMBOL_VALUE :
-*  Maximum symbol value authorized.
-*  Required for proper stack allocation */
-#define FSE_MAX_SYMBOL_VALUE 255
-
-
-/****************************************************************
-*  template functions type & suffix
-****************************************************************/
-#define FSE_FUNCTION_TYPE BYTE
-#define FSE_FUNCTION_EXTENSION
-
-
-/****************************************************************
-*  Byte symbol type
-****************************************************************/
-#endif   /* !FSE_COMMONDEFS_ONLY */
-
-
-/****************************************************************
-*  Compiler specifics
-****************************************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  define FORCE_INLINE static __forceinline
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
-#else
-#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#    ifdef __GNUC__
-#      define FORCE_INLINE static inline __attribute__((always_inline))
-#    else
-#      define FORCE_INLINE static inline
-#    endif
-#  else
-#    define FORCE_INLINE static
-#  endif /* __STDC_VERSION__ */
-#endif
-
-
-/****************************************************************
-*  Includes
-****************************************************************/
-#include <stdlib.h>     /* malloc, free, qsort */
-#include <string.h>     /* memcpy, memset */
-#include <stdio.h>      /* printf (debug) */
-
-/****************************************************************
-*  Constants
-*****************************************************************/
-#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
-#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
-#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
-#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
-#define FSE_MIN_TABLELOG 5
-
-#define FSE_TABLELOG_ABSOLUTE_MAX 15
-#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
-#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
-#endif
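
As a quick sanity check of the arithmetic above (editorial addition, not in the vendored file): with FSE_MAX_MEMORY_USAGE at 14 and FSE_DEFAULT_MEMORY_USAGE at 13, the derived constants work out as shown below; the typedef names are hypothetical and the negative-array-size trick simply breaks the build if the values drift.

```c
/* Editorial sketch: spell out the constants derived above.
 *   FSE_MAX_TABLELOG     = FSE_MAX_MEMORY_USAGE - 2     = 14 - 2 = 12
 *   FSE_MAX_TABLESIZE    = 1 << FSE_MAX_TABLELOG        = 4096 cells
 *   FSE_DEFAULT_TABLELOG = FSE_DEFAULT_MEMORY_USAGE - 2 = 11 (2048 cells)
 * consistent with the tuning comment's "Recommended max value is 14, for 16KB". */
typedef char FSE_check_maxTableLog_sketch [(FSE_MAX_TABLELOG     == 12)   ? 1 : -1];
typedef char FSE_check_maxTableSize_sketch[(FSE_MAX_TABLESIZE    == 4096) ? 1 : -1];
typedef char FSE_check_defTableLog_sketch [(FSE_DEFAULT_TABLELOG == 11)   ? 1 : -1];
```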
-
-
-/****************************************************************
-*  Error Management
-****************************************************************/
-#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
-
-
-/****************************************************************
-*  Complex types
-****************************************************************/
-typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
-
-
-/****************************************************************
-*  Templates
-****************************************************************/
-/*
-  designed to be included
-  for type-specific functions (template emulation in C)
-  Objective is to write these functions only once, for improved maintenance
-*/
-
-/* safety checks */
-#ifndef FSE_FUNCTION_EXTENSION
-#  error "FSE_FUNCTION_EXTENSION must be defined"
-#endif
-#ifndef FSE_FUNCTION_TYPE
-#  error "FSE_FUNCTION_TYPE must be defined"
-#endif
-
-/* Function names */
-#define FSE_CAT(X,Y) X##Y
-#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
-#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
-
-
-/* Function templates */
-
-#define FSE_DECODE_TYPE FSE_decode_t
-
-static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
-
-static size_t FSE_buildDTable
-(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
-    void* ptr = dt+1;
-    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr;
-    FSE_DTableHeader DTableH;
-    const U32 tableSize = 1 << tableLog;
-    const U32 tableMask = tableSize-1;
-    const U32 step = FSE_tableStep(tableSize);
-    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
-    U32 position = 0;
-    U32 highThreshold = tableSize-1;
-    const S16 largeLimit= (S16)(1 << (tableLog-1));
-    U32 noLarge = 1;
-    U32 s;
-
-    /* Sanity Checks */
-    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
-    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
-
-    /* Init, lay down lowprob symbols */
-    DTableH.tableLog = (U16)tableLog;
-    for (s=0; s<=maxSymbolValue; s++)
-    {
-        if (normalizedCounter[s]==-1)
-        {
-            tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
-            symbolNext[s] = 1;
-        }
-        else
-        {
-            if (normalizedCounter[s] >= largeLimit) noLarge=0;
-            symbolNext[s] = normalizedCounter[s];
-        }
-    }
-
-    /* Spread symbols */
-    for (s=0; s<=maxSymbolValue; s++)
-    {
-        int i;
-        for (i=0; i<normalizedCounter[s]; i++)
-        {
-            tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
-            position = (position + step) & tableMask;
-            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
-        }
-    }
-
-    if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
-
-    /* Build Decoding table */
-    {
-        U32 i;
-        for (i=0; i<tableSize; i++)
-        {
-            FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
-            U16 nextState = symbolNext[symbol]++;
-            tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
-            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
-        }
-    }
-
-    DTableH.fastMode = (U16)noLarge;
-    memcpy(dt, &DTableH, sizeof(DTableH));   /* memcpy(), to avoid strict aliasing warnings */
-    return 0;
-}
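
The `position != 0` check above relies on the spread walk visiting every table cell exactly once. That holds because tableSize is a power of two while FSE_tableStep() returns an odd value (two even terms plus 3), so the step is coprime with the table size. A small standalone check for the smallest table (editorial sketch with a hypothetical name, not part of the vendored source):

```c
#include <assert.h>

/* Editorial sketch: verify that position += step (mod tableSize) is a full
 * cycle for tableLog = FSE_MIN_TABLELOG = 5, i.e. tableSize = 32, step = 23. */
static void FSE_spreadCoverage_demo(void)
{
    unsigned const tableSize = 32;
    unsigned const tableMask = tableSize - 1;
    unsigned const step = (tableSize >> 1) + (tableSize >> 3) + 3;   /* 16 + 4 + 3 = 23 */
    unsigned char seen[32] = { 0 };
    unsigned position = 0;
    unsigned n;
    for (n = 0; n < tableSize; n++)
    {
        assert(!seen[position]);   /* each cell is reached at most once ... */
        seen[position] = 1;
        position = (position + step) & tableMask;
    }
    assert(position == 0);         /* ... and the walk ends back at cell 0 */
}
```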
-
-
-#ifndef FSE_COMMONDEFS_ONLY
-/******************************************
-*  FSE helper functions
-******************************************/
-static unsigned FSE_isError(size_t code) { return ERR_isError(code); }
-
-
-/****************************************************************
-*  FSE NCount encoding-decoding
-****************************************************************/
-static short FSE_abs(short a)
-{
-    return (short)(a<0 ? -a : a);
-}
-
-static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
-                 const void* headerBuffer, size_t hbSize)
-{
-    const BYTE* const istart = (const BYTE*) headerBuffer;
-    const BYTE* const iend = istart + hbSize;
-    const BYTE* ip = istart;
-    int nbBits;
-    int remaining;
-    int threshold;
-    U32 bitStream;
-    int bitCount;
-    unsigned charnum = 0;
-    int previous0 = 0;
-
-    if (hbSize < 4) return ERROR(srcSize_wrong);
-    bitStream = MEM_readLE32(ip);
-    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
-    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
-    bitStream >>= 4;
-    bitCount = 4;
-    *tableLogPtr = nbBits;
-    remaining = (1<<nbBits)+1;
-    threshold = 1<<nbBits;
-    nbBits++;
-
-    while ((remaining>1) && (charnum<=*maxSVPtr))
-    {
-        if (previous0)
-        {
-            unsigned n0 = charnum;
-            while ((bitStream & 0xFFFF) == 0xFFFF)
-            {
-                n0+=24;
-                if (ip < iend-5)
-                {
-                    ip+=2;
-                    bitStream = MEM_readLE32(ip) >> bitCount;
-                }
-                else
-                {
-                    bitStream >>= 16;
-                    bitCount+=16;
-                }
-            }
-            while ((bitStream & 3) == 3)
-            {
-                n0+=3;
-                bitStream>>=2;
-                bitCount+=2;
-            }
-            n0 += bitStream & 3;
-            bitCount += 2;
-            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
-            while (charnum < n0) normalizedCounter[charnum++] = 0;
-            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
-            {
-                ip += bitCount>>3;
-                bitCount &= 7;
-                bitStream = MEM_readLE32(ip) >> bitCount;
-            }
-            else
-                bitStream >>= 2;
-        }
-        {
-            const short max = (short)((2*threshold-1)-remaining);
-            short count;
-
-            if ((bitStream & (threshold-1)) < (U32)max)
-            {
-                count = (short)(bitStream & (threshold-1));
-                bitCount   += nbBits-1;
-            }
-            else
-            {
-                count = (short)(bitStream & (2*threshold-1));
-                if (count >= threshold) count -= max;
-                bitCount   += nbBits;
-            }
-
-            count--;   /* extra accuracy */
-            remaining -= FSE_abs(count);
-            normalizedCounter[charnum++] = count;
-            previous0 = !count;
-            while (remaining < threshold)
-            {
-                nbBits--;
-                threshold >>= 1;
-            }
-
-            {
-                if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
-                {
-                    ip += bitCount>>3;
-                    bitCount &= 7;
-                }
-                else
-                {
-                    bitCount -= (int)(8 * (iend - 4 - ip));
-                    ip = iend - 4;
-                }
-                bitStream = MEM_readLE32(ip) >> (bitCount & 31);
-            }
-        }
-    }
-    if (remaining != 1) return ERROR(GENERIC);
-    *maxSVPtr = charnum-1;
-
-    ip += (bitCount+7)>>3;
-    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
-    return ip-istart;
-}
-
-
-/*********************************************************
-*  Decompression (Byte symbols)
-*********************************************************/
-static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
-{
-    void* ptr = dt;
-    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
-    FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1;   /* because dt is unsigned */
-
-    DTableH->tableLog = 0;
-    DTableH->fastMode = 0;
-
-    cell->newState = 0;
-    cell->symbol = symbolValue;
-    cell->nbBits = 0;
-
-    return 0;
-}
-
-
-static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
-{
-    void* ptr = dt;
-    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
-    FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1;   /* because dt is unsigned */
-    const unsigned tableSize = 1 << nbBits;
-    const unsigned tableMask = tableSize - 1;
-    const unsigned maxSymbolValue = tableMask;
-    unsigned s;
-
-    /* Sanity checks */
-    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
-
-    /* Build Decoding Table */
-    DTableH->tableLog = (U16)nbBits;
-    DTableH->fastMode = 1;
-    for (s=0; s<=maxSymbolValue; s++)
-    {
-        dinfo[s].newState = 0;
-        dinfo[s].symbol = (BYTE)s;
-        dinfo[s].nbBits = (BYTE)nbBits;
-    }
-
-    return 0;
-}
-
-FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
-          void* dst, size_t maxDstSize,
-    const void* cSrc, size_t cSrcSize,
-    const FSE_DTable* dt, const unsigned fast)
-{
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* op = ostart;
-    BYTE* const omax = op + maxDstSize;
-    BYTE* const olimit = omax-3;
-
-    BIT_DStream_t bitD;
-    FSE_DState_t state1;
-    FSE_DState_t state2;
-    size_t errorCode;
-
-    /* Init */
-    errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressedSize */
-    if (FSE_isError(errorCode)) return errorCode;
-
-    FSE_initDState(&state1, &bitD, dt);
-    FSE_initDState(&state2, &bitD, dt);
-
-#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
-
-    /* 4 symbols per loop */
-    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
-    {
-        op[0] = FSE_GETSYMBOL(&state1);
-
-        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            BIT_reloadDStream(&bitD);
-
-        op[1] = FSE_GETSYMBOL(&state2);
-
-        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
-
-        op[2] = FSE_GETSYMBOL(&state1);
-
-        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            BIT_reloadDStream(&bitD);
-
-        op[3] = FSE_GETSYMBOL(&state2);
-    }
-
-    /* tail */
-    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
-    while (1)
-    {
-        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
-            break;
-
-        *op++ = FSE_GETSYMBOL(&state1);
-
-        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
-            break;
-
-        *op++ = FSE_GETSYMBOL(&state2);
-    }
-
-    /* end ? */
-    if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
-        return op-ostart;
-
-    if (op==omax) return ERROR(dstSize_tooSmall);   /* dst buffer is full, but cSrc unfinished */
-
-    return ERROR(corruption_detected);
-}
-
-
-static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
-                            const void* cSrc, size_t cSrcSize,
-                            const FSE_DTable* dt)
-{
-    FSE_DTableHeader DTableH;
-    memcpy(&DTableH, dt, sizeof(DTableH));
-
-    /* select fast mode (static) */
-    if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
-    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
-}
-
-
-static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
-{
-    const BYTE* const istart = (const BYTE*)cSrc;
-    const BYTE* ip = istart;
-    short counting[FSE_MAX_SYMBOL_VALUE+1];
-    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
-    unsigned tableLog;
-    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
-    size_t errorCode;
-
-    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */
-
-    /* normal FSE decoding mode */
-    errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
-    if (FSE_isError(errorCode)) return errorCode;
-    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */
-    ip += errorCode;
-    cSrcSize -= errorCode;
-
-    errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
-    if (FSE_isError(errorCode)) return errorCode;
-
-    /* always return, even if it is an error code */
-    return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
-}
-
-
-
-#endif   /* FSE_COMMONDEFS_ONLY */
-/* ******************************************************************
-   Huff0 : Huffman coder, part of New Generation Entropy library
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-/****************************************************************
-*  Compiler specifics
-****************************************************************/
-#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-/* inline is defined */
-#elif defined(_MSC_VER)
-#  define inline __inline
-#else
-#  define inline /* disable inline */
-#endif
-
-
-#ifdef _MSC_VER    /* Visual Studio */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#endif
-
-
-/****************************************************************
-*  Includes
-****************************************************************/
-#include <stdlib.h>     /* malloc, free, qsort */
-#include <string.h>     /* memcpy, memset */
-#include <stdio.h>      /* printf (debug) */
-
-/****************************************************************
-*  Error Management
-****************************************************************/
-#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
-
-
-/******************************************
-*  Helper functions
-******************************************/
-static unsigned HUF_isError(size_t code) { return ERR_isError(code); }
-
-#define HUF_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
-#define HUF_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
-#define HUF_DEFAULT_TABLELOG  HUF_MAX_TABLELOG   /* tableLog by default, when not specified */
-#define HUF_MAX_SYMBOL_VALUE 255
-#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
-#  error "HUF_MAX_TABLELOG is too large !"
-#endif
-
-
-
-/*********************************************************
-*  Huff0 : Huffman block decompression
-*********************************************************/
-typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */
-
-typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */
-
-typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
-
-/*! HUF_readStats
-    Read compact Huffman tree, saved by HUF_writeCTable
-    @huffWeight : destination buffer
-    @return : size read from `src`
-*/
-static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
-                            U32* nbSymbolsPtr, U32* tableLogPtr,
-                            const void* src, size_t srcSize)
-{
-    U32 weightTotal;
-    U32 tableLog;
-    const BYTE* ip = (const BYTE*) src;
-    size_t iSize;
-    size_t oSize;
-    U32 n;
-
-    if (!srcSize) return ERROR(srcSize_wrong);
-    iSize = ip[0];
-    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzers complain ... */
-
-    if (iSize >= 128)  /* special header */
-    {
-        if (iSize >= (242))   /* RLE */
-        {
-            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
-            oSize = l[iSize-242];
-            memset(huffWeight, 1, hwSize);
-            iSize = 0;
-        }
-        else   /* Incompressible */
-        {
-            oSize = iSize - 127;
-            iSize = ((oSize+1)/2);
-            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-            if (oSize >= hwSize) return ERROR(corruption_detected);
-            ip += 1;
-            for (n=0; n<oSize; n+=2)
-            {
-                huffWeight[n]   = ip[n/2] >> 4;
-                huffWeight[n+1] = ip[n/2] & 15;
-            }
-        }
-    }
-    else  /* header compressed with FSE (normal case) */
-    {
-        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-        oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */
-        if (FSE_isError(oSize)) return oSize;
-    }
-
-    /* collect weight stats */
-    memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
-    weightTotal = 0;
-    for (n=0; n<oSize; n++)
-    {
-        if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
-        rankStats[huffWeight[n]]++;
-        weightTotal += (1 << huffWeight[n]) >> 1;
-    }
-    if (weightTotal == 0) return ERROR(corruption_detected);
-
-    /* get last non-null symbol weight (implied, total must be 2^n) */
-    tableLog = BIT_highbit32(weightTotal) + 1;
-    if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
-    {
-        U32 total = 1 << tableLog;
-        U32 rest = total - weightTotal;
-        U32 verif = 1 << BIT_highbit32(rest);
-        U32 lastWeight = BIT_highbit32(rest) + 1;
-        if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
-        huffWeight[oSize] = (BYTE)lastWeight;
-        rankStats[lastWeight]++;
-    }
-
-    /* check tree construction validity */
-    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
-
-    /* results */
-    *nbSymbolsPtr = (U32)(oSize+1);
-    *tableLogPtr = tableLog;
-    return iSize+1;
-}
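
For orientation, a worked example of the header-byte dispatch above (editorial addition; the helper below is hypothetical and only restates the arithmetic of the direct-representation branch): a first byte below 128 selects the FSE-compressed path, 128..241 the direct 4-bit form, and 242 and above an RLE run length from the l[] table.

```c
/* Editorial sketch: bytes consumed by the direct-representation branch above.
 * Example: a header byte of 200 means oSize = 200 - 127 = 73 explicit weights,
 * packed two per byte into (73 + 1) / 2 = 37 bytes, so HUF_readStats() reads
 * 1 + 37 = 38 bytes and reports 73 + 1 = 74 symbols (the last weight implied). */
static size_t HUF_directHeaderSize_sketch(unsigned headerByte)   /* 128 <= headerByte < 242 */
{
    size_t const oSize  = headerByte - 127;   /* number of explicit weights */
    size_t const packed = (oSize + 1) / 2;    /* two 4-bit weights per byte */
    return 1 + packed;                        /* header byte + packed weights */
}
```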
-
-
-/**************************/
-/* single-symbol decoding */
-/**************************/
-
-static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
-{
-    BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
-    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */
-    U32 tableLog = 0;
-    const BYTE* ip = (const BYTE*) src;
-    size_t iSize = ip[0];
-    U32 nbSymbols = 0;
-    U32 n;
-    U32 nextRankStart;
-    void* ptr = DTable+1;
-    HUF_DEltX2* const dt = (HUF_DEltX2*)ptr;
-
-    HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */
-    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzers complain ... */
-
-    iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
-    if (HUF_isError(iSize)) return iSize;
-
-    /* check result */
-    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */
-    DTable[0] = (U16)tableLog;   /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */
-
-    /* Prepare ranks */
-    nextRankStart = 0;
-    for (n=1; n<=tableLog; n++)
-    {
-        U32 current = nextRankStart;
-        nextRankStart += (rankVal[n] << (n-1));
-        rankVal[n] = current;
-    }
-
-    /* fill DTable */
-    for (n=0; n<nbSymbols; n++)
-    {
-        const U32 w = huffWeight[n];
-        const U32 length = (1 << w) >> 1;
-        U32 i;
-        HUF_DEltX2 D;
-        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
-        for (i = rankVal[w]; i < rankVal[w] + length; i++)
-            dt[i] = D;
-        rankVal[w] += length;
-    }
-
-    return iSize;
-}
-
-static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
-{
-        const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
-        const BYTE c = dt[val].byte;
-        BIT_skipBits(Dstream, dt[val].nbBits);
-        return c;
-}
-
-#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
-    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
-        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 4 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
-    {
-        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-    }
-
-    /* closer to the end */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-
-    /* no more data to retrieve from bitstream, hence no need to reload */
-    while (p < pEnd)
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-
-    return pEnd-pStart;
-}
-
-
-static size_t HUF_decompress4X2_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const U16* DTable)
-{
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-
-    {
-        const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-
-        const void* ptr = DTable;
-        const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1;
-        const U32 dtLog = DTable[0];
-        size_t errorCode;
-
-        /* Init */
-        BIT_DStream_t bitD1;
-        BIT_DStream_t bitD2;
-        BIT_DStream_t bitD3;
-        BIT_DStream_t bitD4;
-        const size_t length1 = MEM_readLE16(istart);
-        const size_t length2 = MEM_readLE16(istart+2);
-        const size_t length3 = MEM_readLE16(istart+4);
-        size_t length4;
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        const size_t segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-
-        length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        errorCode = BIT_initDStream(&bitD1, istart1, length1);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD2, istart2, length2);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD3, istart3, length3);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD4, istart4, length4);
-        if (HUF_isError(errorCode)) return errorCode;
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
-        {
-            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
-
-            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 is already verified within the main loop */
-
-        /* finish bitStreams one by one */
-        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
-        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
-        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
-        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
-        if (!endSignal) return ERROR(corruption_detected);
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-    size_t errorCode;
-
-    errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
-    if (HUF_isError(errorCode)) return errorCode;
-    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += errorCode;
-    cSrcSize -= errorCode;
-
-    return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
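
To make the 4-stream framing above concrete (editorial addition, hypothetical helper name): the compressed block starts with a 6-byte jump table holding the first three stream lengths as little-endian 16-bit values, the fourth length being whatever remains of cSrcSize; on the output side each of the first three streams regenerates a segment of (dstSize+3)/4 bytes and the last stream fills the remainder.

```c
/* Editorial sketch: the output split used by HUF_decompress4X2_usingDTable().
 * Example: dstSize = 1001 gives segmentSize = (1001 + 3) / 4 = 251, so streams
 * 1-3 regenerate 251 bytes each and stream 4 the remaining 1001 - 753 = 248. */
static size_t HUF_segmentSize_sketch(size_t dstSize)
{
    return (dstSize + 3) / 4;   /* matches `segmentSize` in the decoder above */
}
```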
-
-
-/***************************/
-/* double-symbols decoding */
-/***************************/
-
-static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
-                           const U32* rankValOrigin, const int minWeight,
-                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
-                           U32 nbBitsBaseline, U16 baseSeq)
-{
-    HUF_DEltX4 DElt;
-    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
-    U32 s;
-
-    /* get pre-calculated rankVal */
-    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
-    /* fill skipped values */
-    if (minWeight>1)
-    {
-        U32 i, skipSize = rankVal[minWeight];
-        MEM_writeLE16(&(DElt.sequence), baseSeq);
-        DElt.nbBits   = (BYTE)(consumed);
-        DElt.length   = 1;
-        for (i = 0; i < skipSize; i++)
-            DTable[i] = DElt;
-    }
-
-    /* fill DTable */
-    for (s=0; s<sortedListSize; s++)   /* note : sortedSymbols already skipped */
-    {
-        const U32 symbol = sortedSymbols[s].symbol;
-        const U32 weight = sortedSymbols[s].weight;
-        const U32 nbBits = nbBitsBaseline - weight;
-        const U32 length = 1 << (sizeLog-nbBits);
-        const U32 start = rankVal[weight];
-        U32 i = start;
-        const U32 end = start + length;
-
-        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
-        DElt.nbBits = (BYTE)(nbBits + consumed);
-        DElt.length = 2;
-        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
-
-        rankVal[weight] += length;
-    }
-}
-
-typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
-
-static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
-                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
-                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
-                           const U32 nbBitsBaseline)
-{
-    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
-    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
-    const U32 minBits  = nbBitsBaseline - maxWeight;
-    U32 s;
-
-    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
-    /* fill DTable */
-    for (s=0; s<sortedListSize; s++)
-    {
-        const U16 symbol = sortedList[s].symbol;
-        const U32 weight = sortedList[s].weight;
-        const U32 nbBits = nbBitsBaseline - weight;
-        const U32 start = rankVal[weight];
-        const U32 length = 1 << (targetLog-nbBits);
-
-        if (targetLog-nbBits >= minBits)   /* enough room for a second symbol */
-        {
-            U32 sortedRank;
-            int minWeight = nbBits + scaleLog;
-            if (minWeight < 1) minWeight = 1;
-            sortedRank = rankStart[minWeight];
-            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
-                           rankValOrigin[nbBits], minWeight,
-                           sortedList+sortedRank, sortedListSize-sortedRank,
-                           nbBitsBaseline, symbol);
-        }
-        else
-        {
-            U32 i;
-            const U32 end = start + length;
-            HUF_DEltX4 DElt;
-
-            MEM_writeLE16(&(DElt.sequence), symbol);
-            DElt.nbBits   = (BYTE)(nbBits);
-            DElt.length   = 1;
-            for (i = start; i < end; i++)
-                DTable[i] = DElt;
-        }
-        rankVal[weight] += length;
-    }
-}
-
-static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
-{
-    BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
-    sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
-    U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
-    U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
-    U32* const rankStart = rankStart0+1;
-    rankVal_t rankVal;
-    U32 tableLog, maxW, sizeOfSort, nbSymbols;
-    const U32 memLog = DTable[0];
-    const BYTE* ip = (const BYTE*) src;
-    size_t iSize = ip[0];
-    void* ptr = DTable;
-    HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1;
-
-    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32));   /* if compilation fails here, assertion is false */
-    if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
-    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
-
-    iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
-    if (HUF_isError(iSize)) return iSize;
-
-    /* check result */
-    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
-
-    /* find maxWeight */
-    for (maxW = tableLog; rankStats[maxW]==0; maxW--)
-        {if (!maxW) return ERROR(GENERIC); }  /* necessarily finds a solution before maxW==0 */
-
-    /* Get start index of each weight */
-    {
-        U32 w, nextRankStart = 0;
-        for (w=1; w<=maxW; w++)
-        {
-            U32 current = nextRankStart;
-            nextRankStart += rankStats[w];
-            rankStart[w] = current;
-        }
-        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/
-        sizeOfSort = nextRankStart;
-    }
-
-    /* sort symbols by weight */
-    {
-        U32 s;
-        for (s=0; s<nbSymbols; s++)
-        {
-            U32 w = weightList[s];
-            U32 r = rankStart[w]++;
-            sortedSymbol[r].symbol = (BYTE)s;
-            sortedSymbol[r].weight = (BYTE)w;
-        }
-        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
-    }
-
-    /* Build rankVal */
-    {
-        const U32 minBits = tableLog+1 - maxW;
-        U32 nextRankVal = 0;
-        U32 w, consumed;
-        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */
-        U32* rankVal0 = rankVal[0];
-        for (w=1; w<=maxW; w++)
-        {
-            U32 current = nextRankVal;
-            nextRankVal += rankStats[w] << (w+rescale);
-            rankVal0[w] = current;
-        }
-        for (consumed = minBits; consumed <= memLog - minBits; consumed++)
-        {
-            U32* rankValPtr = rankVal[consumed];
-            for (w = 1; w <= maxW; w++)
-            {
-                rankValPtr[w] = rankVal0[w] >> consumed;
-            }
-        }
-    }
-
-    HUF_fillDTableX4(dt, memLog,
-                   sortedSymbol, sizeOfSort,
-                   rankStart0, rankVal, maxW,
-                   tableLog+1);
-
-    return iSize;
-}
-
-
-static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
-{
-    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 2);
-    BIT_skipBits(DStream, dt[val].nbBits);
-    return dt[val].length;
-}
-
-static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
-{
-    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 1);
-    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
-    else
-    {
-        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
-        {
-            BIT_skipBits(DStream, dt[val].nbBits);
-            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
-                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
-        }
-    }
-    return 1;
-}
-
-
-#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
-    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
-        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 8 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
-    {
-        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
-    }
-
-    /* closer to the end */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
-
-    while (p <= pEnd-2)
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
-
-    if (p < pEnd)
-        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
-
-    return p-pStart;
-}
-
-
-
-static size_t HUF_decompress4X4_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const U32* DTable)
-{
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-
-    {
-        const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-
-        const void* ptr = DTable;
-        const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1;
-        const U32 dtLog = DTable[0];
-        size_t errorCode;
-
-        /* Init */
-        BIT_DStream_t bitD1;
-        BIT_DStream_t bitD2;
-        BIT_DStream_t bitD3;
-        BIT_DStream_t bitD4;
-        const size_t length1 = MEM_readLE16(istart);
-        const size_t length2 = MEM_readLE16(istart+2);
-        const size_t length3 = MEM_readLE16(istart+4);
-        size_t length4;
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        const size_t segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-
-        length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        errorCode = BIT_initDStream(&bitD1, istart1, length1);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD2, istart2, length2);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD3, istart3, length3);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD4, istart4, length4);
-        if (HUF_isError(errorCode)) return errorCode;
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
-        {
-            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
-
-            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 is already verified within the main loop */
-
-        /* finish bitStreams one by one */
-        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
-        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
-        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
-        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
-        if (!endSignal) return ERROR(corruption_detected);
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
-    if (HUF_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize;
-    cSrcSize -= hSize;
-
-    return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-
-/**********************************/
-/* quad-symbol decoding           */
-/**********************************/
-typedef struct { BYTE nbBits; BYTE nbBytes; } HUF_DDescX6;
-typedef union { BYTE byte[4]; U32 sequence; } HUF_DSeqX6;
-
-/* recursive, up to level 3; may benefit from <template>-like strategy to nest each level inline */
-static void HUF_fillDTableX6LevelN(HUF_DDescX6* DDescription, HUF_DSeqX6* DSequence, int sizeLog,
-                           const rankVal_t rankValOrigin, const U32 consumed, const int minWeight, const U32 maxWeight,
-                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, const U32* rankStart,
-                           const U32 nbBitsBaseline, HUF_DSeqX6 baseSeq, HUF_DDescX6 DDesc)
-{
-    const int scaleLog = nbBitsBaseline - sizeLog;   /* note : targetLog >= (nbBitsBaseline-1), hence scaleLog <= 1 */
-    const int minBits  = nbBitsBaseline - maxWeight;
-    const U32 level = DDesc.nbBytes;
-    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
-    U32 symbolStartPos, s;
-
-    /* local rankVal, will be modified */
-    memcpy(rankVal, rankValOrigin[consumed], sizeof(rankVal));
-
-    /* fill skipped values */
-    if (minWeight>1)
-    {
-        U32 i;
-        const U32 skipSize = rankVal[minWeight];
-        for (i = 0; i < skipSize; i++)
-        {
-            DSequence[i] = baseSeq;
-            DDescription[i] = DDesc;
-        }
-    }
-
-    /* fill DTable */
-    DDesc.nbBytes++;
-    symbolStartPos = rankStart[minWeight];
-    for (s=symbolStartPos; s<sortedListSize; s++)
-    {
-        const BYTE symbol = sortedSymbols[s].symbol;
-        const U32  weight = sortedSymbols[s].weight;   /* >= 1 (sorted) */
-        const int  nbBits = nbBitsBaseline - weight;   /* >= 1 (by construction) */
-        const int  totalBits = consumed+nbBits;
-        const U32  start  = rankVal[weight];
-        const U32  length = 1 << (sizeLog-nbBits);
-        baseSeq.byte[level] = symbol;
-        DDesc.nbBits = (BYTE)totalBits;
-
-        if ((level<3) && (sizeLog-totalBits >= minBits))   /* enough room for another symbol */
-        {
-            int nextMinWeight = totalBits + scaleLog;
-            if (nextMinWeight < 1) nextMinWeight = 1;
-            HUF_fillDTableX6LevelN(DDescription+start, DSequence+start, sizeLog-nbBits,
-                           rankValOrigin, totalBits, nextMinWeight, maxWeight,
-                           sortedSymbols, sortedListSize, rankStart,
-                           nbBitsBaseline, baseSeq, DDesc);   /* recursive (max : level 3) */
-        }
-        else
-        {
-            U32 i;
-            const U32 end = start + length;
-            for (i = start; i < end; i++)
-            {
-                DDescription[i] = DDesc;
-                DSequence[i] = baseSeq;
-            }
-        }
-        rankVal[weight] += length;
-    }
-}
-
-
-/* note : same preparation as X4 */
-static size_t HUF_readDTableX6 (U32* DTable, const void* src, size_t srcSize)
-{
-    BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
-    sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
-    U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
-    U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
-    U32* const rankStart = rankStart0+1;
-    U32 tableLog, maxW, sizeOfSort, nbSymbols;
-    rankVal_t rankVal;
-    const U32 memLog = DTable[0];
-    const BYTE* ip = (const BYTE*) src;
-    size_t iSize = ip[0];
-
-    if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
-    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
-
-    iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
-    if (HUF_isError(iSize)) return iSize;
-
-    /* check result */
-    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable is too small */
-
-    /* find maxWeight */
-    for (maxW = tableLog; rankStats[maxW]==0; maxW--)
-        { if (!maxW) return ERROR(GENERIC); }  /* necessarily finds a solution before maxW==0 */
-
-
-    /* Get start index of each weight */
-    {
-        U32 w, nextRankStart = 0;
-        for (w=1; w<=maxW; w++)
-        {
-            U32 current = nextRankStart;
-            nextRankStart += rankStats[w];
-            rankStart[w] = current;
-        }
-        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/
-        sizeOfSort = nextRankStart;
-    }
-
-    /* sort symbols by weight */
-    {
-        U32 s;
-        for (s=0; s<nbSymbols; s++)
-        {
-            U32 w = weightList[s];
-            U32 r = rankStart[w]++;
-            sortedSymbol[r].symbol = (BYTE)s;
-            sortedSymbol[r].weight = (BYTE)w;
-        }
-        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
-    }
-
-    /* Build rankVal */
-    {
-        const U32 minBits = tableLog+1 - maxW;
-        U32 nextRankVal = 0;
-        U32 w, consumed;
-        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */
-        U32* rankVal0 = rankVal[0];
-        for (w=1; w<=maxW; w++)
-        {
-            U32 current = nextRankVal;
-            nextRankVal += rankStats[w] << (w+rescale);
-            rankVal0[w] = current;
-        }
-        for (consumed = minBits; consumed <= memLog - minBits; consumed++)
-        {
-            U32* rankValPtr = rankVal[consumed];
-            for (w = 1; w <= maxW; w++)
-            {
-                rankValPtr[w] = rankVal0[w] >> consumed;
-            }
-        }
-    }
-
-
-    /* fill tables */
-    {
-        void* ptr = DTable+1;
-        HUF_DDescX6* DDescription = (HUF_DDescX6*)(ptr);
-        void* dSeqStart = DTable + 1 + ((size_t)1<<(memLog-1));
-        HUF_DSeqX6* DSequence = (HUF_DSeqX6*)(dSeqStart);
-        HUF_DSeqX6 DSeq;
-        HUF_DDescX6 DDesc;
-        DSeq.sequence = 0;
-        DDesc.nbBits = 0;
-        DDesc.nbBytes = 0;
-        HUF_fillDTableX6LevelN(DDescription, DSequence, memLog,
-                       (const U32 (*)[HUF_ABSOLUTEMAX_TABLELOG + 1])rankVal, 0, 1, maxW,
-                       sortedSymbol, sizeOfSort, rankStart0,
-                       tableLog+1, DSeq, DDesc);
-    }
-
-    return iSize;
-}
-
-
-static U32 HUF_decodeSymbolX6(void* op, BIT_DStream_t* DStream, const HUF_DDescX6* dd, const HUF_DSeqX6* ds, const U32 dtLog)
-{
-    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, ds+val, sizeof(HUF_DSeqX6));
-    BIT_skipBits(DStream, dd[val].nbBits);
-    return dd[val].nbBytes;
-}
-
-static U32 HUF_decodeLastSymbolsX6(void* op, const U32 maxL, BIT_DStream_t* DStream,
-                                  const HUF_DDescX6* dd, const HUF_DSeqX6* ds, const U32 dtLog)
-{
-    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    U32 length = dd[val].nbBytes;
-    if (length <= maxL)
-    {
-        memcpy(op, ds+val, length);
-        BIT_skipBits(DStream, dd[val].nbBits);
-        return length;
-    }
-    memcpy(op, ds+val, maxL);
-    if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
-    {
-        BIT_skipBits(DStream, dd[val].nbBits);
-        if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
-            DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
-    }
-    return maxL;
-}
-
-
-#define HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr) \
-    ptr += HUF_decodeSymbolX6(ptr, DStreamPtr, dd, ds, dtLog)
-
-#define HUF_DECODE_SYMBOLX6_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
-        HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr)
-
-#define HUF_DECODE_SYMBOLX6_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr)
-
-static inline size_t HUF_decodeStreamX6(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const U32* DTable, const U32 dtLog)
-{
-    const void* ddPtr = DTable+1;
-    const HUF_DDescX6* dd = (const HUF_DDescX6*)(ddPtr);
-    const void* dsPtr = DTable + 1 + ((size_t)1<<(dtLog-1));
-    const HUF_DSeqX6* ds = (const HUF_DSeqX6*)(dsPtr);
-    BYTE* const pStart = p;
-
-    /* up to 16 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-16))
-    {
-        HUF_DECODE_SYMBOLX6_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX6_1(p, bitDPtr);
-        HUF_DECODE_SYMBOLX6_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX6_0(p, bitDPtr);
-    }
-
-    /* closer to the end, up to 4 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
-        HUF_DECODE_SYMBOLX6_0(p, bitDPtr);
-
-    while (p <= pEnd-4)
-        HUF_DECODE_SYMBOLX6_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
-
-    while (p < pEnd)
-        p += HUF_decodeLastSymbolsX6(p, (U32)(pEnd-p), bitDPtr, dd, ds, dtLog);
-
-    return p-pStart;
-}
-
-
-
-static size_t HUF_decompress4X6_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const U32* DTable)
-{
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-
-    {
-        const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-
-        const U32 dtLog = DTable[0];
-        const void* ddPtr = DTable+1;
-        const HUF_DDescX6* dd = (const HUF_DDescX6*)(ddPtr);
-        const void* dsPtr = DTable + 1 + ((size_t)1<<(dtLog-1));
-        const HUF_DSeqX6* ds = (const HUF_DSeqX6*)(dsPtr);
-        size_t errorCode;
-
-        /* Init */
-        BIT_DStream_t bitD1;
-        BIT_DStream_t bitD2;
-        BIT_DStream_t bitD3;
-        BIT_DStream_t bitD4;
-        const size_t length1 = MEM_readLE16(istart);
-        const size_t length2 = MEM_readLE16(istart+2);
-        const size_t length3 = MEM_readLE16(istart+4);
-        size_t length4;
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        const size_t segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-
-        length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        errorCode = BIT_initDStream(&bitD1, istart1, length1);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD2, istart2, length2);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD3, istart3, length3);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD4, istart4, length4);
-        if (HUF_isError(errorCode)) return errorCode;
-
-        /* 16-64 symbols per loop (4-16 symbols per stream) */
-        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        for ( ; (op3 <= opStart4) && (endSignal==BIT_DStream_unfinished) && (op4<=(oend-16)) ; )
-        {
-            HUF_DECODE_SYMBOLX6_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX6_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX6_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX6_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX6_1(op1, &bitD1);
-            HUF_DECODE_SYMBOLX6_1(op2, &bitD2);
-            HUF_DECODE_SYMBOLX6_1(op3, &bitD3);
-            HUF_DECODE_SYMBOLX6_1(op4, &bitD4);
-            HUF_DECODE_SYMBOLX6_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX6_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX6_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX6_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX6_0(op1, &bitD1);
-            HUF_DECODE_SYMBOLX6_0(op2, &bitD2);
-            HUF_DECODE_SYMBOLX6_0(op3, &bitD3);
-            HUF_DECODE_SYMBOLX6_0(op4, &bitD4);
-
-            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 already verified within the main loop */
-
-        /* finish bitStreams one by one */
-        HUF_decodeStreamX6(op1, &bitD1, opStart2, DTable, dtLog);
-        HUF_decodeStreamX6(op2, &bitD2, opStart3, DTable, dtLog);
-        HUF_decodeStreamX6(op3, &bitD3, opStart4, DTable, dtLog);
-        HUF_decodeStreamX6(op4, &bitD4, oend,     DTable, dtLog);
-
-        /* check */
-        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
-        if (!endSignal) return ERROR(corruption_detected);
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUF_CREATE_STATIC_DTABLEX6(DTable, HUF_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t hSize = HUF_readDTableX6 (DTable, cSrc, cSrcSize);
-    if (HUF_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize;
-    cSrcSize -= hSize;
-
-    return HUF_decompress4X6_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-
-/**********************************/
-/* Generic decompression selector */
-/**********************************/
-
-typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
-static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
-{
-    /* single, double, quad */
-    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
-    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
-    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
-    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
-    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
-    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
-    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
-    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
-    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
-    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
-    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
-    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
-    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
-    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
-    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
-    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
-};
-
-typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-
-static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, HUF_decompress4X6 };
-    /* estimate decompression time */
-    U32 Q;
-    const U32 D256 = (U32)(dstSize >> 8);
-    U32 Dtime[3];
-    U32 algoNb = 0;
-    int n;
-
-    /* validation checks */
-    if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
-    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
-    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
-
-    /* decoder timing evaluation */
-    Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */
-    for (n=0; n<3; n++)
-        Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
-
-    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
-
-    if (Dtime[1] < Dtime[0]) algoNb = 1;
-    if (Dtime[2] < Dtime[algoNb]) algoNb = 2;
-
-    return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
-
-    //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */
-    //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */
-    //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */
-}
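
For reference, a minimal, self-contained sketch of the cost-model selection performed by HUF_decompress above. The input/output sizes and the cost row are hypothetical stand-ins for one line of the algoTime table; the point is only to show how the quantized ratio Q and the per-256-byte decode cost drive the choice between the three decoders.

```c
#include <stdio.h>

typedef struct { unsigned tableTime; unsigned decode256Time; } cost_t;

int main(void)
{
    /* hypothetical compressed/decompressed sizes (cSrcSize < dstSize, as the real code checks) */
    unsigned long cSrcSize = 40000, dstSize = 100000;
    /* hypothetical costs for the single-, double- and quad-symbol decoders at this ratio */
    cost_t row[3] = { {714, 128}, {1418, 74}, {2436, 53} };
    unsigned Q = (unsigned)(cSrcSize * 16 / dstSize);   /* quantized ratio, 0..15 */
    unsigned D256 = (unsigned)(dstSize >> 8);           /* output size in 256-byte units */
    unsigned t[3], best = 0, n;

    for (n = 0; n < 3; n++)
        t[n] = row[n].tableTime + row[n].decode256Time * D256;
    t[1] += t[1] >> 4;   /* small penalty for decoders using larger tables, as in the original */
    t[2] += t[2] >> 3;
    if (t[1] < t[0]) best = 1;
    if (t[2] < t[best]) best = 2;

    printf("Q=%u -> decoder #%u (estimated cost %u)\n", Q, best, t[best]);
    return 0;
}
```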
-/*
-    zstd - standard compression library
-    Copyright (C) 2014-2015, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd source repository : https://github.com/Cyan4973/zstd
-    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-/* ***************************************************************
-*  Tuning parameters
-*****************************************************************/
-/*!
-*  MEMORY_USAGE :
-*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
-*  Increasing memory usage improves compression ratio
-*  Reduced memory usage can improve speed, due to cache effect
-*/
-#define ZSTD_MEMORY_USAGE 17
-
-/*!
- * HEAPMODE :
- * Select how default compression functions will allocate memory for their hash table,
- * in memory stack (0, fastest), or in memory heap (1, requires malloc())
- * Note that the compression context is fairly large; as a consequence, heap memory is recommended.
- */
-#ifndef ZSTD_HEAPMODE
-#  define ZSTD_HEAPMODE 1
-#endif /* ZSTD_HEAPMODE */
-
-/*!
-*  LEGACY_SUPPORT :
-*  decompressor can decode older formats (starting from Zstd 0.1+)
-*/
-#ifndef ZSTD_LEGACY_SUPPORT
-#  define ZSTD_LEGACY_SUPPORT 1
-#endif
-
-
-/* *******************************************************
-*  Includes
-*********************************************************/
-#include <stdlib.h>      /* calloc */
-#include <string.h>      /* memcpy, memmove */
-#include <stdio.h>       /* debug : printf */
-
-
-/* *******************************************************
-*  Compiler specifics
-*********************************************************/
-#ifdef __AVX2__
-#  include <immintrin.h>   /* AVX2 intrinsics */
-#endif
-
-#ifdef _MSC_VER    /* Visual Studio */
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
-#endif
-
-
-/* *******************************************************
-*  Constants
-*********************************************************/
-#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
-#define HASH_TABLESIZE (1 << HASH_LOG)
-#define HASH_MASK (HASH_TABLESIZE - 1)
-
-#define KNUTH 2654435761
-
-#define BIT7 128
-#define BIT6  64
-#define BIT5  32
-#define BIT4  16
-#define BIT1   2
-#define BIT0   1
-
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
-
-#define BLOCKSIZE (128 KB)                 /* define, for static allocation */
-#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
-#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
-#define IS_RAW BIT0
-#define IS_RLE BIT1
-
-#define WORKPLACESIZE (BLOCKSIZE*3)
-#define MINMATCH 4
-#define MLbits   7
-#define LLbits   6
-#define Offbits  5
-#define MaxML  ((1<<MLbits )-1)
-#define MaxLL  ((1<<LLbits )-1)
-#define MaxOff   31
-#define LitFSELog  11
-#define MLFSELog   10
-#define LLFSELog   10
-#define OffFSELog   9
-#define MAX(a,b) ((a)<(b)?(b):(a))
-#define MaxSeq MAX(MaxLL, MaxML)
-
-#define LITERAL_NOENTROPY 63
-#define COMMAND_NOENTROPY 7   /* to remove */
-
-#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
-
-static const size_t ZSTD_blockHeaderSize = 3;
-static const size_t ZSTD_frameHeaderSize = 4;
-
-
-/* *******************************************************
-*  Memory operations
-**********************************************************/
-static void   ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
-
-static void   ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
-
-#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
-
-/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
-static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
-{
-    const BYTE* ip = (const BYTE*)src;
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + length;
-    do COPY8(op, ip) while (op < oend);
-}
-
-
-/* **************************************
-*  Local structures
-****************************************/
-typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
-
-typedef struct
-{
-    blockType_t blockType;
-    U32 origSize;
-} blockProperties_t;
-
-typedef struct {
-    void* buffer;
-    U32*  offsetStart;
-    U32*  offset;
-    BYTE* offCodeStart;
-    BYTE* offCode;
-    BYTE* litStart;
-    BYTE* lit;
-    BYTE* litLengthStart;
-    BYTE* litLength;
-    BYTE* matchLengthStart;
-    BYTE* matchLength;
-    BYTE* dumpsStart;
-    BYTE* dumps;
-} seqStore_t;
-
-
-/* *************************************
-*  Error Management
-***************************************/
-/*! ZSTD_isError
-*   tells if a return value is an error code */
-static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
-
-
-
-/* *************************************************************
-*   Decompression section
-***************************************************************/
-struct ZSTD_DCtx_s
-{
-    U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
-    U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
-    U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
-    void* previousDstEnd;
-    void* base;
-    size_t expected;
-    blockType_t bType;
-    U32 phase;
-    const BYTE* litPtr;
-    size_t litSize;
-    BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
-};   /* typedef'd to ZSTD_Dctx within "zstd_static.h" */
-
-
-static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
-{
-    const BYTE* const in = (const BYTE* const)src;
-    BYTE headerFlags;
-    U32 cSize;
-
-    if (srcSize < 3) return ERROR(srcSize_wrong);
-
-    headerFlags = *in;
-    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
-
-    bpPtr->blockType = (blockType_t)(headerFlags >> 6);
-    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
-
-    if (bpPtr->blockType == bt_end) return 0;
-    if (bpPtr->blockType == bt_rle) return 1;
-    return cSize;
-}
-
-static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
-    memcpy(dst, src, srcSize);
-    return srcSize;
-}
-
-
-/** ZSTD_decompressLiterals
-    @return : nb of bytes read from src, or an error code*/
-static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
-                                const void* src, size_t srcSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-
-    const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
-    const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
-
-    if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
-    if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
-
-    if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
-
-    *maxDstSizePtr = litSize;
-    return litCSize + 5;
-}
-
-
-/** ZSTD_decodeLiteralsBlock
-    @return : nb of bytes read from src (< srcSize )*/
-static size_t ZSTD_decodeLiteralsBlock(void* ctx,
-                          const void* src, size_t srcSize)
-{
-    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
-    const BYTE* const istart = (const BYTE* const)src;
-
-    /* any compressed block with literals segment must be at least this size */
-    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
-
-    switch(*istart & 3)
-    {
-    default:
-    case 0:
-        {
-            size_t litSize = BLOCKSIZE;
-            const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            memset(dctx->litBuffer + dctx->litSize, 0, 8);
-            return readSize;   /* works if it's an error too */
-        }
-    case IS_RAW:
-        {
-            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
-            if (litSize > srcSize-11)   /* risk of reading too far with wildcopy */
-            {
-                if (litSize > srcSize-3) return ERROR(corruption_detected);
-                memcpy(dctx->litBuffer, istart, litSize);
-                dctx->litPtr = dctx->litBuffer;
-                dctx->litSize = litSize;
-                memset(dctx->litBuffer + dctx->litSize, 0, 8);
-                return litSize+3;
-            }
-            /* direct reference into compressed stream */
-            dctx->litPtr = istart+3;
-            dctx->litSize = litSize;
-            return litSize+3;
-        }
-    case IS_RLE:
-        {
-            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
-            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
-            memset(dctx->litBuffer, istart[3], litSize + 8);
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            return 4;
-        }
-    }
-}
-
-
-static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
-                         FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
-                         const void* src, size_t srcSize)
-{
-    const BYTE* const istart = (const BYTE* const)src;
-    const BYTE* ip = istart;
-    const BYTE* const iend = istart + srcSize;
-    U32 LLtype, Offtype, MLtype;
-    U32 LLlog, Offlog, MLlog;
-    size_t dumpsLength;
-
-    /* check */
-    if (srcSize < 5) return ERROR(srcSize_wrong);
-
-    /* SeqHead */
-    *nbSeq = MEM_readLE16(ip); ip+=2;
-    LLtype  = *ip >> 6;
-    Offtype = (*ip >> 4) & 3;
-    MLtype  = (*ip >> 2) & 3;
-    if (*ip & 2)
-    {
-        dumpsLength  = ip[2];
-        dumpsLength += ip[1] << 8;
-        ip += 3;
-    }
-    else
-    {
-        dumpsLength  = ip[1];
-        dumpsLength += (ip[0] & 1) << 8;
-        ip += 2;
-    }
-    *dumpsPtr = ip;
-    ip += dumpsLength;
-    *dumpsLengthPtr = dumpsLength;
-
-    /* check */
-    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
-
-    /* sequences */
-    {
-        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL and MaxOff */
-        size_t headerSize;
-
-        /* Build DTables */
-        switch(LLtype)
-        {
-        case bt_rle :
-            LLlog = 0;
-            FSE_buildDTable_rle(DTableLL, *ip++); break;
-        case bt_raw :
-            LLlog = LLbits;
-            FSE_buildDTable_raw(DTableLL, LLbits); break;
-        default :
-            {   U32 max = MaxLL;
-                headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
-                if (FSE_isError(headerSize)) return ERROR(GENERIC);
-                if (LLlog > LLFSELog) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSE_buildDTable(DTableLL, norm, max, LLlog);
-        }   }
-
-        switch(Offtype)
-        {
-        case bt_rle :
-            Offlog = 0;
-            if (ip > iend-2) return ERROR(srcSize_wrong);   /* min : "raw", hence no header, but at least xxLog bits */
-            FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
-            break;
-        case bt_raw :
-            Offlog = Offbits;
-            FSE_buildDTable_raw(DTableOffb, Offbits); break;
-        default :
-            {   U32 max = MaxOff;
-                headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
-                if (FSE_isError(headerSize)) return ERROR(GENERIC);
-                if (Offlog > OffFSELog) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSE_buildDTable(DTableOffb, norm, max, Offlog);
-        }   }
-
-        switch(MLtype)
-        {
-        case bt_rle :
-            MLlog = 0;
-            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
-            FSE_buildDTable_rle(DTableML, *ip++); break;
-        case bt_raw :
-            MLlog = MLbits;
-            FSE_buildDTable_raw(DTableML, MLbits); break;
-        default :
-            {   U32 max = MaxML;
-                headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
-                if (FSE_isError(headerSize)) return ERROR(GENERIC);
-                if (MLlog > MLFSELog) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSE_buildDTable(DTableML, norm, max, MLlog);
-    }   }   }
-
-    return ip-istart;
-}
-
-
-typedef struct {
-    size_t litLength;
-    size_t offset;
-    size_t matchLength;
-} seq_t;
-
-typedef struct {
-    BIT_DStream_t DStream;
-    FSE_DState_t stateLL;
-    FSE_DState_t stateOffb;
-    FSE_DState_t stateML;
-    size_t prevOffset;
-    const BYTE* dumps;
-    const BYTE* dumpsEnd;
-} seqState_t;
-
-
-static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
-{
-    size_t litLength;
-    size_t prevOffset;
-    size_t offset;
-    size_t matchLength;
-    const BYTE* dumps = seqState->dumps;
-    const BYTE* const de = seqState->dumpsEnd;
-
-    /* Literal length */
-    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
-    prevOffset = litLength ? seq->offset : seqState->prevOffset;
-    seqState->prevOffset = seq->offset;
-    if (litLength == MaxLL)
-    {
-        const U32 add = dumps<de ? *dumps++ : 0;
-        if (add < 255) litLength += add;
-        else if (dumps + 3 <= de)
-        {
-            litLength = MEM_readLE24(dumps);
-            dumps += 3;
-        }
-        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
-    }
-
-    /* Offset */
-    {
-        static const size_t offsetPrefix[MaxOff+1] = {  /* note : size_t faster than U32 */
-                1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
-                512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
-                524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
-        U32 offsetCode, nbBits;
-        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));   /* <= maxOff, by table construction */
-        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
-        nbBits = offsetCode - 1;
-        if (offsetCode==0) nbBits = 0;   /* cmove */
-        offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
-        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
-        if (offsetCode==0) offset = prevOffset;   /* cmove */
-    }
-
-    /* MatchLength */
-    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
-    if (matchLength == MaxML)
-    {
-        const U32 add = dumps<de ? *dumps++ : 0;
-        if (add < 255) matchLength += add;
-        else if (dumps + 3 <= de)
-        {
-            matchLength = MEM_readLE24(dumps);
-            dumps += 3;
-        }
-        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
-    }
-    matchLength += MINMATCH;
-
-    /* save result */
-    seq->litLength = litLength;
-    seq->offset = offset;
-    seq->matchLength = matchLength;
-    seqState->dumps = dumps;
-}
-
-
-static size_t ZSTD_execSequence(BYTE* op,
-                                seq_t sequence,
-                                const BYTE** litPtr, const BYTE* const litLimit,
-                                BYTE* const base, BYTE* const oend)
-{
-    static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
-    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */
-    const BYTE* const ostart = op;
-    BYTE* const oLitEnd = op + sequence.litLength;
-    BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength;   /* risk : address space overflow (32-bits) */
-    BYTE* const oend_8 = oend-8;
-    const BYTE* const litEnd = *litPtr + sequence.litLength;
-
-    /* checks */
-    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */
-    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
-    if (litEnd > litLimit) return ERROR(corruption_detected);   /* overRead beyond lit buffer */
-
-    /* copy Literals */
-    ZSTD_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
-    op = oLitEnd;
-    *litPtr = litEnd;   /* update for next sequence */
-
-    /* copy Match */
-    {
-        const BYTE* match = op - sequence.offset;
-
-        /* check */
-        if (sequence.offset > (size_t)op) return ERROR(corruption_detected);   /* address space overflow test (this test seems kept by clang optimizer) */
-        //if (match > op) return ERROR(corruption_detected);   /* address space overflow test (is clang optimizer removing this test ?) */
-        if (match < base) return ERROR(corruption_detected);
-
-        /* close range match, overlap */
-        if (sequence.offset < 8)
-        {
-            const int dec64 = dec64table[sequence.offset];
-            op[0] = match[0];
-            op[1] = match[1];
-            op[2] = match[2];
-            op[3] = match[3];
-            match += dec32table[sequence.offset];
-            ZSTD_copy4(op+4, match);
-            match -= dec64;
-        }
-        else
-        {
-            ZSTD_copy8(op, match);
-        }
-        op += 8; match += 8;
-
-        if (oMatchEnd > oend-(16-MINMATCH))
-        {
-            if (op < oend_8)
-            {
-                ZSTD_wildcopy(op, match, oend_8 - op);
-                match += oend_8 - op;
-                op = oend_8;
-            }
-            while (op < oMatchEnd) *op++ = *match++;
-        }
-        else
-        {
-            ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
-        }
-    }
-
-    return oMatchEnd - ostart;
-}
-
-static size_t ZSTD_decompressSequences(
-                               void* ctx,
-                               void* dst, size_t maxDstSize,
-                         const void* seqStart, size_t seqSize)
-{
-    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
-    const BYTE* ip = (const BYTE*)seqStart;
-    const BYTE* const iend = ip + seqSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* op = ostart;
-    BYTE* const oend = ostart + maxDstSize;
-    size_t errorCode, dumpsLength;
-    const BYTE* litPtr = dctx->litPtr;
-    const BYTE* const litEnd = litPtr + dctx->litSize;
-    int nbSeq;
-    const BYTE* dumps;
-    U32* DTableLL = dctx->LLTable;
-    U32* DTableML = dctx->MLTable;
-    U32* DTableOffb = dctx->OffTable;
-    BYTE* const base = (BYTE*) (dctx->base);
-
-    /* Build Decoding Tables */
-    errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
-                                      DTableLL, DTableML, DTableOffb,
-                                      ip, iend-ip);
-    if (ZSTD_isError(errorCode)) return errorCode;
-    ip += errorCode;
-
-    /* Regen sequences */
-    {
-        seq_t sequence;
-        seqState_t seqState;
-
-        memset(&sequence, 0, sizeof(sequence));
-        seqState.dumps = dumps;
-        seqState.dumpsEnd = dumps + dumpsLength;
-        seqState.prevOffset = 1;
-        errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
-        if (ERR_isError(errorCode)) return ERROR(corruption_detected);
-        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
-        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
-        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
-
-        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; )
-        {
-            size_t oneSeqSize;
-            nbSeq--;
-            ZSTD_decodeSequence(&sequence, &seqState);
-            oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
-            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
-            op += oneSeqSize;
-        }
-
-        /* check if reached exact end */
-        if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* requested too much : data is corrupted */
-        if (nbSeq<0) return ERROR(corruption_detected);   /* requested too many sequences : data is corrupted */
-
-        /* last literal segment */
-        {
-            size_t lastLLSize = litEnd - litPtr;
-            if (litPtr > litEnd) return ERROR(corruption_detected);
-            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
-            if (op != litPtr) memmove(op, litPtr, lastLLSize);
-            op += lastLLSize;
-        }
-    }
-
-    return op-ostart;
-}
-
-
-static size_t ZSTD_decompressBlock(
-                            void* ctx,
-                            void* dst, size_t maxDstSize,
-                      const void* src, size_t srcSize)
-{
-    /* blockType == blockCompressed */
-    const BYTE* ip = (const BYTE*)src;
-
-    /* Decode literals sub-block */
-    size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize);
-    if (ZSTD_isError(litCSize)) return litCSize;
-    ip += litCSize;
-    srcSize -= litCSize;
-
-    return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize);
-}
-
-
-static size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-    const BYTE* iend = ip + srcSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* op = ostart;
-    BYTE* const oend = ostart + maxDstSize;
-    size_t remainingSize = srcSize;
-    U32 magicNumber;
-    blockProperties_t blockProperties;
-
-    /* Frame Header */
-    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
-    magicNumber = MEM_readLE32(src);
-    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
-    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
-
-    /* Loop on each block */
-    while (1)
-    {
-        size_t decodedSize=0;
-        size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
-        if (ZSTD_isError(cBlockSize)) return cBlockSize;
-
-        ip += ZSTD_blockHeaderSize;
-        remainingSize -= ZSTD_blockHeaderSize;
-        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
-
-        switch(blockProperties.blockType)
-        {
-        case bt_compressed:
-            decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize);
-            break;
-        case bt_raw :
-            decodedSize = ZSTD_copyUncompressedBlock(op, oend-op, ip, cBlockSize);
-            break;
-        case bt_rle :
-            return ERROR(GENERIC);   /* not yet supported */
-            break;
-        case bt_end :
-            /* end of frame */
-            if (remainingSize) return ERROR(srcSize_wrong);
-            break;
-        default:
-            return ERROR(GENERIC);   /* impossible */
-        }
-        if (cBlockSize == 0) break;   /* bt_end */
-
-        if (ZSTD_isError(decodedSize)) return decodedSize;
-        op += decodedSize;
-        ip += cBlockSize;
-        remainingSize -= cBlockSize;
-    }
-
-    return op-ostart;
-}
-
-static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    ZSTD_DCtx ctx;
-    ctx.base = dst;
-    return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
-}
-
-/* ZSTD_errorFrameSizeInfoLegacy() :
-   assumes `cSize` and `dBound` are _not_ NULL */
-static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
-{
-    *cSize = ret;
-    *dBound = ZSTD_CONTENTSIZE_ERROR;
-}
-
-void ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
-{
-    const BYTE* ip = (const BYTE*)src;
-    size_t remainingSize = srcSize;
-    size_t nbBlocks = 0;
-    U32 magicNumber;
-    blockProperties_t blockProperties;
-
-    /* Frame Header */
-    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
-        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-        return;
-    }
-    magicNumber = MEM_readLE32(src);
-    if (magicNumber != ZSTD_magicNumber) {
-        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
-        return;
-    }
-    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
-
-    /* Loop on each block */
-    while (1)
-    {
-        size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
-        if (ZSTD_isError(cBlockSize)) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
-            return;
-        }
-
-        ip += ZSTD_blockHeaderSize;
-        remainingSize -= ZSTD_blockHeaderSize;
-        if (cBlockSize > remainingSize) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-            return;
-        }
-
-        if (cBlockSize == 0) break;   /* bt_end */
-
-        ip += cBlockSize;
-        remainingSize -= cBlockSize;
-        nbBlocks++;
-    }
-
-    *cSize = ip - (const BYTE*)src;
-    *dBound = nbBlocks * BLOCKSIZE;
-}
-
-/*******************************
-*  Streaming Decompression API
-*******************************/
-
-static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
-{
-    dctx->expected = ZSTD_frameHeaderSize;
-    dctx->phase = 0;
-    dctx->previousDstEnd = NULL;
-    dctx->base = NULL;
-    return 0;
-}
-
-static ZSTD_DCtx* ZSTD_createDCtx(void)
-{
-    ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
-    if (dctx==NULL) return NULL;
-    ZSTD_resetDCtx(dctx);
-    return dctx;
-}
-
-static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
-{
-    free(dctx);
-    return 0;
-}
-
-static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
-{
-    return dctx->expected;
-}
-
-static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    /* Sanity check */
-    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
-    if (dst != ctx->previousDstEnd)  /* not contiguous */
-        ctx->base = dst;
-
-    /* Decompress : frame header */
-    if (ctx->phase == 0)
-    {
-        /* Check frame magic header */
-        U32 magicNumber = MEM_readLE32(src);
-        if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
-        ctx->phase = 1;
-        ctx->expected = ZSTD_blockHeaderSize;
-        return 0;
-    }
-
-    /* Decompress : block header */
-    if (ctx->phase == 1)
-    {
-        blockProperties_t bp;
-        size_t blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
-        if (ZSTD_isError(blockSize)) return blockSize;
-        if (bp.blockType == bt_end)
-        {
-            ctx->expected = 0;
-            ctx->phase = 0;
-        }
-        else
-        {
-            ctx->expected = blockSize;
-            ctx->bType = bp.blockType;
-            ctx->phase = 2;
-        }
-
-        return 0;
-    }
-
-    /* Decompress : block content */
-    {
-        size_t rSize;
-        switch(ctx->bType)
-        {
-        case bt_compressed:
-            rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
-            break;
-        case bt_raw :
-            rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
-            break;
-        case bt_rle :
-            return ERROR(GENERIC);   /* not yet handled */
-            break;
-        case bt_end :   /* should never happen (filtered at phase 1) */
-            rSize = 0;
-            break;
-        default:
-            return ERROR(GENERIC);
-        }
-        ctx->phase = 1;
-        ctx->expected = ZSTD_blockHeaderSize;
-        ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
-        return rSize;
-    }
-
-}
-
-
-/* wrapper layer */
-
-unsigned ZSTDv02_isError(size_t code)
-{
-    return ZSTD_isError(code);
-}
-
-size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
-                     const void* src, size_t compressedSize)
-{
-    return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
-}
-
-ZSTDv02_Dctx* ZSTDv02_createDCtx(void)
-{
-    return (ZSTDv02_Dctx*)ZSTD_createDCtx();
-}
-
-size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx)
-{
-    return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
-}
-
-size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx)
-{
-    return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
-}
-
-size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx)
-{
-    return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
-}
-
-size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
-}
diff --git a/vendor/github.com/DataDog/zstd/zstd_v02.h b/vendor/github.com/DataDog/zstd/zstd_v02.h
deleted file mode 100644
index 9d7d8d9..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_v02.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_V02_H_4174539423
-#define ZSTD_V02_H_4174539423
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* *************************************
-*  Includes
-***************************************/
-#include <stddef.h>   /* size_t */
-
-
-/* *************************************
-*  Simple one-step function
-***************************************/
-/**
-ZSTDv02_decompress() : decompress ZSTD frames compliant with v0.2.x format
-    compressedSize : is the exact source size
-    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
-                      It must be equal or larger than originalSize, otherwise decompression will fail.
-    return : the number of bytes decompressed into destination buffer (originalSize)
-             or an errorCode if it fails (which can be tested using ZSTDv02_isError())
-*/
-size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
-                     const void* src, size_t compressedSize);
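
As a point of reference for this removed header, here is a minimal, hedged usage sketch of the one-step API declared above. The wrapper name and error-handling policy are illustrative only; ZSTDv02_isError is the error test declared further down in this header.

```c
#include <stdio.h>
#include "zstd_v02.h"

/* illustrative wrapper: decompress one v0.2 frame into a caller-provided buffer */
static size_t decompress_v02(void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize)
{
    size_t const r = ZSTDv02_decompress(dst, dstCapacity, src, srcSize);
    if (ZSTDv02_isError(r)) {
        fprintf(stderr, "ZSTDv02 decompression failed\n");
        return 0;   /* this sketch treats 0 as failure */
    }
    return r;       /* number of bytes written into dst */
}
```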
-
- /**
- ZSTDv02_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.2.x format
-     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
-     cSize (output parameter)  : the number of bytes that would be read to decompress this frame
-                                 or an error code if it fails (which can be tested using ZSTDv02_isError())
-     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
-                                 or ZSTD_CONTENTSIZE_ERROR if an error occurs
-
-    note : assumes `cSize` and `dBound` are _not_ NULL.
- */
-void ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
-                                     size_t* cSize, unsigned long long* dBound);
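
A small hedged sketch of how the frame-inspection helper above might be called; the function name inspect_v02_frame and the printed messages are illustrative, not part of the library.

```c
#include <stdio.h>
#include "zstd_v02.h"

/* illustrative: report the compressed span and an upper bound on the decompressed size */
static void inspect_v02_frame(const void* src, size_t srcSize)
{
    size_t cSize;
    unsigned long long dBound;

    ZSTDv02_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
    if (ZSTDv02_isError(cSize)) {
        fprintf(stderr, "not a valid v0.2 frame\n");
        return;
    }
    printf("frame occupies %zu input bytes, decompressed size <= %llu\n", cSize, dBound);
}
```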
-
-/**
-ZSTDv02_isError() : tells if the result of ZSTDv02_decompress() is an error
-*/
-unsigned ZSTDv02_isError(size_t code);
-
-
-/* *************************************
-*  Advanced functions
-***************************************/
-typedef struct ZSTDv02_Dctx_s ZSTDv02_Dctx;
-ZSTDv02_Dctx* ZSTDv02_createDCtx(void);
-size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx);
-
-size_t ZSTDv02_decompressDCtx(void* ctx,
-                              void* dst, size_t maxOriginalSize,
-                        const void* src, size_t compressedSize);
-
-/* *************************************
-*  Streaming functions
-***************************************/
-size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx);
-
-size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx);
-size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
-/**
-  Use the above functions in alternation.
-  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
-  ZSTD_decompressContinue() will use previously decoded data blocks to improve decompression when they are located immediately prior to the current block.
-  Result is the number of bytes regenerated within 'dst'.
-  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
-*/
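
To make the alternation described above concrete, here is a minimal sketch of the streaming loop, assuming the whole compressed frame is already in memory. The name stream_decompress_v02 and the truncation/error policy are illustrative simplifications.

```c
#include "zstd_v02.h"

/* illustrative: drive the streaming decoder over an in-memory frame */
static size_t stream_decompress_v02(void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    ZSTDv02_Dctx* const dctx = ZSTDv02_createDCtx();
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    size_t remaining = srcSize;
    size_t produced = 0;

    if (dctx == NULL) return 0;
    ZSTDv02_resetDCtx(dctx);

    for (;;) {
        size_t const toRead = ZSTDv02_nextSrcSizeToDecompress(dctx);
        size_t regenerated;
        if (toRead == 0) break;                  /* frame fully decoded */
        if (toRead > remaining) break;           /* truncated input: give up in this sketch */
        regenerated = ZSTDv02_decompressContinue(dctx, op, dstCapacity - produced, ip, toRead);
        if (ZSTDv02_isError(regenerated)) break; /* corrupted data: give up in this sketch */
        op += regenerated;
        produced += regenerated;
        ip += toRead;
        remaining -= toRead;
    }

    ZSTDv02_freeDCtx(dctx);
    return produced;
}
```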
-
-/* *************************************
-*  Prefix - version detection
-***************************************/
-#define ZSTDv02_magicNumber 0xFD2FB522   /* v0.2 */
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_V02_H_4174539423 */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v03.c b/vendor/github.com/DataDog/zstd/zstd_v03.c
deleted file mode 100644
index 7a0e7c9..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_v03.c
+++ /dev/null
@@ -1,3155 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-#include <stddef.h>    /* size_t, ptrdiff_t */
-#include "zstd_v03.h"
-#include "error_private.h"
-
-
-/******************************************
-*  Compiler-specific
-******************************************/
-#if defined(_MSC_VER)   /* Visual Studio */
-#   include <stdlib.h>  /* _byteswap_ulong */
-#   include <intrin.h>  /* _byteswap_* */
-#endif
-
-
-
-/* ******************************************************************
-   mem.h
-   low-level memory access routines
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef MEM_H_MODULE
-#define MEM_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/******************************************
-*  Includes
-******************************************/
-#include <stddef.h>    /* size_t, ptrdiff_t */
-#include <string.h>    /* memcpy */
-
-
-/******************************************
-*  Compiler-specific
-******************************************/
-#if defined(__GNUC__)
-#  define MEM_STATIC static __attribute__((unused))
-#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#  define MEM_STATIC static inline
-#elif defined(_MSC_VER)
-#  define MEM_STATIC static __inline
-#else
-#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
-#endif
-
-
-/****************************************************************
-*  Basic Types
-*****************************************************************/
-#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-# include <stdint.h>
-  typedef  uint8_t BYTE;
-  typedef uint16_t U16;
-  typedef  int16_t S16;
-  typedef uint32_t U32;
-  typedef  int32_t S32;
-  typedef uint64_t U64;
-  typedef  int64_t S64;
-#else
-  typedef unsigned char       BYTE;
-  typedef unsigned short      U16;
-  typedef   signed short      S16;
-  typedef unsigned int        U32;
-  typedef   signed int        S32;
-  typedef unsigned long long  U64;
-  typedef   signed long long  S64;
-#endif
-
-
-/****************************************************************
-*  Memory I/O
-*****************************************************************/
-/* MEM_FORCE_MEMORY_ACCESS
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The switch below allows selecting a different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It relies on a compiler extension (i.e., not portable).
- *            This method is safe if your compiler supports it, and *generally* as fast as or faster than `memcpy`.
- * Method 2 : direct access. This method is portable but violates the C standard.
- *            It can generate buggy code on targets where the generated assembly depends on alignment.
- *            But in some circumstances, it is the only known way to get the best performance (i.e. GCC + ARMv6).
- * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-#    define MEM_FORCE_MEMORY_ACCESS 2
-#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
-  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
-#    define MEM_FORCE_MEMORY_ACCESS 1
-#  endif
-#endif
-
-MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
-MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
-
-MEM_STATIC unsigned MEM_isLittleEndian(void)
-{
-    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
-    return one.c[0];
-}
-
-#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
-
-/* violates C standard on structure alignment.
-Only use if no other choice to achieve best performance on target platform */
-MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
-MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
-MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
-
-#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
-
-MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
-MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
-
-#else
-
-/* default method, safe and standard.
-   can sometimes prove slower */
-
-MEM_STATIC U16 MEM_read16(const void* memPtr)
-{
-    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC U32 MEM_read32(const void* memPtr)
-{
-    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC U64 MEM_read64(const void* memPtr)
-{
-    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value)
-{
-    memcpy(memPtr, &value, sizeof(value));
-}
-
-
-#endif // MEM_FORCE_MEMORY_ACCESS
-
-
-MEM_STATIC U16 MEM_readLE16(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read16(memPtr);
-    else
-    {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U16)(p[0] + (p[1]<<8));
-    }
-}
-
-MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
-{
-    if (MEM_isLittleEndian())
-    {
-        MEM_write16(memPtr, val);
-    }
-    else
-    {
-        BYTE* p = (BYTE*)memPtr;
-        p[0] = (BYTE)val;
-        p[1] = (BYTE)(val>>8);
-    }
-}
-
-MEM_STATIC U32 MEM_readLE24(const void* memPtr)
-{
-    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
-}
-
-MEM_STATIC U32 MEM_readLE32(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read32(memPtr);
-    else
-    {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
-    }
-}
-
-MEM_STATIC U64 MEM_readLE64(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read64(memPtr);
-    else
-    {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
-                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
-    }
-}
-
-
-MEM_STATIC size_t MEM_readLEST(const void* memPtr)
-{
-    if (MEM_32bits())
-        return (size_t)MEM_readLE32(memPtr);
-    else
-        return (size_t)MEM_readLE64(memPtr);
-}
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* MEM_H_MODULE */
-
-
-/* ******************************************************************
-   bitstream
-   Part of NewGen Entropy library
-   header file (to include)
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef BITSTREAM_H_MODULE
-#define BITSTREAM_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/*
-*  This API consists of small unitary functions, which highly benefit from being inlined.
-*  Since link-time-optimization is not available for all compilers,
-*  these functions are defined in a .h file, to be included where needed.
-*/
-
-
-/**********************************************
-*  bitStream decompression API (read backward)
-**********************************************/
-typedef struct
-{
-    size_t   bitContainer;
-    unsigned bitsConsumed;
-    const char* ptr;
-    const char* start;
-} BIT_DStream_t;
-
-typedef enum { BIT_DStream_unfinished = 0,
-               BIT_DStream_endOfBuffer = 1,
-               BIT_DStream_completed = 2,
-               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
-               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
-
-MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
-MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
-MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
-MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
-
-
-
-/******************************************
-*  unsafe API
-******************************************/
-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
-/* faster, but works only if nbBits >= 1 */
-
-
-
-/****************************************************************
-*  Helper functions
-****************************************************************/
-MEM_STATIC unsigned BIT_highbit32 (U32 val)
-{
-#   if defined(_MSC_VER)   /* Visual */
-    unsigned long r=0;
-    _BitScanReverse ( &r, val );
-    return (unsigned) r;
-#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
-    return 31 - __builtin_clz (val);
-#   else   /* Software version */
-    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
-    U32 v = val;
-    unsigned r;
-    v |= v >> 1;
-    v |= v >> 2;
-    v |= v >> 4;
-    v |= v >> 8;
-    v |= v >> 16;
-    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
-    return r;
-#   endif
-}
-
-
-
-/**********************************************************
-* bitStream decoding
-**********************************************************/
-
-/*!BIT_initDStream
-*  Initialize a BIT_DStream_t.
-*  @bitD : a pointer to an already allocated BIT_DStream_t structure
-*  @srcBuffer must point at the beginning of a bitStream
-*  @srcSize must be the exact size of the bitStream
-*  @result : size of stream (== srcSize) or an errorCode if a problem is detected
-*/
-MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
-{
-    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
-
-    if (srcSize >=  sizeof(size_t))   /* normal case */
-    {
-        U32 contain32;
-        bitD->start = (const char*)srcBuffer;
-        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);
-        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
-        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
-        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
-    }
-    else
-    {
-        U32 contain32;
-        bitD->start = (const char*)srcBuffer;
-        bitD->ptr   = bitD->start;
-        bitD->bitContainer = *(const BYTE*)(bitD->start);
-        switch(srcSize)
-        {
-            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
-                    /* fallthrough */
-            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
-                    /* fallthrough */
-            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
-                    /* fallthrough */
-            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
-                    /* fallthrough */
-            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
-                    /* fallthrough */
-            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8;
-                    /* fallthrough */
-            default:;
-        }
-        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
-        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
-        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
-        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
-    }
-
-    return srcSize;
-}
-MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
-{
-    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
-    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
-}
-
-/*! BIT_lookBitsFast :
-*   unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
-{
-    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
-    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
-}
-
-MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
-{
-    bitD->bitsConsumed += nbBits;
-}
-
-MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
-{
-    size_t value = BIT_lookBits(bitD, nbBits);
-    BIT_skipBits(bitD, nbBits);
-    return value;
-}
-
-/*!BIT_readBitsFast :
-*  unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
-{
-    size_t value = BIT_lookBitsFast(bitD, nbBits);
-    BIT_skipBits(bitD, nbBits);
-    return value;
-}
-
-MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
-{
-    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
-        return BIT_DStream_overflow;
-
-    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
-    {
-        bitD->ptr -= bitD->bitsConsumed >> 3;
-        bitD->bitsConsumed &= 7;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);
-        return BIT_DStream_unfinished;
-    }
-    if (bitD->ptr == bitD->start)
-    {
-        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
-        return BIT_DStream_completed;
-    }
-    {
-        U32 nbBytes = bitD->bitsConsumed >> 3;
-        BIT_DStream_status result = BIT_DStream_unfinished;
-        if (bitD->ptr - nbBytes < bitD->start)
-        {
-            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
-            result = BIT_DStream_endOfBuffer;
-        }
-        bitD->ptr -= nbBytes;
-        bitD->bitsConsumed -= nbBytes*8;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
-        return result;
-    }
-}
-
-/*! BIT_endOfDStream
-*   @return Tells if DStream has reached its exact end
-*/
-MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
-{
-    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
-}
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* BITSTREAM_H_MODULE */
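For orientation, the read-backward API defined above is normally driven as: BIT_initDStream over the whole compressed block, a BIT_readBits call per symbol, one BIT_reloadDStream per loop iteration, and BIT_endOfDStream to confirm the stream ended exactly on its last bit. A minimal sketch under those assumptions; the 5-bit field width and the function name are illustrative only, and ERR_isError/ERROR come from the error-handling header that follows:

```c
/* Illustrative sketch only: drain a backward bitstream in fixed 5-bit fields. */
static size_t drain_5bit_fields(const void* src, size_t srcSize,
                                unsigned* out, size_t outCapacity)
{
    BIT_DStream_t bitD;
    size_t nbDecoded = 0;
    size_t const initResult = BIT_initDStream(&bitD, src, srcSize);
    if (ERR_isError(initResult)) return initResult;        /* e.g. srcSize_wrong, missing end mark */

    while (nbDecoded < outCapacity) {
        out[nbDecoded++] = (unsigned)BIT_readBits(&bitD, 5);   /* consume 5 bits from the container */
        if (BIT_reloadDStream(&bitD) > BIT_DStream_completed)  /* overflow => corrupt input */
            return ERROR(corruption_detected);
        if (BIT_endOfDStream(&bitD)) break;                 /* consumed exactly srcSize*8 bits */
    }
    return nbDecoded;                                       /* number of fields decoded */
}
```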
-/* ******************************************************************
-   Error codes and messages
-   Copyright (C) 2013-2015, Yann Collet
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef ERROR_H_MODULE
-#define ERROR_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/******************************************
-*  Compiler-specific
-******************************************/
-#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#  define ERR_STATIC static inline
-#elif defined(_MSC_VER)
-#  define ERR_STATIC static __inline
-#elif defined(__GNUC__)
-#  define ERR_STATIC static __attribute__((unused))
-#else
-#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
-#endif
-
-
-/******************************************
-*  Error Management
-******************************************/
-#define PREFIX(name) ZSTD_error_##name
-
-#define ERROR(name) (size_t)-PREFIX(name)
-
-#define ERROR_LIST(ITEM) \
-        ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \
-        ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \
-        ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \
-        ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \
-        ITEM(PREFIX(maxCode))
-
-#define ERROR_GENERATE_ENUM(ENUM) ENUM,
-typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes;  /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
-
-#define ERROR_CONVERTTOSTRING(STRING) #STRING,
-#define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR)
-static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) };
-
-ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
-
-ERR_STATIC const char* ERR_getErrorName(size_t code)
-{
-    static const char* codeError = "Unspecified error code";
-    if (ERR_isError(code)) return ERR_strings[-(int)(code)];
-    return codeError;
-}
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ERROR_H_MODULE */
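The ERROR_LIST / ITEM pattern above is the classic X-macro idiom: one list, expanded once with ERROR_GENERATE_ENUM into the ERR_codes enum and once with ERROR_GENERATE_STRING into the parallel ERR_strings table, so the two can never drift out of sync. A standalone sketch of the same idiom, with hypothetical names that are not part of the zstd sources:

```c
#include <stdio.h>

/* X-macro: define the list once ... */
#define COLOR_LIST(ITEM) ITEM(Red) ITEM(Green) ITEM(Blue)

/* ... expand it twice, keeping the enum and the string table in sync. */
#define MAKE_ENUM(name) color_##name,
typedef enum { COLOR_LIST(MAKE_ENUM) color_count } color_t;

#define MAKE_STRING(name) #name,
static const char* color_names[] = { COLOR_LIST(MAKE_STRING) };

int main(void)
{
    printf("%s\n", color_names[color_Green]);   /* prints "Green" */
    return 0;
}
```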
-/*
-Constructor and Destructor of type FSE_CTable
-    Note that its size depends on 'tableLog' and 'maxSymbolValue' */
-typedef unsigned FSE_CTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
-typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
-
-
-/* ******************************************************************
-   FSE : Finite State Entropy coder
-   header file for static linking (only)
-   Copyright (C) 2013-2015, Yann Collet
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/******************************************
-*  Static allocation
-******************************************/
-/* FSE buffer bounds */
-#define FSE_NCOUNTBOUND 512
-#define FSE_BLOCKBOUND(size) (size + (size>>7))
-#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
-
-/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */
-#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
-#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
-
-
-/******************************************
-*  FSE advanced API
-******************************************/
-static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
-/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
-
-static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
-/* build a fake FSE_DTable, designed to always generate the same symbolValue */
-
-
-/******************************************
-*  FSE symbol decompression API
-******************************************/
-typedef struct
-{
-    size_t      state;
-    const void* table;   /* precise table may vary, depending on U16 */
-} FSE_DState_t;
-
-
-static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
-
-static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
-
-static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
-
-
-/******************************************
-*  FSE unsafe API
-******************************************/
-static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
-/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
-
-
-/******************************************
-*  Implementation of inline functions
-******************************************/
-
-/* decompression */
-
-typedef struct {
-    U16 tableLog;
-    U16 fastMode;
-} FSE_DTableHeader;   /* sizeof U32 */
-
-typedef struct
-{
-    unsigned short newState;
-    unsigned char  symbol;
-    unsigned char  nbBits;
-} FSE_decode_t;   /* size == U32 */
-
-MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
-{
-    FSE_DTableHeader DTableH;
-    memcpy(&DTableH, dt, sizeof(DTableH));
-    DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
-    BIT_reloadDStream(bitD);
-    DStatePtr->table = dt + 1;
-}
-
-MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
-{
-    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    const U32  nbBits = DInfo.nbBits;
-    BYTE symbol = DInfo.symbol;
-    size_t lowBits = BIT_readBits(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
-{
-    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    const U32 nbBits = DInfo.nbBits;
-    BYTE symbol = DInfo.symbol;
-    size_t lowBits = BIT_readBitsFast(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
-{
-    return DStatePtr->state == 0;
-}
-
-
-#if defined (__cplusplus)
-}
-#endif
-/* ******************************************************************
-   Huff0 : Huffman coder, part of New Generation Entropy library
-   header file for static linking (only)
-   Copyright (C) 2013-2015, Yann Collet
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/******************************************
-*  Static allocation macros
-******************************************/
-/* Huff0 buffer bounds */
-#define HUF_CTABLEBOUND 129
-#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true if incompressible pre-filtered with fast heuristic */
-#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
-
-/* static allocation of Huff0's DTable */
-#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))  /* nb Cells; use unsigned short for X2, unsigned int for X4 */
-#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
-        unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
-#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
-        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
-#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
-        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
-
-
-/******************************************
-*  Advanced functions
-******************************************/
-static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
-static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-/*
-    zstd - standard compression library
-    Header File
-    Copyright (C) 2014-2015, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd source repository : https://github.com/Cyan4973/zstd
-    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* *************************************
-*  Includes
-***************************************/
-#include <stddef.h>   /* size_t */
-
-
-/* *************************************
-*  Version
-***************************************/
-#define ZSTD_VERSION_MAJOR    0    /* for breaking interface changes  */
-#define ZSTD_VERSION_MINOR    2    /* for new (non-breaking) interface capabilities */
-#define ZSTD_VERSION_RELEASE  2    /* for tweaks, bug-fixes, or development */
-#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
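For the version declared here (0.2.2), ZSTD_VERSION_NUMBER therefore evaluates to 0*100*100 + 2*100 + 2 = 202, giving callers a single integer to compare against at compile time.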
-
-
-/* *************************************
-*  Advanced functions
-***************************************/
-typedef struct ZSTD_CCtx_s ZSTD_CCtx;   /* incomplete type */
-
-#if defined (__cplusplus)
-}
-#endif
-/*
-    zstd - standard compression library
-    Header File for static linking only
-    Copyright (C) 2014-2015, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd source repository : https://github.com/Cyan4973/zstd
-    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-/* The objects defined in this file should be considered experimental.
- * They are not labelled stable, as their prototypes may change in the future.
- * You can use them for tests or to provide feedback, as long as you can tolerate the risk of future changes.
- */
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* *************************************
-*  Streaming functions
-***************************************/
-
-typedef struct ZSTD_DCtx_s ZSTD_DCtx;
-
-/*
-  Use the above functions alternately.
-  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
-  ZSTD_decompressContinue() will use previous data blocks to improve decompression if they are located prior to the current block.
-  The result is the number of bytes regenerated within 'dst'.
-  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
-*/
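In sketch form, the alternating pattern described in the comment above looks like the loop below. This is an illustration only: it assumes the legacy-style prototypes named in that comment, treats a zero result from ZSTD_nextSrcSizeToDecompress() as "frame finished", reuses ERR_isError from the error header earlier in this file, and relies on hypothetical read_exact()/write_out() I/O helpers that are not part of zstd.

```c
/* Hedged sketch of the alternating streaming-decompression loop -- not the library's own code.
 * Assumed prototypes (matching the names in the comment above):
 *   size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
 *   size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity,
 *                                  const void* src, size_t srcSize);
 */
extern int  read_exact(void* buf, size_t n);       /* hypothetical: fill buf with exactly n bytes */
extern void write_out(const void* buf, size_t n);  /* hypothetical: consume n decoded bytes */

static int decompress_stream(ZSTD_DCtx* dctx)
{
    char inBuf[1 << 17];
    char outBuf[1 << 17];

    for (;;) {
        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
        if (toRead == 0) return 0;               /* assumed convention: frame fully decoded */
        if (toRead > sizeof(inBuf)) return -1;   /* illustrative bound only */
        if (read_exact(inBuf, toRead) != 0) return -1;

        {   size_t const regenerated =
                ZSTD_decompressContinue(dctx, outBuf, sizeof(outBuf), inBuf, toRead);
            if (ERR_isError(regenerated)) return -1;
            if (regenerated > 0)                 /* zero just means a header was decoded */
                write_out(outBuf, regenerated);
        }
    }
}
```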
-
-/* *************************************
-*  Prefix - version detection
-***************************************/
-#define ZSTD_magicNumber 0xFD2FB523   /* v0.3 */
-
-
-#if defined (__cplusplus)
-}
-#endif
-/* ******************************************************************
-   FSE : Finite State Entropy coder
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-#ifndef FSE_COMMONDEFS_ONLY
-
-/****************************************************************
-*  Tuning parameters
-****************************************************************/
-/* MEMORY_USAGE :
-*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
-*  Increasing memory usage improves compression ratio
-*  Reduced memory usage can improve speed, due to cache effect
-*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
-#define FSE_MAX_MEMORY_USAGE 14
-#define FSE_DEFAULT_MEMORY_USAGE 13
-
-/* FSE_MAX_SYMBOL_VALUE :
-*  Maximum symbol value authorized.
-*  Required for proper stack allocation */
-#define FSE_MAX_SYMBOL_VALUE 255
-
-
-/****************************************************************
-*  template functions type & suffix
-****************************************************************/
-#define FSE_FUNCTION_TYPE BYTE
-#define FSE_FUNCTION_EXTENSION
-
-
-/****************************************************************
-*  Byte symbol type
-****************************************************************/
-#endif   /* !FSE_COMMONDEFS_ONLY */
-
-
-/****************************************************************
-*  Compiler specifics
-****************************************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  define FORCE_INLINE static __forceinline
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
-#else
-#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#    ifdef __GNUC__
-#      define FORCE_INLINE static inline __attribute__((always_inline))
-#    else
-#      define FORCE_INLINE static inline
-#    endif
-#  else
-#    define FORCE_INLINE static
-#  endif /* __STDC_VERSION__ */
-#endif
-
-
-/****************************************************************
-*  Includes
-****************************************************************/
-#include <stdlib.h>     /* malloc, free, qsort */
-#include <string.h>     /* memcpy, memset */
-#include <stdio.h>      /* printf (debug) */
-
-/****************************************************************
-*  Constants
-*****************************************************************/
-#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
-#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
-#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
-#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
-#define FSE_MIN_TABLELOG 5
-
-#define FSE_TABLELOG_ABSOLUTE_MAX 15
-#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
-#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
-#endif
-
-
-/****************************************************************
-*  Error Management
-****************************************************************/
-#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
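The FSE_STATIC_ASSERT macro above turns a false condition into a division by zero inside an enum initializer, which the compiler must reject at compile time. A standalone sketch of the same idiom; the macro name and the checked conditions here are hypothetical:

```c
/* Same compile-time assertion idiom, shown standalone. */
#define MY_STATIC_ASSERT(c) { enum { my_static_assert = 1/(int)(!!(c)) }; }

static void check_layout(void)
{
    MY_STATIC_ASSERT(sizeof(unsigned) >= 4);    /* true  -> enum value 1/1, compiles            */
    /* MY_STATIC_ASSERT(sizeof(char) == 2); */  /* false -> 1/0 in a constant, compile error    */
}
```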
-
-
-/****************************************************************
-*  Complex types
-****************************************************************/
-typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
-
-
-/****************************************************************
-*  Templates
-****************************************************************/
-/*
-  designed to be included
-  for type-specific functions (template emulation in C)
-  Objective is to write these functions only once, for improved maintenance
-*/
-
-/* safety checks */
-#ifndef FSE_FUNCTION_EXTENSION
-#  error "FSE_FUNCTION_EXTENSION must be defined"
-#endif
-#ifndef FSE_FUNCTION_TYPE
-#  error "FSE_FUNCTION_TYPE must be defined"
-#endif
-
-/* Function names */
-#define FSE_CAT(X,Y) X##Y
-#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
-#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
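To make the "template emulation" above concrete, the expansion below traces how FSE_FUNCTION_NAME builds an identifier; the base name and the _U16 variant are hypothetical, shown only to illustrate why the extra indirection through FSE_CAT is needed:

```c
/* Illustrative expansion only (FSE_someFunction is a hypothetical base name):
 *   FSE_FUNCTION_NAME(FSE_someFunction, FSE_FUNCTION_EXTENSION)
 *     -> FSE_CAT(FSE_someFunction, )       with the empty extension defined above
 *     -> FSE_someFunction
 * If FSE_FUNCTION_EXTENSION were instead defined as _U16, the same invocation
 * would paste to FSE_someFunction_U16.  The indirection through FSE_FUNCTION_NAME
 * ensures the extension macro is expanded before ## concatenates the tokens.
 */
```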
-
-
-/* Function templates */
-
-#define FSE_DECODE_TYPE FSE_decode_t
-
-static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
-
-static size_t FSE_buildDTable
-(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
-    void* ptr = dt+1;
-    FSE_DTableHeader DTableH;
-    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr;
-    const U32 tableSize = 1 << tableLog;
-    const U32 tableMask = tableSize-1;
-    const U32 step = FSE_tableStep(tableSize);
-    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
-    U32 position = 0;
-    U32 highThreshold = tableSize-1;
-    const S16 largeLimit= (S16)(1 << (tableLog-1));
-    U32 noLarge = 1;
-    U32 s;
-
-    /* Sanity Checks */
-    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
-    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
-
-    /* Init, lay down lowprob symbols */
-    DTableH.tableLog = (U16)tableLog;
-    for (s=0; s<=maxSymbolValue; s++)
-    {
-        if (normalizedCounter[s]==-1)
-        {
-            tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
-            symbolNext[s] = 1;
-        }
-        else
-        {
-            if (normalizedCounter[s] >= largeLimit) noLarge=0;
-            symbolNext[s] = normalizedCounter[s];
-        }
-    }
-
-    /* Spread symbols */
-    for (s=0; s<=maxSymbolValue; s++)
-    {
-        int i;
-        for (i=0; i<normalizedCounter[s]; i++)
-        {
-            tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
-            position = (position + step) & tableMask;
-            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
-        }
-    }
-
-    if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
-
-    /* Build Decoding table */
-    {
-        U32 i;
-        for (i=0; i<tableSize; i++)
-        {
-            FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
-            U16 nextState = symbolNext[symbol]++;
-            tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
-            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
-        }
-    }
-
-    DTableH.fastMode = (U16)noLarge;
-    memcpy(dt, &DTableH, sizeof(DTableH));
-    return 0;
-}
-
-
-#ifndef FSE_COMMONDEFS_ONLY
-/******************************************
-*  FSE helper functions
-******************************************/
-static unsigned FSE_isError(size_t code) { return ERR_isError(code); }
-
-
-/****************************************************************
-*  FSE NCount encoding-decoding
-****************************************************************/
-static short FSE_abs(short a)
-{
-    return a<0 ? -a : a;
-}
-
-static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
-                 const void* headerBuffer, size_t hbSize)
-{
-    const BYTE* const istart = (const BYTE*) headerBuffer;
-    const BYTE* const iend = istart + hbSize;
-    const BYTE* ip = istart;
-    int nbBits;
-    int remaining;
-    int threshold;
-    U32 bitStream;
-    int bitCount;
-    unsigned charnum = 0;
-    int previous0 = 0;
-
-    if (hbSize < 4) return ERROR(srcSize_wrong);
-    bitStream = MEM_readLE32(ip);
-    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
-    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
-    bitStream >>= 4;
-    bitCount = 4;
-    *tableLogPtr = nbBits;
-    remaining = (1<<nbBits)+1;
-    threshold = 1<<nbBits;
-    nbBits++;
-
-    while ((remaining>1) && (charnum<=*maxSVPtr))
-    {
-        if (previous0)
-        {
-            unsigned n0 = charnum;
-            while ((bitStream & 0xFFFF) == 0xFFFF)
-            {
-                n0+=24;
-                if (ip < iend-5)
-                {
-                    ip+=2;
-                    bitStream = MEM_readLE32(ip) >> bitCount;
-                }
-                else
-                {
-                    bitStream >>= 16;
-                    bitCount+=16;
-                }
-            }
-            while ((bitStream & 3) == 3)
-            {
-                n0+=3;
-                bitStream>>=2;
-                bitCount+=2;
-            }
-            n0 += bitStream & 3;
-            bitCount += 2;
-            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
-            while (charnum < n0) normalizedCounter[charnum++] = 0;
-            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
-            {
-                ip += bitCount>>3;
-                bitCount &= 7;
-                bitStream = MEM_readLE32(ip) >> bitCount;
-            }
-            else
-                bitStream >>= 2;
-        }
-        {
-            const short max = (short)((2*threshold-1)-remaining);
-            short count;
-
-            if ((bitStream & (threshold-1)) < (U32)max)
-            {
-                count = (short)(bitStream & (threshold-1));
-                bitCount   += nbBits-1;
-            }
-            else
-            {
-                count = (short)(bitStream & (2*threshold-1));
-                if (count >= threshold) count -= max;
-                bitCount   += nbBits;
-            }
-
-            count--;   /* extra accuracy */
-            remaining -= FSE_abs(count);
-            normalizedCounter[charnum++] = count;
-            previous0 = !count;
-            while (remaining < threshold)
-            {
-                nbBits--;
-                threshold >>= 1;
-            }
-
-            {
-                if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
-                {
-                    ip += bitCount>>3;
-                    bitCount &= 7;
-                }
-                else
-                {
-                    bitCount -= (int)(8 * (iend - 4 - ip));
-                    ip = iend - 4;
-                }
-                bitStream = MEM_readLE32(ip) >> (bitCount & 31);
-            }
-        }
-    }
-    if (remaining != 1) return ERROR(GENERIC);
-    *maxSVPtr = charnum-1;
-
-    ip += (bitCount+7)>>3;
-    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
-    return ip-istart;
-}
-
-
-/*********************************************************
-*  Decompression (Byte symbols)
-*********************************************************/
-static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
-{
-    void* ptr = dt;
-    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
-    FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1;
-
-    DTableH->tableLog = 0;
-    DTableH->fastMode = 0;
-
-    cell->newState = 0;
-    cell->symbol = symbolValue;
-    cell->nbBits = 0;
-
-    return 0;
-}
-
-
-static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
-{
-    void* ptr = dt;
-    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
-    FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1;
-    const unsigned tableSize = 1 << nbBits;
-    const unsigned tableMask = tableSize - 1;
-    const unsigned maxSymbolValue = tableMask;
-    unsigned s;
-
-    /* Sanity checks */
-    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
-
-    /* Build Decoding Table */
-    DTableH->tableLog = (U16)nbBits;
-    DTableH->fastMode = 1;
-    for (s=0; s<=maxSymbolValue; s++)
-    {
-        dinfo[s].newState = 0;
-        dinfo[s].symbol = (BYTE)s;
-        dinfo[s].nbBits = (BYTE)nbBits;
-    }
-
-    return 0;
-}
-
-FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
-          void* dst, size_t maxDstSize,
-    const void* cSrc, size_t cSrcSize,
-    const FSE_DTable* dt, const unsigned fast)
-{
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* op = ostart;
-    BYTE* const omax = op + maxDstSize;
-    BYTE* const olimit = omax-3;
-
-    BIT_DStream_t bitD;
-    FSE_DState_t state1;
-    FSE_DState_t state2;
-    size_t errorCode;
-
-    /* Init */
-    errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressedSize */
-    if (FSE_isError(errorCode)) return errorCode;
-
-    FSE_initDState(&state1, &bitD, dt);
-    FSE_initDState(&state2, &bitD, dt);
-
-#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
-
-    /* 4 symbols per loop */
-    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
-    {
-        op[0] = FSE_GETSYMBOL(&state1);
-
-        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            BIT_reloadDStream(&bitD);
-
-        op[1] = FSE_GETSYMBOL(&state2);
-
-        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
-
-        op[2] = FSE_GETSYMBOL(&state1);
-
-        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            BIT_reloadDStream(&bitD);
-
-        op[3] = FSE_GETSYMBOL(&state2);
-    }
-
-    /* tail */
-    /* note : BIT_reloadDStream(&bitD) >= BIT_DStream_endOfBuffer; ends at exactly BIT_DStream_completed */
-    while (1)
-    {
-        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
-            break;
-
-        *op++ = FSE_GETSYMBOL(&state1);
-
-        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
-            break;
-
-        *op++ = FSE_GETSYMBOL(&state2);
-    }
-
-    /* end ? */
-    if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
-        return op-ostart;
-
-    if (op==omax) return ERROR(dstSize_tooSmall);   /* dst buffer is full, but cSrc unfinished */
-
-    return ERROR(corruption_detected);
-}
-
-
-static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
-                            const void* cSrc, size_t cSrcSize,
-                            const FSE_DTable* dt)
-{
-    FSE_DTableHeader DTableH;
-    memcpy(&DTableH, dt, sizeof(DTableH));
-
-    /* select fast mode (static) */
-    if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
-    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
-}
-
-
-static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
-{
-    const BYTE* const istart = (const BYTE*)cSrc;
-    const BYTE* ip = istart;
-    short counting[FSE_MAX_SYMBOL_VALUE+1];
-    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
-    unsigned tableLog;
-    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
-    size_t errorCode;
-
-    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */
-
-    /* normal FSE decoding mode */
-    errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
-    if (FSE_isError(errorCode)) return errorCode;
-    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */
-    ip += errorCode;
-    cSrcSize -= errorCode;
-
-    errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
-    if (FSE_isError(errorCode)) return errorCode;
-
-    /* always return, even if it is an error code */
-    return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
-}
-
-
-
-#endif   /* FSE_COMMONDEFS_ONLY */
-/* ******************************************************************
-   Huff0 : Huffman coder, part of New Generation Entropy library
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-/****************************************************************
-*  Compiler specifics
-****************************************************************/
-#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-/* inline is defined */
-#elif defined(_MSC_VER)
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  define inline __inline
-#else
-#  define inline /* disable inline */
-#endif
-
-
-/****************************************************************
-*  Includes
-****************************************************************/
-#include <stdlib.h>     /* malloc, free, qsort */
-#include <string.h>     /* memcpy, memset */
-#include <stdio.h>      /* printf (debug) */
-
-/****************************************************************
-*  Error Management
-****************************************************************/
-#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
-
-
-/******************************************
-*  Helper functions
-******************************************/
-static unsigned HUF_isError(size_t code) { return ERR_isError(code); }
-
-#define HUF_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
-#define HUF_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
-#define HUF_DEFAULT_TABLELOG  HUF_MAX_TABLELOG   /* tableLog by default, when not specified */
-#define HUF_MAX_SYMBOL_VALUE 255
-#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
-#  error "HUF_MAX_TABLELOG is too large !"
-#endif
-
-
-
-/*********************************************************
-*  Huff0 : Huffman block decompression
-*********************************************************/
-typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */
-
-typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */
-
-typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
-
-/*! HUF_readStats
-    Read compact Huffman tree, saved by HUF_writeCTable
-    @huffWeight : destination buffer
-    @return : size read from `src`
-*/
-static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
-                            U32* nbSymbolsPtr, U32* tableLogPtr,
-                            const void* src, size_t srcSize)
-{
-    U32 weightTotal;
-    U32 tableLog;
-    const BYTE* ip = (const BYTE*) src;
-    size_t iSize;
-    size_t oSize;
-    U32 n;
-
-    if (!srcSize) return ERROR(srcSize_wrong);
-    iSize = ip[0];
-    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzers complain ... */
-
-    if (iSize >= 128)  /* special header */
-    {
-        if (iSize >= (242))   /* RLE */
-        {
-            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
-            oSize = l[iSize-242];
-            memset(huffWeight, 1, hwSize);
-            iSize = 0;
-        }
-        else   /* Incompressible */
-        {
-            oSize = iSize - 127;
-            iSize = ((oSize+1)/2);
-            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-            if (oSize >= hwSize) return ERROR(corruption_detected);
-            ip += 1;
-            for (n=0; n<oSize; n+=2)
-            {
-                huffWeight[n]   = ip[n/2] >> 4;
-                huffWeight[n+1] = ip[n/2] & 15;
-            }
-        }
-    }
-    else  /* header compressed with FSE (normal case) */
-    {
-        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-        oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */
-        if (FSE_isError(oSize)) return oSize;
-    }
-
-    /* collect weight stats */
-    memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
-    weightTotal = 0;
-    for (n=0; n<oSize; n++)
-    {
-        if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
-        rankStats[huffWeight[n]]++;
-        weightTotal += (1 << huffWeight[n]) >> 1;
-    }
-    if (weightTotal == 0) return ERROR(corruption_detected);
-
-    /* get last non-null symbol weight (implied, total must be 2^n) */
-    tableLog = BIT_highbit32(weightTotal) + 1;
-    if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
-    {
-        U32 total = 1 << tableLog;
-        U32 rest = total - weightTotal;
-        U32 verif = 1 << BIT_highbit32(rest);
-        U32 lastWeight = BIT_highbit32(rest) + 1;
-        if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
-        huffWeight[oSize] = (BYTE)lastWeight;
-        rankStats[lastWeight]++;
-    }
-
-    /* check tree construction validity */
-    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
-
-    /* results */
-    *nbSymbolsPtr = (U32)(oSize+1);
-    *tableLogPtr = tableLog;
-    return iSize+1;
-}
-
-
-/**************************/
-/* single-symbol decoding */
-/**************************/
-
-static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
-{
-    BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
-    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */
-    U32 tableLog = 0;
-    const BYTE* ip = (const BYTE*) src;
-    size_t iSize = ip[0];
-    U32 nbSymbols = 0;
-    U32 n;
-    U32 nextRankStart;
-    void* ptr = DTable+1;
-    HUF_DEltX2* const dt = (HUF_DEltX2*)(ptr);
-
-    HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */
-    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzers complain ... */
-
-    iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
-    if (HUF_isError(iSize)) return iSize;
-
-    /* check result */
-    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */
-    DTable[0] = (U16)tableLog;   /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */
-
-    /* Prepare ranks */
-    nextRankStart = 0;
-    for (n=1; n<=tableLog; n++)
-    {
-        U32 current = nextRankStart;
-        nextRankStart += (rankVal[n] << (n-1));
-        rankVal[n] = current;
-    }
-
-    /* fill DTable */
-    for (n=0; n<nbSymbols; n++)
-    {
-        const U32 w = huffWeight[n];
-        const U32 length = (1 << w) >> 1;
-        U32 i;
-        HUF_DEltX2 D;
-        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
-        for (i = rankVal[w]; i < rankVal[w] + length; i++)
-            dt[i] = D;
-        rankVal[w] += length;
-    }
-
-    return iSize;
-}
-
-static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
-{
-        const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
-        const BYTE c = dt[val].byte;
-        BIT_skipBits(Dstream, dt[val].nbBits);
-        return c;
-}
-
-#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
-    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
-        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 4 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
-    {
-        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-    }
-
-    /* closer to the end */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-
-    /* no more data to retrieve from bitstream, hence no need to reload */
-    while (p < pEnd)
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-
-    return pEnd-pStart;
-}
-
-
-static size_t HUF_decompress4X2_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const U16* DTable)
-{
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-
-    {
-        const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-
-        const void* ptr = DTable;
-        const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1;
-        const U32 dtLog = DTable[0];
-        size_t errorCode;
-
-        /* Init */
-        BIT_DStream_t bitD1;
-        BIT_DStream_t bitD2;
-        BIT_DStream_t bitD3;
-        BIT_DStream_t bitD4;
-        const size_t length1 = MEM_readLE16(istart);
-        const size_t length2 = MEM_readLE16(istart+2);
-        const size_t length3 = MEM_readLE16(istart+4);
-        size_t length4;
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        const size_t segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-
-        length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        errorCode = BIT_initDStream(&bitD1, istart1, length1);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD2, istart2, length2);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD3, istart3, length3);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD4, istart4, length4);
-        if (HUF_isError(errorCode)) return errorCode;
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
-        {
-            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
-
-            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 supposed already verified within main loop */
-
-        /* finish bitStreams one by one */
-        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
-        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
-        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
-        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
-        if (!endSignal) return ERROR(corruption_detected);
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-    size_t errorCode;
-
-    errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
-    if (HUF_isError(errorCode)) return errorCode;
-    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += errorCode;
-    cSrcSize -= errorCode;
-
-    return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-
-/***************************/
-/* double-symbols decoding */
-/***************************/
-
-static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
-                           const U32* rankValOrigin, const int minWeight,
-                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
-                           U32 nbBitsBaseline, U16 baseSeq)
-{
-    HUF_DEltX4 DElt;
-    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
-    U32 s;
-
-    /* get pre-calculated rankVal */
-    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
-    /* fill skipped values */
-    if (minWeight>1)
-    {
-        U32 i, skipSize = rankVal[minWeight];
-        MEM_writeLE16(&(DElt.sequence), baseSeq);
-        DElt.nbBits   = (BYTE)(consumed);
-        DElt.length   = 1;
-        for (i = 0; i < skipSize; i++)
-            DTable[i] = DElt;
-    }
-
-    /* fill DTable */
-    for (s=0; s<sortedListSize; s++)   /* note : sortedSymbols already skipped */
-    {
-        const U32 symbol = sortedSymbols[s].symbol;
-        const U32 weight = sortedSymbols[s].weight;
-        const U32 nbBits = nbBitsBaseline - weight;
-        const U32 length = 1 << (sizeLog-nbBits);
-        const U32 start = rankVal[weight];
-        U32 i = start;
-        const U32 end = start + length;
-
-        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
-        DElt.nbBits = (BYTE)(nbBits + consumed);
-        DElt.length = 2;
-        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
-
-        rankVal[weight] += length;
-    }
-}
-
-typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
-
-static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
-                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
-                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
-                           const U32 nbBitsBaseline)
-{
-    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
-    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
-    const U32 minBits  = nbBitsBaseline - maxWeight;
-    U32 s;
-
-    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
-    /* fill DTable */
-    for (s=0; s<sortedListSize; s++)
-    {
-        const U16 symbol = sortedList[s].symbol;
-        const U32 weight = sortedList[s].weight;
-        const U32 nbBits = nbBitsBaseline - weight;
-        const U32 start = rankVal[weight];
-        const U32 length = 1 << (targetLog-nbBits);
-
-        if (targetLog-nbBits >= minBits)   /* enough room for a second symbol */
-        {
-            U32 sortedRank;
-            int minWeight = nbBits + scaleLog;
-            if (minWeight < 1) minWeight = 1;
-            sortedRank = rankStart[minWeight];
-            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
-                           rankValOrigin[nbBits], minWeight,
-                           sortedList+sortedRank, sortedListSize-sortedRank,
-                           nbBitsBaseline, symbol);
-        }
-        else
-        {
-            U32 i;
-            const U32 end = start + length;
-            HUF_DEltX4 DElt;
-
-            MEM_writeLE16(&(DElt.sequence), symbol);
-            DElt.nbBits   = (BYTE)(nbBits);
-            DElt.length   = 1;
-            for (i = start; i < end; i++)
-                DTable[i] = DElt;
-        }
-        rankVal[weight] += length;
-    }
-}
-
-static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
-{
-    BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
-    sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
-    U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
-    U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
-    U32* const rankStart = rankStart0+1;
-    rankVal_t rankVal;
-    U32 tableLog, maxW, sizeOfSort, nbSymbols;
-    const U32 memLog = DTable[0];
-    const BYTE* ip = (const BYTE*) src;
-    size_t iSize = ip[0];
-    void* ptr = DTable;
-    HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1;
-
-    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32));   /* if compilation fails here, assertion is false */
-    if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
-    //memset(weightList, 0, sizeof(weightList));   /* not necessary, even though some analyzers complain ... */
-
-    iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
-    if (HUF_isError(iSize)) return iSize;
-
-    /* check result */
-    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
-
-    /* find maxWeight */
-    for (maxW = tableLog; rankStats[maxW]==0; maxW--)
-        { if (!maxW) return ERROR(GENERIC); }  /* necessarily finds a solution before maxW==0 */
-
-    /* Get start index of each weight */
-    {
-        U32 w, nextRankStart = 0;
-        for (w=1; w<=maxW; w++)
-        {
-            U32 current = nextRankStart;
-            nextRankStart += rankStats[w];
-            rankStart[w] = current;
-        }
-        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/
-        sizeOfSort = nextRankStart;
-    }
-
-    /* sort symbols by weight */
-    {
-        U32 s;
-        for (s=0; s<nbSymbols; s++)
-        {
-            U32 w = weightList[s];
-            U32 r = rankStart[w]++;
-            sortedSymbol[r].symbol = (BYTE)s;
-            sortedSymbol[r].weight = (BYTE)w;
-        }
-        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
-    }
-
-    /* Build rankVal */
-    {
-        const U32 minBits = tableLog+1 - maxW;
-        U32 nextRankVal = 0;
-        U32 w, consumed;
-        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */
-        U32* rankVal0 = rankVal[0];
-        for (w=1; w<=maxW; w++)
-        {
-            U32 current = nextRankVal;
-            nextRankVal += rankStats[w] << (w+rescale);
-            rankVal0[w] = current;
-        }
-        for (consumed = minBits; consumed <= memLog - minBits; consumed++)
-        {
-            U32* rankValPtr = rankVal[consumed];
-            for (w = 1; w <= maxW; w++)
-            {
-                rankValPtr[w] = rankVal0[w] >> consumed;
-            }
-        }
-    }
-
-    HUF_fillDTableX4(dt, memLog,
-                   sortedSymbol, sizeOfSort,
-                   rankStart0, rankVal, maxW,
-                   tableLog+1);
-
-    return iSize;
-}
-
-
-static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
-{
-    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 2);
-    BIT_skipBits(DStream, dt[val].nbBits);
-    return dt[val].length;
-}
-
-static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
-{
-    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 1);
-    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
-    else
-    {
-        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
-        {
-            BIT_skipBits(DStream, dt[val].nbBits);
-            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
-                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
-        }
-    }
-    return 1;
-}
-
-
-#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
-    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
-        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 8 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
-    {
-        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
-    }
-
-    /* closer to the end */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
-
-    while (p <= pEnd-2)
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
-
-    if (p < pEnd)
-        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
-
-    return p-pStart;
-}
-
-
-
-static size_t HUF_decompress4X4_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const U32* DTable)
-{
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-
-    {
-        const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-
-        const void* ptr = DTable;
-        const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1;
-        const U32 dtLog = DTable[0];
-        size_t errorCode;
-
-        /* Init */
-        BIT_DStream_t bitD1;
-        BIT_DStream_t bitD2;
-        BIT_DStream_t bitD3;
-        BIT_DStream_t bitD4;
-        const size_t length1 = MEM_readLE16(istart);
-        const size_t length2 = MEM_readLE16(istart+2);
-        const size_t length3 = MEM_readLE16(istart+4);
-        size_t length4;
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        const size_t segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-
-        length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        errorCode = BIT_initDStream(&bitD1, istart1, length1);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD2, istart2, length2);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD3, istart3, length3);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD4, istart4, length4);
-        if (HUF_isError(errorCode)) return errorCode;
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
-        {
-            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
-
-            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 supposed already verified within main loop */
-
-        /* finish bitStreams one by one */
-        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
-        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
-        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
-        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
-        if (!endSignal) return ERROR(corruption_detected);
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
-    if (HUF_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize;
-    cSrcSize -= hSize;
-
-    return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-
-/**********************************/
-/* Generic decompression selector */
-/**********************************/
-
-typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
-static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
-{
-    /* single, double, quad */
-    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
-    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
-    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
-    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
-    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
-    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
-    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
-    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
-    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
-    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
-    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
-    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
-    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
-    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
-    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
-    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
-};
-
-typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-
-static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, NULL };
-    /* estimate decompression time */
-    U32 Q;
-    const U32 D256 = (U32)(dstSize >> 8);
-    U32 Dtime[3];
-    U32 algoNb = 0;
-    int n;
-
-    /* validation checks */
-    if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
-    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
-    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
-
-    /* decoder timing evaluation */
-    Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */
-    for (n=0; n<3; n++)
-        Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
-
-    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
-
-    if (Dtime[1] < Dtime[0]) algoNb = 1;
-
-    return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
-
-    //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */
-    //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */
-    //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */
-}
-/*
-    zstd - standard compression library
-    Copyright (C) 2014-2015, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd source repository : https://github.com/Cyan4973/zstd
-    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-/* ***************************************************************
-*  Tuning parameters
-*****************************************************************/
-/*!
-*  MEMORY_USAGE :
-*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
-*  Increasing memory usage improves compression ratio
-*  Reduced memory usage can improve speed, due to cache effect
-*/
-#define ZSTD_MEMORY_USAGE 17
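For scale, here is a quick check of the N -> 2^N budget described above against the hash table this file later derives from it (HASH_LOG = ZSTD_MEMORY_USAGE - 2, one U32 per entry). The standalone program below is illustrative only and is not part of the vendored source:

```c
#include <stdio.h>

int main(void)
{
    /* ZSTD_MEMORY_USAGE == 17 -> 2^17 bytes (128 KB) total budget */
    const unsigned memoryUsage = 17;
    const unsigned long budgetBytes  = 1UL << memoryUsage;
    /* HASH_LOG = ZSTD_MEMORY_USAGE - 2; the table stores one U32 per entry */
    const unsigned hashLog           = memoryUsage - 2;
    const unsigned long tableEntries = 1UL << hashLog;
    const unsigned long tableBytes   = tableEntries * sizeof(unsigned int);

    /* 32768 entries * 4 bytes = 131072 bytes, i.e. exactly the 2^17 budget */
    printf("budget=%lu B, entries=%lu, table=%lu B\n",
           budgetBytes, tableEntries, tableBytes);
    return 0;
}
```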
-
-/*!
- * HEAPMODE :
- * Select how default compression functions will allocate memory for their hash table,
- * on the stack (0, fastest), or on the heap (1, requires malloc()).
- * Note that the compression context is fairly large; as a consequence, heap memory is recommended.
- */
-#ifndef ZSTD_HEAPMODE
-#  define ZSTD_HEAPMODE 1
-#endif /* ZSTD_HEAPMODE */
-
-/*!
-*  LEGACY_SUPPORT :
-*  decompressor can decode older formats (starting from Zstd 0.1+)
-*/
-#ifndef ZSTD_LEGACY_SUPPORT
-#  define ZSTD_LEGACY_SUPPORT 1
-#endif
-
-
-/* *******************************************************
-*  Includes
-*********************************************************/
-#include <stdlib.h>      /* calloc */
-#include <string.h>      /* memcpy, memmove */
-#include <stdio.h>       /* debug : printf */
-
-
-/* *******************************************************
-*  Compiler specifics
-*********************************************************/
-#ifdef __AVX2__
-#  include <immintrin.h>   /* AVX2 intrinsics */
-#endif
-
-#ifdef _MSC_VER    /* Visual Studio */
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
-#else
-#  define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-#endif
-
-
-/* *******************************************************
-*  Constants
-*********************************************************/
-#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
-#define HASH_TABLESIZE (1 << HASH_LOG)
-#define HASH_MASK (HASH_TABLESIZE - 1)
-
-#define KNUTH 2654435761
-
-#define BIT7 128
-#define BIT6  64
-#define BIT5  32
-#define BIT4  16
-#define BIT1   2
-#define BIT0   1
-
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
-
-#define BLOCKSIZE (128 KB)                 /* define, for static allocation */
-#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
-#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
-#define IS_RAW BIT0
-#define IS_RLE BIT1
-
-#define WORKPLACESIZE (BLOCKSIZE*3)
-#define MINMATCH 4
-#define MLbits   7
-#define LLbits   6
-#define Offbits  5
-#define MaxML  ((1<<MLbits )-1)
-#define MaxLL  ((1<<LLbits )-1)
-#define MaxOff   31
-#define LitFSELog  11
-#define MLFSELog   10
-#define LLFSELog   10
-#define OffFSELog   9
-#define MAX(a,b) ((a)<(b)?(b):(a))
-#define MaxSeq MAX(MaxLL, MaxML)
-
-#define LITERAL_NOENTROPY 63
-#define COMMAND_NOENTROPY 7   /* to remove */
-
-#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
-
-static const size_t ZSTD_blockHeaderSize = 3;
-static const size_t ZSTD_frameHeaderSize = 4;
-
-
-/* *******************************************************
-*  Memory operations
-**********************************************************/
-static void   ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
-
-static void   ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
-
-#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
-
-/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
-static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
-{
-    const BYTE* ip = (const BYTE*)src;
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + length;
-    do COPY8(op, ip) while (op < oend);
-}
-
-
-/* **************************************
-*  Local structures
-****************************************/
-typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
-
-typedef struct
-{
-    blockType_t blockType;
-    U32 origSize;
-} blockProperties_t;
-
-typedef struct {
-    void* buffer;
-    U32*  offsetStart;
-    U32*  offset;
-    BYTE* offCodeStart;
-    BYTE* offCode;
-    BYTE* litStart;
-    BYTE* lit;
-    BYTE* litLengthStart;
-    BYTE* litLength;
-    BYTE* matchLengthStart;
-    BYTE* matchLength;
-    BYTE* dumpsStart;
-    BYTE* dumps;
-} seqStore_t;
-
-
-/* *************************************
-*  Error Management
-***************************************/
-/*! ZSTD_isError
-*   tells if a return value is an error code */
-static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
-
-
-
-/* *************************************************************
-*   Decompression section
-***************************************************************/
-struct ZSTD_DCtx_s
-{
-    U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
-    U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
-    U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
-    void* previousDstEnd;
-    void* base;
-    size_t expected;
-    blockType_t bType;
-    U32 phase;
-    const BYTE* litPtr;
-    size_t litSize;
-    BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
-};   /* typedef'd to ZSTD_Dctx within "zstd_static.h" */
-
-
-static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
-{
-    const BYTE* const in = (const BYTE* const)src;
-    BYTE headerFlags;
-    U32 cSize;
-
-    if (srcSize < 3) return ERROR(srcSize_wrong);
-
-    headerFlags = *in;
-    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
-
-    bpPtr->blockType = (blockType_t)(headerFlags >> 6);
-    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
-
-    if (bpPtr->blockType == bt_end) return 0;
-    if (bpPtr->blockType == bt_rle) return 1;
-    return cSize;
-}
-
-static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
-    memcpy(dst, src, srcSize);
-    return srcSize;
-}
-
-
-/** ZSTD_decompressLiterals
-    @return : nb of bytes read from src, or an error code*/
-static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
-                                const void* src, size_t srcSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-
-    const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
-    const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
-
-    if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
-    if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
-
-    if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
-
-    *maxDstSizePtr = litSize;
-    return litCSize + 5;
-}
-
-
-/** ZSTD_decodeLiteralsBlock
-    @return : nb of bytes read from src (< srcSize )*/
-static size_t ZSTD_decodeLiteralsBlock(void* ctx,
-                          const void* src, size_t srcSize)
-{
-    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
-    const BYTE* const istart = (const BYTE* const)src;
-
-    /* any compressed block with literals segment must be at least this size */
-    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
-
-    switch(*istart & 3)
-    {
-    default:
-    case 0:
-        {
-            size_t litSize = BLOCKSIZE;
-            const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            memset(dctx->litBuffer + dctx->litSize, 0, 8);
-            return readSize;   /* works if it's an error too */
-        }
-    case IS_RAW:
-        {
-            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
-            if (litSize > srcSize-11)   /* risk of reading too far with wildcopy */
-            {
-                if (litSize > srcSize-3) return ERROR(corruption_detected);
-                memcpy(dctx->litBuffer, istart, litSize);
-                dctx->litPtr = dctx->litBuffer;
-                dctx->litSize = litSize;
-                memset(dctx->litBuffer + dctx->litSize, 0, 8);
-                return litSize+3;
-            }
-            /* direct reference into compressed stream */
-            dctx->litPtr = istart+3;
-            dctx->litSize = litSize;
-            return litSize+3;
-        }
-    case IS_RLE:
-        {
-            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
-            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
-            memset(dctx->litBuffer, istart[3], litSize + 8);
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            return 4;
-        }
-    }
-}
-
-
-static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
-                         FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
-                         const void* src, size_t srcSize)
-{
-    const BYTE* const istart = (const BYTE* const)src;
-    const BYTE* ip = istart;
-    const BYTE* const iend = istart + srcSize;
-    U32 LLtype, Offtype, MLtype;
-    U32 LLlog, Offlog, MLlog;
-    size_t dumpsLength;
-
-    /* check */
-    if (srcSize < 5) return ERROR(srcSize_wrong);
-
-    /* SeqHead */
-    *nbSeq = MEM_readLE16(ip); ip+=2;
-    LLtype  = *ip >> 6;
-    Offtype = (*ip >> 4) & 3;
-    MLtype  = (*ip >> 2) & 3;
-    if (*ip & 2)
-    {
-        dumpsLength  = ip[2];
-        dumpsLength += ip[1] << 8;
-        ip += 3;
-    }
-    else
-    {
-        dumpsLength  = ip[1];
-        dumpsLength += (ip[0] & 1) << 8;
-        ip += 2;
-    }
-    *dumpsPtr = ip;
-    ip += dumpsLength;
-    *dumpsLengthPtr = dumpsLength;
-
-    /* check */
-    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
-
-    /* sequences */
-    {
-        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL and MaxOff */
-        size_t headerSize;
-
-        /* Build DTables */
-        switch(LLtype)
-        {
-        case bt_rle :
-            LLlog = 0;
-            FSE_buildDTable_rle(DTableLL, *ip++); break;
-        case bt_raw :
-            LLlog = LLbits;
-            FSE_buildDTable_raw(DTableLL, LLbits); break;
-        default :
-            {   U32 max = MaxLL;
-                headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
-                if (FSE_isError(headerSize)) return ERROR(GENERIC);
-                if (LLlog > LLFSELog) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSE_buildDTable(DTableLL, norm, max, LLlog);
-        }   }
-
-        switch(Offtype)
-        {
-        case bt_rle :
-            Offlog = 0;
-            if (ip > iend-2) return ERROR(srcSize_wrong);   /* min : "raw", hence no header, but at least xxLog bits */
-            FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
-            break;
-        case bt_raw :
-            Offlog = Offbits;
-            FSE_buildDTable_raw(DTableOffb, Offbits); break;
-        default :
-            {   U32 max = MaxOff;
-                headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
-                if (FSE_isError(headerSize)) return ERROR(GENERIC);
-                if (Offlog > OffFSELog) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSE_buildDTable(DTableOffb, norm, max, Offlog);
-        }   }
-
-        switch(MLtype)
-        {
-        case bt_rle :
-            MLlog = 0;
-            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
-            FSE_buildDTable_rle(DTableML, *ip++); break;
-        case bt_raw :
-            MLlog = MLbits;
-            FSE_buildDTable_raw(DTableML, MLbits); break;
-        default :
-            {   U32 max = MaxML;
-                headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
-                if (FSE_isError(headerSize)) return ERROR(GENERIC);
-                if (MLlog > MLFSELog) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSE_buildDTable(DTableML, norm, max, MLlog);
-    }   }   }
-
-    return ip-istart;
-}
-
-
-typedef struct {
-    size_t litLength;
-    size_t offset;
-    size_t matchLength;
-} seq_t;
-
-typedef struct {
-    BIT_DStream_t DStream;
-    FSE_DState_t stateLL;
-    FSE_DState_t stateOffb;
-    FSE_DState_t stateML;
-    size_t prevOffset;
-    const BYTE* dumps;
-    const BYTE* dumpsEnd;
-} seqState_t;
-
-
-static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
-{
-    size_t litLength;
-    size_t prevOffset;
-    size_t offset;
-    size_t matchLength;
-    const BYTE* dumps = seqState->dumps;
-    const BYTE* const de = seqState->dumpsEnd;
-
-    /* Literal length */
-    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
-    prevOffset = litLength ? seq->offset : seqState->prevOffset;
-    seqState->prevOffset = seq->offset;
-    if (litLength == MaxLL)
-    {
-        const U32 add = dumps<de ? *dumps++ : 0;
-        if (add < 255) litLength += add;
-        else if (dumps + 3 <= de)
-        {
-            litLength = MEM_readLE24(dumps);
-            dumps += 3;
-        }
-        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
-    }
-
-    /* Offset */
-    {
-        static const size_t offsetPrefix[MaxOff+1] = {  /* note : size_t faster than U32 */
-                1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
-                512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
-                524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
-        U32 offsetCode, nbBits;
-        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));   /* <= maxOff, by table construction */
-        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
-        nbBits = offsetCode - 1;
-        if (offsetCode==0) nbBits = 0;   /* cmove */
-        offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
-        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
-        if (offsetCode==0) offset = prevOffset;   /* cmove */
-    }
-
-    /* MatchLength */
-    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
-    if (matchLength == MaxML)
-    {
-        const U32 add = dumps<de ? *dumps++ : 0;
-        if (add < 255) matchLength += add;
-        else if (dumps + 3 <= de)
-        {
-            matchLength = MEM_readLE24(dumps);
-            dumps += 3;
-        }
-        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
-    }
-    matchLength += MINMATCH;
-
-    /* save result */
-    seq->litLength = litLength;
-    seq->offset = offset;
-    seq->matchLength = matchLength;
-    seqState->dumps = dumps;
-}
-
-
-static size_t ZSTD_execSequence(BYTE* op,
-                                seq_t sequence,
-                                const BYTE** litPtr, const BYTE* const litLimit,
-                                BYTE* const base, BYTE* const oend)
-{
-    static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
-    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */
-    const BYTE* const ostart = op;
-    BYTE* const oLitEnd = op + sequence.litLength;
-    BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength;   /* risk : address space overflow (32-bits) */
-    BYTE* const oend_8 = oend-8;
-    const BYTE* const litEnd = *litPtr + sequence.litLength;
-
-    /* checks */
-    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */
-    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
-    if (litEnd > litLimit) return ERROR(corruption_detected);   /* overRead beyond lit buffer */
-
-    /* copy Literals */
-    ZSTD_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
-    op = oLitEnd;
-    *litPtr = litEnd;   /* update for next sequence */
-
-    /* copy Match */
-    {
-        const BYTE* match = op - sequence.offset;
-
-        /* check */
-        if (sequence.offset > (size_t)op) return ERROR(corruption_detected);   /* address space overflow test (this test seems kept by clang optimizer) */
-        //if (match > op) return ERROR(corruption_detected);   /* address space overflow test (is clang optimizer removing this test ?) */
-        if (match < base) return ERROR(corruption_detected);
-
-        /* close range match, overlap */
-        if (sequence.offset < 8)
-        {
-            const int dec64 = dec64table[sequence.offset];
-            op[0] = match[0];
-            op[1] = match[1];
-            op[2] = match[2];
-            op[3] = match[3];
-            match += dec32table[sequence.offset];
-            ZSTD_copy4(op+4, match);
-            match -= dec64;
-        }
-        else
-        {
-            ZSTD_copy8(op, match);
-        }
-        op += 8; match += 8;
-
-        if (oMatchEnd > oend-(16-MINMATCH))
-        {
-            if (op < oend_8)
-            {
-                ZSTD_wildcopy(op, match, oend_8 - op);
-                match += oend_8 - op;
-                op = oend_8;
-            }
-            while (op < oMatchEnd) *op++ = *match++;
-        }
-        else
-        {
-            ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
-        }
-    }
-
-    return oMatchEnd - ostart;
-}
-
-static size_t ZSTD_decompressSequences(
-                               void* ctx,
-                               void* dst, size_t maxDstSize,
-                         const void* seqStart, size_t seqSize)
-{
-    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
-    const BYTE* ip = (const BYTE*)seqStart;
-    const BYTE* const iend = ip + seqSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* op = ostart;
-    BYTE* const oend = ostart + maxDstSize;
-    size_t errorCode, dumpsLength;
-    const BYTE* litPtr = dctx->litPtr;
-    const BYTE* const litEnd = litPtr + dctx->litSize;
-    int nbSeq;
-    const BYTE* dumps;
-    U32* DTableLL = dctx->LLTable;
-    U32* DTableML = dctx->MLTable;
-    U32* DTableOffb = dctx->OffTable;
-    BYTE* const base = (BYTE*) (dctx->base);
-
-    /* Build Decoding Tables */
-    errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
-                                      DTableLL, DTableML, DTableOffb,
-                                      ip, iend-ip);
-    if (ZSTD_isError(errorCode)) return errorCode;
-    ip += errorCode;
-
-    /* Regen sequences */
-    {
-        seq_t sequence;
-        seqState_t seqState;
-
-        memset(&sequence, 0, sizeof(sequence));
-        seqState.dumps = dumps;
-        seqState.dumpsEnd = dumps + dumpsLength;
-        seqState.prevOffset = sequence.offset = 4;
-        errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
-        if (ERR_isError(errorCode)) return ERROR(corruption_detected);
-        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
-        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
-        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
-
-        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; )
-        {
-            size_t oneSeqSize;
-            nbSeq--;
-            ZSTD_decodeSequence(&sequence, &seqState);
-            oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
-            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
-            op += oneSeqSize;
-        }
-
-        /* check if reached exact end */
-        if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* requested too much : data is corrupted */
-        if (nbSeq<0) return ERROR(corruption_detected);   /* requested too many sequences : data is corrupted */
-
-        /* last literal segment */
-        {
-            size_t lastLLSize = litEnd - litPtr;
-            if (litPtr > litEnd) return ERROR(corruption_detected);
-            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
-            if (op != litPtr) memmove(op, litPtr, lastLLSize);
-            op += lastLLSize;
-        }
-    }
-
-    return op-ostart;
-}
-
-
-static size_t ZSTD_decompressBlock(
-                            void* ctx,
-                            void* dst, size_t maxDstSize,
-                      const void* src, size_t srcSize)
-{
-    /* blockType == blockCompressed */
-    const BYTE* ip = (const BYTE*)src;
-
-    /* Decode literals sub-block */
-    size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize);
-    if (ZSTD_isError(litCSize)) return litCSize;
-    ip += litCSize;
-    srcSize -= litCSize;
-
-    return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize);
-}
-
-
-static size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-    const BYTE* iend = ip + srcSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* op = ostart;
-    BYTE* const oend = ostart + maxDstSize;
-    size_t remainingSize = srcSize;
-    U32 magicNumber;
-    blockProperties_t blockProperties;
-
-    /* Frame Header */
-    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
-    magicNumber = MEM_readLE32(src);
-    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
-    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
-
-    /* Loop on each block */
-    while (1)
-    {
-        size_t decodedSize=0;
-        size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
-        if (ZSTD_isError(cBlockSize)) return cBlockSize;
-
-        ip += ZSTD_blockHeaderSize;
-        remainingSize -= ZSTD_blockHeaderSize;
-        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
-
-        switch(blockProperties.blockType)
-        {
-        case bt_compressed:
-            decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize);
-            break;
-        case bt_raw :
-            decodedSize = ZSTD_copyUncompressedBlock(op, oend-op, ip, cBlockSize);
-            break;
-        case bt_rle :
-            return ERROR(GENERIC);   /* not yet supported */
-            break;
-        case bt_end :
-            /* end of frame */
-            if (remainingSize) return ERROR(srcSize_wrong);
-            break;
-        default:
-            return ERROR(GENERIC);   /* impossible */
-        }
-        if (cBlockSize == 0) break;   /* bt_end */
-
-        if (ZSTD_isError(decodedSize)) return decodedSize;
-        op += decodedSize;
-        ip += cBlockSize;
-        remainingSize -= cBlockSize;
-    }
-
-    return op-ostart;
-}
-
-static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    ZSTD_DCtx ctx;
-    ctx.base = dst;
-    return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
-}
-
-/* ZSTD_errorFrameSizeInfoLegacy() :
-   assumes `cSize` and `dBound` are _not_ NULL */
-MEM_STATIC void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
-{
-    *cSize = ret;
-    *dBound = ZSTD_CONTENTSIZE_ERROR;
-}
-
-void ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
-{
-    const BYTE* ip = (const BYTE*)src;
-    size_t remainingSize = srcSize;
-    size_t nbBlocks = 0;
-    U32 magicNumber;
-    blockProperties_t blockProperties;
-
-    /* Frame Header */
-    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
-        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-        return;
-    }
-    magicNumber = MEM_readLE32(src);
-    if (magicNumber != ZSTD_magicNumber) {
-        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
-        return;
-    }
-    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
-
-    /* Loop on each block */
-    while (1)
-    {
-        size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
-        if (ZSTD_isError(cBlockSize)) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
-            return;
-        }
-
-        ip += ZSTD_blockHeaderSize;
-        remainingSize -= ZSTD_blockHeaderSize;
-        if (cBlockSize > remainingSize) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-            return;
-        }
-
-        if (cBlockSize == 0) break;   /* bt_end */
-
-        ip += cBlockSize;
-        remainingSize -= cBlockSize;
-        nbBlocks++;
-    }
-
-    *cSize = ip - (const BYTE*)src;
-    *dBound = nbBlocks * BLOCKSIZE;
-}
-
-
-/*******************************
-*  Streaming Decompression API
-*******************************/
-
-static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
-{
-    dctx->expected = ZSTD_frameHeaderSize;
-    dctx->phase = 0;
-    dctx->previousDstEnd = NULL;
-    dctx->base = NULL;
-    return 0;
-}
-
-static ZSTD_DCtx* ZSTD_createDCtx(void)
-{
-    ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
-    if (dctx==NULL) return NULL;
-    ZSTD_resetDCtx(dctx);
-    return dctx;
-}
-
-static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
-{
-    free(dctx);
-    return 0;
-}
-
-static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
-{
-    return dctx->expected;
-}
-
-static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    /* Sanity check */
-    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
-    if (dst != ctx->previousDstEnd)  /* not contiguous */
-        ctx->base = dst;
-
-    /* Decompress : frame header */
-    if (ctx->phase == 0)
-    {
-        /* Check frame magic header */
-        U32 magicNumber = MEM_readLE32(src);
-        if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
-        ctx->phase = 1;
-        ctx->expected = ZSTD_blockHeaderSize;
-        return 0;
-    }
-
-    /* Decompress : block header */
-    if (ctx->phase == 1)
-    {
-        blockProperties_t bp;
-        size_t blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
-        if (ZSTD_isError(blockSize)) return blockSize;
-        if (bp.blockType == bt_end)
-        {
-            ctx->expected = 0;
-            ctx->phase = 0;
-        }
-        else
-        {
-            ctx->expected = blockSize;
-            ctx->bType = bp.blockType;
-            ctx->phase = 2;
-        }
-
-        return 0;
-    }
-
-    /* Decompress : block content */
-    {
-        size_t rSize;
-        switch(ctx->bType)
-        {
-        case bt_compressed:
-            rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
-            break;
-        case bt_raw :
-            rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
-            break;
-        case bt_rle :
-            return ERROR(GENERIC);   /* not yet handled */
-            break;
-        case bt_end :   /* should never happen (filtered at phase 1) */
-            rSize = 0;
-            break;
-        default:
-            return ERROR(GENERIC);
-        }
-        ctx->phase = 1;
-        ctx->expected = ZSTD_blockHeaderSize;
-        ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
-        return rSize;
-    }
-
-}
-
-
-/* wrapper layer */
-
-unsigned ZSTDv03_isError(size_t code)
-{
-    return ZSTD_isError(code);
-}
-
-size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
-                     const void* src, size_t compressedSize)
-{
-    return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
-}
-
-ZSTDv03_Dctx* ZSTDv03_createDCtx(void)
-{
-    return (ZSTDv03_Dctx*)ZSTD_createDCtx();
-}
-
-size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx)
-{
-    return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
-}
-
-size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx)
-{
-    return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
-}
-
-size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx)
-{
-    return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
-}
-
-size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
-}
diff --git a/vendor/github.com/DataDog/zstd/zstd_v03.h b/vendor/github.com/DataDog/zstd/zstd_v03.h
deleted file mode 100644
index efd8c2b..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_v03.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_V03_H_298734209782
-#define ZSTD_V03_H_298734209782
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* *************************************
-*  Includes
-***************************************/
-#include <stddef.h>   /* size_t */
-
-
-/* *************************************
-*  Simple one-step function
-***************************************/
-/**
-ZSTDv03_decompress() : decompress ZSTD frames compliant with v0.3.x format
-    compressedSize : is the exact source size
-    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
-                      It must be equal to or larger than originalSize, otherwise decompression will fail.
-    return : the number of bytes decompressed into the destination buffer (originalSize)
-             or an errorCode if it fails (which can be tested using ZSTDv03_isError())
-*/
-size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
-                     const void* src, size_t compressedSize);
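For context, a minimal sketch of how this one-step API is typically driven; the helper name, the malloc-based buffer handling, and the error policy are illustrative assumptions, not part of the original header:

```c
#include <stdlib.h>
#include "zstd_v03.h"

/* Illustrative helper: decompress one v0.3 frame whose original size is
 * already known to the caller. Returns NULL on any failure. */
static void* decompress_v03_frame(const void* src, size_t compressedSize,
                                  size_t originalSize, size_t* writtenSize)
{
    void* dst = malloc(originalSize);   /* dst must hold at least originalSize */
    size_t ret;
    if (dst == NULL) return NULL;

    ret = ZSTDv03_decompress(dst, originalSize, src, compressedSize);
    if (ZSTDv03_isError(ret)) { free(dst); return NULL; }

    *writtenSize = ret;                 /* number of bytes regenerated in dst */
    return dst;
}
```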
-
- /**
- ZSTDv03_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.3.x format
-     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
-     cSize (output parameter)  : the number of bytes that would be read to decompress this frame
-                                 or an error code if it fails (which can be tested using ZSTDv03_isError())
-     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
-                                 or ZSTD_CONTENTSIZE_ERROR if an error occurs
-
-    note : assumes `cSize` and `dBound` are _not_ NULL.
- */
- void ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
-                                      size_t* cSize, unsigned long long* dBound);
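A hedged sketch of how the probing call above is used: cSize tells how far to advance through a buffer holding the frame, and dBound bounds the destination allocation. The wrapper name and return convention here are illustrative only:

```c
#include "zstd_v03.h"

/* Illustrative probe: returns the frame's compressed length through *frameSize
 * and an upper bound for its decompressed size through *dstBound, or a
 * non-zero value if the frame header cannot be parsed. */
static int probe_v03_frame(const void* src, size_t srcSize,
                           size_t* frameSize, unsigned long long* dstBound)
{
    ZSTDv03_findFrameSizeInfoLegacy(src, srcSize, frameSize, dstBound);
    if (ZSTDv03_isError(*frameSize)) return 1;   /* corrupted or truncated frame */
    return 0;
}
```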
-
-    /**
-ZSTDv03_isError() : tells if the result of ZSTDv03_decompress() is an error
-*/
-unsigned ZSTDv03_isError(size_t code);
-
-
-/* *************************************
-*  Advanced functions
-***************************************/
-typedef struct ZSTDv03_Dctx_s ZSTDv03_Dctx;
-ZSTDv03_Dctx* ZSTDv03_createDCtx(void);
-size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx);
-
-size_t ZSTDv03_decompressDCtx(void* ctx,
-                              void* dst, size_t maxOriginalSize,
-                        const void* src, size_t compressedSize);
-
-/* *************************************
-*  Streaming functions
-***************************************/
-size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx);
-
-size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx);
-size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
-/**
-  Use the above functions alternately.
-  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
-  ZSTD_decompressContinue() will use previous data blocks to improve decompression when they are located prior to the current block.
-  Result is the number of bytes regenerated within 'dst'.
-  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
-*/
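To make the alternation concrete, a minimal sketch of the streaming protocol described above; the input callback, the buffer size, and the error handling are assumptions for illustration, not part of the original API:

```c
#include <stddef.h>
#include "zstd_v03.h"

/* readInput must copy exactly `len` compressed bytes into `buf` and return 0,
 * or return non-zero when the stream ends prematurely (illustrative contract). */
typedef int (*zstd_read_fn)(void* buf, size_t len, void* opaque);

static size_t stream_decompress_v03(ZSTDv03_Dctx* dctx,
                                    void* dst, size_t dstCapacity,
                                    zstd_read_fn readInput, void* opaque)
{
    /* 128 KB block plus a small margin; a real caller would size or allocate
     * this buffer according to its own constraints (non-reentrant as written). */
    static char inBuf[(128 * 1024) + 16];
    size_t produced = 0;

    ZSTDv03_resetDCtx(dctx);
    for (;;) {
        size_t const toRead = ZSTDv03_nextSrcSizeToDecompress(dctx);
        size_t regenerated;
        if (toRead == 0) break;                         /* frame fully decoded */
        if (toRead > sizeof(inBuf)) return (size_t)-1;  /* defensive guard */
        if (readInput(inBuf, toRead, opaque) != 0) return (size_t)-1;

        regenerated = ZSTDv03_decompressContinue(dctx,
                                                 (char*)dst + produced,
                                                 dstCapacity - produced,
                                                 inBuf, toRead);
        if (ZSTDv03_isError(regenerated)) return regenerated;
        produced += regenerated;          /* 0 when only a header was decoded */
    }
    return produced;
}
```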
-
-/* *************************************
-*  Prefix - version detection
-***************************************/
-#define ZSTDv03_magicNumber 0xFD2FB523   /* v0.3 */
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_V03_H_298734209782 */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v04.c b/vendor/github.com/DataDog/zstd/zstd_v04.c
deleted file mode 100644
index 645a6e3..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_v04.c
+++ /dev/null
@@ -1,3637 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
- /******************************************
- *  Includes
- ******************************************/
-#include <stddef.h>    /* size_t, ptrdiff_t */
-#include <string.h>    /* memcpy */
-
-#include "zstd_v04.h"
-#include "error_private.h"
-
-
-/* ******************************************************************
- *   mem.h
- *******************************************************************/
-#ifndef MEM_H_MODULE
-#define MEM_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/******************************************
-*  Compiler-specific
-******************************************/
-#if defined(_MSC_VER)   /* Visual Studio */
-#   include <stdlib.h>  /* _byteswap_ulong */
-#   include <intrin.h>  /* _byteswap_* */
-#endif
-#if defined(__GNUC__)
-#  define MEM_STATIC static __attribute__((unused))
-#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#  define MEM_STATIC static inline
-#elif defined(_MSC_VER)
-#  define MEM_STATIC static __inline
-#else
-#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
-#endif
-
-
-/****************************************************************
-*  Basic Types
-*****************************************************************/
-#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-# include <stdint.h>
-  typedef  uint8_t BYTE;
-  typedef uint16_t U16;
-  typedef  int16_t S16;
-  typedef uint32_t U32;
-  typedef  int32_t S32;
-  typedef uint64_t U64;
-  typedef  int64_t S64;
-#else
-  typedef unsigned char       BYTE;
-  typedef unsigned short      U16;
-  typedef   signed short      S16;
-  typedef unsigned int        U32;
-  typedef   signed int        S32;
-  typedef unsigned long long  U64;
-  typedef   signed long long  S64;
-#endif
-
-
-/*-*************************************
-*  Debug
-***************************************/
-#include "debug.h"
-#ifndef assert
-#  define assert(condition) ((void)0)
-#endif
-
-
-/****************************************************************
-*  Memory I/O
-*****************************************************************/
-/* MEM_FORCE_MEMORY_ACCESS
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The switch below allows selecting a different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
- *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method does not depend on compiler extensions but violates the C standard.
- *            It can generate buggy code on targets where the generated assembly depends on alignment.
- *            But in some circumstances, it's the only known way to get the best performance (i.e. GCC + ARMv6).
- * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-#    define MEM_FORCE_MEMORY_ACCESS 2
-#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
-  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
-#    define MEM_FORCE_MEMORY_ACCESS 1
-#  endif
-#endif
-
-MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
-MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
-
-MEM_STATIC unsigned MEM_isLittleEndian(void)
-{
-    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
-    return one.c[0];
-}
-
-#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
-
-/* violates C standard on structure alignment.
-Only use if no other choice to achieve best performance on target platform */
-MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
-MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
-MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
-
-#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
-
-MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
-MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
-
-#else
-
-/* default method, safe and standard.
-   can sometimes prove slower */
-
-MEM_STATIC U16 MEM_read16(const void* memPtr)
-{
-    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC U32 MEM_read32(const void* memPtr)
-{
-    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC U64 MEM_read64(const void* memPtr)
-{
-    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value)
-{
-    memcpy(memPtr, &value, sizeof(value));
-}
-
-#endif // MEM_FORCE_MEMORY_ACCESS
-
-
-MEM_STATIC U16 MEM_readLE16(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read16(memPtr);
-    else
-    {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U16)(p[0] + (p[1]<<8));
-    }
-}
-
-MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
-{
-    if (MEM_isLittleEndian())
-    {
-        MEM_write16(memPtr, val);
-    }
-    else
-    {
-        BYTE* p = (BYTE*)memPtr;
-        p[0] = (BYTE)val;
-        p[1] = (BYTE)(val>>8);
-    }
-}
-
-MEM_STATIC U32 MEM_readLE24(const void* memPtr)
-{
-    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
-}
-
-MEM_STATIC U32 MEM_readLE32(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read32(memPtr);
-    else
-    {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
-    }
-}
-
-
-MEM_STATIC U64 MEM_readLE64(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read64(memPtr);
-    else
-    {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
-                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
-    }
-}
-
-
-MEM_STATIC size_t MEM_readLEST(const void* memPtr)
-{
-    if (MEM_32bits())
-        return (size_t)MEM_readLE32(memPtr);
-    else
-        return (size_t)MEM_readLE64(memPtr);
-}
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* MEM_H_MODULE */
-
-/*
-    zstd - standard compression library
-    Header File for static linking only
-*/
-#ifndef ZSTD_STATIC_H
-#define ZSTD_STATIC_H
-
-
-/* *************************************
-*  Types
-***************************************/
-#define ZSTD_WINDOWLOG_ABSOLUTEMIN 11
-
-/** from faster to stronger */
-typedef enum { ZSTD_fast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2, ZSTD_btlazy2 } ZSTD_strategy;
-
-typedef struct
-{
-    U64 srcSize;       /* optional : tells how many bytes are present in the frame. Use 0 if not known. */
-    U32 windowLog;     /* largest match distance : larger == more compression, more memory needed during decompression */
-    U32 contentLog;    /* full search segment : larger == more compression, slower, more memory (useless for fast) */
-    U32 hashLog;       /* dispatch table : larger == more memory, faster */
-    U32 searchLog;     /* nb of searches : larger == more compression, slower */
-    U32 searchLength;  /* size of matches : larger == faster decompression, sometimes less compression */
-    ZSTD_strategy strategy;
-} ZSTD_parameters;
-
-typedef ZSTDv04_Dctx ZSTD_DCtx;
-
-/* *************************************
-*  Advanced functions
-***************************************/
-/** ZSTD_decompress_usingDict
-*   Same as ZSTD_decompressDCtx, using a Dictionary content as prefix
-*   Note : dict can be NULL, in which case, it's equivalent to ZSTD_decompressDCtx() */
-static size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx,
-                                             void* dst, size_t maxDstSize,
-                                       const void* src, size_t srcSize,
-                                       const void* dict,size_t dictSize);
-
-
-/* **************************************
-*  Streaming functions (direct mode)
-****************************************/
-static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx);
-static size_t ZSTD_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize);
-static void   ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* src, size_t srcSize);
-
-static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
-static size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
-
-/**
-  Streaming decompression, bufferless mode
-
-  A ZSTD_DCtx object is required to track streaming operations.
-  Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
-  A ZSTD_DCtx object can be re-used multiple times. Use ZSTD_resetDCtx() to return to fresh status.
-
-  First operation is to retrieve frame parameters, using ZSTD_getFrameParams().
-  This function doesn't consume its input. It needs enough input data to properly decode the frame header.
-  The objective is to retrieve params->windowLog, to know the minimum amount of memory required during decoding.
-  Result : 0 when successful; it means the ZSTD_parameters structure has been filled.
-           >0 : not enough data in src. Provides the expected size needed to successfully decode the header.
-           or an errorCode, which can be tested using ZSTD_isError() (for example, if src is not a ZSTD header)
-
-  Then, you can optionally insert a dictionary.
-  This operation must mimic the compressor behavior, otherwise decompression will fail or be corrupted.
-
-  Then it's possible to start decompression.
-  Use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.
-  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
-  ZSTD_decompressContinue() requires this exact amount of bytes, or it will fail.
-  ZSTD_decompressContinue() needs previous data blocks during decompression, up to (1 << windowLog) bytes.
-  They should preferably be located contiguously, prior to the current block. Alternatively, a round buffer is also possible.
-
-  @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst'.
-  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
-
-  A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
-  Context can then be reset to start a new decompression.
-*/
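For illustration, here is a minimal sketch of the decode loop described above. It assumes the static declarations earlier in this header, plus ZSTD_isError() and the ERROR() macro used elsewhere in this file; the helper name ZSTD_decompressFrame_sketch is hypothetical and error handling is deliberately simplified.

/* Hypothetical sketch, not part of the original source : drives the
 * bufferless streaming API on a single, fully-buffered frame. */
static size_t ZSTD_decompressFrame_sketch(ZSTD_DCtx* dctx,
                                          void* dst, size_t dstCapacity,
                                          const void* src, size_t srcSize)
{
    ZSTD_parameters params;
    const BYTE* ip = (const BYTE*)src;
    const BYTE* const iend = ip + srcSize;
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + dstCapacity;
    size_t headerResult;

    /* 1. retrieve frame parameters (does not consume input) */
    headerResult = ZSTD_getFrameParams(&params, ip, srcSize);
    if (ZSTD_isError(headerResult)) return headerResult;   /* e.g. not a ZSTD header */
    if (headerResult > 0) return ERROR(srcSize_wrong);      /* simplified : a real caller would supply more input */

    /* 2. start from a fresh context (a dictionary could optionally be
     *    inserted here, mimicking the compressor side) */
    {   size_t const resetResult = ZSTD_resetDCtx(dctx);
        if (ZSTD_isError(resetResult)) return resetResult;
    }

    /* 3. decode block by block, feeding exactly the requested amount each time */
    while (1) {
        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
        size_t decoded;
        if (toRead == 0) break;                              /* frame fully decoded */
        if (toRead > (size_t)(iend - ip)) return ERROR(srcSize_wrong);
        decoded = ZSTD_decompressContinue(dctx, op, (size_t)(oend - op), ip, toRead);
        if (ZSTD_isError(decoded)) return decoded;
        ip += toRead;
        op += decoded;                                       /* may be 0 when only a header was decoded */
    }
    return (size_t)(op - (BYTE*)dst);                        /* total regenerated size */
}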
-
-
-
-
-#endif  /* ZSTD_STATIC_H */
-
-
-/*
-    zstd_internal - common functions to include
-    Header File for include
-*/
-#ifndef ZSTD_CCOMMON_H_MODULE
-#define ZSTD_CCOMMON_H_MODULE
-
-/* *************************************
-*  Common macros
-***************************************/
-#define MIN(a,b) ((a)<(b) ? (a) : (b))
-#define MAX(a,b) ((a)>(b) ? (a) : (b))
-
-
-/* *************************************
-*  Common constants
-***************************************/
-#define ZSTD_MAGICNUMBER 0xFD2FB524   /* v0.4 */
-
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
-
-#define BLOCKSIZE (128 KB)                 /* define, for static allocation */
-
-static const size_t ZSTD_blockHeaderSize = 3;
-static const size_t ZSTD_frameHeaderSize_min = 5;
-#define ZSTD_frameHeaderSize_max 5         /* define, for static allocation */
-
-#define BIT7 128
-#define BIT6  64
-#define BIT5  32
-#define BIT4  16
-#define BIT1   2
-#define BIT0   1
-
-#define IS_RAW BIT0
-#define IS_RLE BIT1
-
-#define MINMATCH 4
-#define REPCODE_STARTVALUE 4
-
-#define MLbits   7
-#define LLbits   6
-#define Offbits  5
-#define MaxML  ((1<<MLbits) - 1)
-#define MaxLL  ((1<<LLbits) - 1)
-#define MaxOff ((1<<Offbits)- 1)
-#define MLFSELog   10
-#define LLFSELog   10
-#define OffFSELog   9
-#define MaxSeq MAX(MaxLL, MaxML)
-
-#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
-#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
-
-#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
-
-typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
-
-
-/* ******************************************
-*  Shared functions to include for inlining
-********************************************/
-static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
-
-#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
-
-/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
-static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
-{
-    const BYTE* ip = (const BYTE*)src;
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + length;
-    do
-        COPY8(op, ip)
-    while (op < oend);
-}
-
-
-
-/* ******************************************************************
-   FSE : Finite State Entropy coder
-   header file
-****************************************************************** */
-#ifndef FSE_H
-#define FSE_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/* *****************************************
-*  Includes
-******************************************/
-#include <stddef.h>    /* size_t, ptrdiff_t */
-
-
-/* *****************************************
-*  FSE simple functions
-******************************************/
-static size_t FSE_decompress(void* dst,  size_t maxDstSize,
-                const void* cSrc, size_t cSrcSize);
-/*!
-FSE_decompress():
-    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
-    into already allocated destination buffer 'dst', of size 'maxDstSize'.
-    return : size of regenerated data (<= maxDstSize)
-             or an error code, which can be tested using FSE_isError()
-
-    ** Important ** : FSE_decompress() does not decompress non-compressible (raw) nor RLE data !
-    Why ? Making this distinction requires a header.
-    Header management is intentionally delegated to the user layer, which can better manage special cases.
-*/
-
-
-/* *****************************************
-*  Tool functions
-******************************************/
-/* Error Management */
-static unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */
-
-
-
-/* *****************************************
-*  FSE detailed API
-******************************************/
-/*!
-FSE_compress() does the following:
-1. count symbol occurrences from source[] into table count[]
-2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
-3. save normalized counters to memory buffer using writeNCount()
-4. build encoding table 'CTable' from normalized counters
-5. encode the data stream using encoding table 'CTable'
-
-FSE_decompress() does the following:
-1. read normalized counters with readNCount()
-2. build decoding table 'DTable' from normalized counters
-3. decode the data stream using decoding table 'DTable'
-
-The following API allows targeting specific sub-functions for advanced tasks.
-For example, it's possible to compress several blocks using the same 'CTable',
-or to save and provide normalized distribution using external method.
-*/
-
-
-/* *** DECOMPRESSION *** */
-
-/*!
-FSE_readNCount():
-   Read compactly saved 'normalizedCounter' from 'rBuffer'.
-   return : size read from 'rBuffer'
-            or an errorCode, which can be tested using FSE_isError()
-            maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
-static  size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
-
-/*!
-Constructor and Destructor of type FSE_DTable
-    Note that its size depends on 'tableLog' */
-typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
-
-/*!
-FSE_buildDTable():
-   Builds 'dt', which must be already allocated, using FSE_createDTable()
-   return : 0,
-            or an errorCode, which can be tested using FSE_isError() */
-static size_t FSE_buildDTable ( FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
-
-/*!
-FSE_decompress_usingDTable():
-   Decompress compressed source 'cSrc' of size 'cSrcSize' using 'dt'
-   into 'dst' which must be already allocated.
-   return : size of regenerated data (necessarily <= maxDstSize)
-            or an errorCode, which can be tested using FSE_isError() */
-static  size_t FSE_decompress_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
-
-/*!
-Tutorial :
-----------
-(Note : these functions only decompress FSE-compressed blocks.
- If block is uncompressed, use memcpy() instead
- If block is a single repeated byte, use memset() instead )
-
-The first step is to obtain the normalized frequencies of symbols.
-This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
-'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
-In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
-or size the table to handle worst case situations (typically 256).
-FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
-The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
-Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
-If there is an error, the function will return an error code, which can be tested using FSE_isError().
-
-The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
-This is performed by the function FSE_buildDTable().
-The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
-If there is an error, the function will return an error code, which can be tested using FSE_isError().
-
-'FSE_DTable' can then be used to decompress 'cSrc', with FSE_decompress_usingDTable().
-'cSrcSize' must be strictly correct, otherwise decompression will fail.
-FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=maxDstSize).
-If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
-*/
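As a rough illustration of the three steps above (mirroring FSE_decompress(), which appears later in this file), a hedged sketch follows; FSE_decodeBlock_sketch is a hypothetical name, and FSE_MAX_SYMBOL_VALUE, FSE_MAX_TABLELOG and FSE_DTABLE_SIZE_U32 are taken from the FSE sections further below.

/* Hypothetical sketch, not part of the original source. */
static size_t FSE_decodeBlock_sketch(void* dst, size_t maxDstSize,
                                     const void* cSrc, size_t cSrcSize)
{
    short counting[FSE_MAX_SYMBOL_VALUE + 1];
    FSE_DTable dt[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
    unsigned tableLog;
    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
    size_t headerSize, errorCode;

    /* 1. read the normalized frequencies (needs at least 4 bytes of input) */
    headerSize = FSE_readNCount(counting, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
    if (FSE_isError(headerSize)) return headerSize;

    /* 2. build the decoding table from the normalized counters */
    errorCode = FSE_buildDTable(dt, counting, maxSymbolValue, tableLog);
    if (FSE_isError(errorCode)) return errorCode;

    /* 3. decode the bitstream that follows the header; the remaining size must be exact */
    return FSE_decompress_usingDTable(dst, maxDstSize,
                                      (const BYTE*)cSrc + headerSize,
                                      cSrcSize - headerSize, dt);
}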
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif  /* FSE_H */
-
-
-/* ******************************************************************
-   bitstream
-   Part of NewGen Entropy library
-   header file (to include)
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef BITSTREAM_H_MODULE
-#define BITSTREAM_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/*
-*  This API consists of small unitary functions, which highly benefit from being inlined.
-*  Since link-time-optimization is not available for all compilers,
-*  these functions are defined into a .h to be included.
-*/
-
-/**********************************************
-*  bitStream decompression API (read backward)
-**********************************************/
-typedef struct
-{
-    size_t   bitContainer;
-    unsigned bitsConsumed;
-    const char* ptr;
-    const char* start;
-} BIT_DStream_t;
-
-typedef enum { BIT_DStream_unfinished = 0,
-               BIT_DStream_endOfBuffer = 1,
-               BIT_DStream_completed = 2,
-               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
-               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
-
-MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
-MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
-MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
-MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
-
-
-
-
-/******************************************
-*  unsafe API
-******************************************/
-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
-/* faster, but works only if nbBits >= 1 */
-
-
-
-/****************************************************************
-*  Helper functions
-****************************************************************/
-MEM_STATIC unsigned BIT_highbit32 (U32 val)
-{
-#   if defined(_MSC_VER)   /* Visual */
-    unsigned long r=0;
-    _BitScanReverse ( &r, val );
-    return (unsigned) r;
-#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
-    return 31 - __builtin_clz (val);
-#   else   /* Software version */
-    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
-    U32 v = val;
-    unsigned r;
-    v |= v >> 1;
-    v |= v >> 2;
-    v |= v >> 4;
-    v |= v >> 8;
-    v |= v >> 16;
-    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
-    return r;
-#   endif
-}
-
-
-/**********************************************************
-* bitStream decoding
-**********************************************************/
-
-/*!BIT_initDStream
-*  Initialize a BIT_DStream_t.
-*  @bitD : a pointer to an already allocated BIT_DStream_t structure
-*  @srcBuffer must point at the beginning of a bitStream
-*  @srcSize must be the exact size of the bitStream
-*  @result : size of stream (== srcSize) or an errorCode if a problem is detected
-*/
-MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
-{
-    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
-
-    if (srcSize >=  sizeof(size_t))   /* normal case */
-    {
-        U32 contain32;
-        bitD->start = (const char*)srcBuffer;
-        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);
-        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
-        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
-        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
-    }
-    else
-    {
-        U32 contain32;
-        bitD->start = (const char*)srcBuffer;
-        bitD->ptr   = bitD->start;
-        bitD->bitContainer = *(const BYTE*)(bitD->start);
-        switch(srcSize)
-        {
-            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
-            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
-            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
-            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
-            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
-            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8; /* fall-through */
-            default: break;
-        }
-        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
-        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
-        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
-        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
-    }
-
-    return srcSize;
-}
-
-MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
-{
-    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
-    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
-}
-
-/*! BIT_lookBitsFast :
-*   unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
-{
-    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
-    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
-}
-
-MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
-{
-    bitD->bitsConsumed += nbBits;
-}
-
-MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
-{
-    size_t value = BIT_lookBits(bitD, nbBits);
-    BIT_skipBits(bitD, nbBits);
-    return value;
-}
-
-/*!BIT_readBitsFast :
-*  unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
-{
-    size_t value = BIT_lookBitsFast(bitD, nbBits);
-    BIT_skipBits(bitD, nbBits);
-    return value;
-}
-
-MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
-{
-    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
-        return BIT_DStream_overflow;
-
-    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
-    {
-        bitD->ptr -= bitD->bitsConsumed >> 3;
-        bitD->bitsConsumed &= 7;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);
-        return BIT_DStream_unfinished;
-    }
-    if (bitD->ptr == bitD->start)
-    {
-        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
-        return BIT_DStream_completed;
-    }
-    {
-        U32 nbBytes = bitD->bitsConsumed >> 3;
-        BIT_DStream_status result = BIT_DStream_unfinished;
-        if (bitD->ptr - nbBytes < bitD->start)
-        {
-            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
-            result = BIT_DStream_endOfBuffer;
-        }
-        bitD->ptr -= nbBytes;
-        bitD->bitsConsumed -= nbBytes*8;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
-        return result;
-    }
-}
-
-/*! BIT_endOfDStream
-*   @return Tells if DStream has reached its exact end
-*/
-MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
-{
-    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
-}
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* BITSTREAM_H_MODULE */
-
-
-
-/* ******************************************************************
-   FSE : Finite State Entropy coder
-   header file for static linking (only)
-   Copyright (C) 2013-2015, Yann Collet
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef FSE_STATIC_H
-#define FSE_STATIC_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/* *****************************************
-*  Static allocation
-*******************************************/
-/* FSE buffer bounds */
-#define FSE_NCOUNTBOUND 512
-#define FSE_BLOCKBOUND(size) (size + (size>>7))
-#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
-
-/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */
-#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
-#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
-
-
-/* *****************************************
-*  FSE advanced API
-*******************************************/
-static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
-/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
-
-static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
-/* build a fake FSE_DTable, designed to always generate the same symbolValue */
-
-
-
-/* *****************************************
-*  FSE symbol decompression API
-*******************************************/
-typedef struct
-{
-    size_t      state;
-    const void* table;   /* precise table may vary, depending on U16 */
-} FSE_DState_t;
-
-
-static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
-
-static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
-
-static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
-
-
-/* *****************************************
-*  FSE unsafe API
-*******************************************/
-static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
-/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
-
-
-/* *****************************************
-*  Implementation of inlined functions
-*******************************************/
-/* decompression */
-
-typedef struct {
-    U16 tableLog;
-    U16 fastMode;
-} FSE_DTableHeader;   /* sizeof U32 */
-
-typedef struct
-{
-    unsigned short newState;
-    unsigned char  symbol;
-    unsigned char  nbBits;
-} FSE_decode_t;   /* size == U32 */
-
-MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
-{
-    FSE_DTableHeader DTableH;
-    memcpy(&DTableH, dt, sizeof(DTableH));
-    DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
-    BIT_reloadDStream(bitD);
-    DStatePtr->table = dt + 1;
-}
-
-MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
-{
-    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    const U32  nbBits = DInfo.nbBits;
-    BYTE symbol = DInfo.symbol;
-    size_t lowBits = BIT_readBits(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
-{
-    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    const U32 nbBits = DInfo.nbBits;
-    BYTE symbol = DInfo.symbol;
-    size_t lowBits = BIT_readBitsFast(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
-{
-    return DStatePtr->state == 0;
-}
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif  /* FSE_STATIC_H */
-
-/* ******************************************************************
-   FSE : Finite State Entropy coder
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-#ifndef FSE_COMMONDEFS_ONLY
-
-/* **************************************************************
-*  Tuning parameters
-****************************************************************/
-/*!MEMORY_USAGE :
-*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
-*  Increasing memory usage improves compression ratio
-*  Reduced memory usage can improve speed, due to cache effect
-*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
-#define FSE_MAX_MEMORY_USAGE 14
-#define FSE_DEFAULT_MEMORY_USAGE 13
-
-/*!FSE_MAX_SYMBOL_VALUE :
-*  Maximum symbol value authorized.
-*  Required for proper stack allocation */
-#define FSE_MAX_SYMBOL_VALUE 255
-
-
-/* **************************************************************
-*  template functions type & suffix
-****************************************************************/
-#define FSE_FUNCTION_TYPE BYTE
-#define FSE_FUNCTION_EXTENSION
-#define FSE_DECODE_TYPE FSE_decode_t
-
-
-#endif   /* !FSE_COMMONDEFS_ONLY */
-
-/* **************************************************************
-*  Compiler specifics
-****************************************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  define FORCE_INLINE static __forceinline
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
-#else
-#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#    ifdef __GNUC__
-#      define FORCE_INLINE static inline __attribute__((always_inline))
-#    else
-#      define FORCE_INLINE static inline
-#    endif
-#  else
-#    define FORCE_INLINE static
-#  endif /* __STDC_VERSION__ */
-#endif
-
-
-/* **************************************************************
-*  Dependencies
-****************************************************************/
-#include <stdlib.h>     /* malloc, free, qsort */
-#include <string.h>     /* memcpy, memset */
-#include <stdio.h>      /* printf (debug) */
-
-
-/* ***************************************************************
-*  Constants
-*****************************************************************/
-#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
-#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
-#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
-#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
-#define FSE_MIN_TABLELOG 5
-
-#define FSE_TABLELOG_ABSOLUTE_MAX 15
-#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
-#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
-#endif
-
-
-/* **************************************************************
-*  Error Management
-****************************************************************/
-#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
-
-
-/* **************************************************************
-*  Complex types
-****************************************************************/
-typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
-
-
-/*-**************************************************************
-*  Templates
-****************************************************************/
-/*
-  designed to be included
-  for type-specific functions (template emulation in C)
-  Objective is to write these functions only once, for improved maintenance
-*/
-
-/* safety checks */
-#ifndef FSE_FUNCTION_EXTENSION
-#  error "FSE_FUNCTION_EXTENSION must be defined"
-#endif
-#ifndef FSE_FUNCTION_TYPE
-#  error "FSE_FUNCTION_TYPE must be defined"
-#endif
-
-/* Function names */
-#define FSE_CAT(X,Y) X##Y
-#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
-#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
-
-static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
-
-
-static size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
-    FSE_DTableHeader DTableH;
-    void* const tdPtr = dt+1;   /* because dt is unsigned, 32-bits aligned on 32-bits */
-    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
-    const U32 tableSize = 1 << tableLog;
-    const U32 tableMask = tableSize-1;
-    const U32 step = FSE_tableStep(tableSize);
-    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
-    U32 position = 0;
-    U32 highThreshold = tableSize-1;
-    const S16 largeLimit= (S16)(1 << (tableLog-1));
-    U32 noLarge = 1;
-    U32 s;
-
-    /* Sanity Checks */
-    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
-    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
-
-    /* Init, lay down lowprob symbols */
-    memset(tableDecode, 0, sizeof(FSE_DECODE_TYPE) * (maxSymbolValue+1) );   /* useless init, but keep static analyzer happy, and we don't need to performance optimize legacy decoders */
-    DTableH.tableLog = (U16)tableLog;
-    for (s=0; s<=maxSymbolValue; s++)
-    {
-        if (normalizedCounter[s]==-1)
-        {
-            tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
-            symbolNext[s] = 1;
-        }
-        else
-        {
-            if (normalizedCounter[s] >= largeLimit) noLarge=0;
-            symbolNext[s] = normalizedCounter[s];
-        }
-    }
-
-    /* Spread symbols */
-    for (s=0; s<=maxSymbolValue; s++)
-    {
-        int i;
-        for (i=0; i<normalizedCounter[s]; i++)
-        {
-            tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
-            position = (position + step) & tableMask;
-            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
-        }
-    }
-
-    if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
-
-    /* Build Decoding table */
-    {
-        U32 i;
-        for (i=0; i<tableSize; i++)
-        {
-            FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
-            U16 nextState = symbolNext[symbol]++;
-            tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
-            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
-        }
-    }
-
-    DTableH.fastMode = (U16)noLarge;
-    memcpy(dt, &DTableH, sizeof(DTableH));
-    return 0;
-}
-
-
-#ifndef FSE_COMMONDEFS_ONLY
-/******************************************
-*  FSE helper functions
-******************************************/
-static unsigned FSE_isError(size_t code) { return ERR_isError(code); }
-
-
-/****************************************************************
-*  FSE NCount encoding-decoding
-****************************************************************/
-static short FSE_abs(short a)
-{
-    return a<0 ? -a : a;
-}
-
-static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
-                 const void* headerBuffer, size_t hbSize)
-{
-    const BYTE* const istart = (const BYTE*) headerBuffer;
-    const BYTE* const iend = istart + hbSize;
-    const BYTE* ip = istart;
-    int nbBits;
-    int remaining;
-    int threshold;
-    U32 bitStream;
-    int bitCount;
-    unsigned charnum = 0;
-    int previous0 = 0;
-
-    if (hbSize < 4) return ERROR(srcSize_wrong);
-    bitStream = MEM_readLE32(ip);
-    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
-    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
-    bitStream >>= 4;
-    bitCount = 4;
-    *tableLogPtr = nbBits;
-    remaining = (1<<nbBits)+1;
-    threshold = 1<<nbBits;
-    nbBits++;
-
-    while ((remaining>1) && (charnum<=*maxSVPtr))
-    {
-        if (previous0)
-        {
-            unsigned n0 = charnum;
-            while ((bitStream & 0xFFFF) == 0xFFFF)
-            {
-                n0+=24;
-                if (ip < iend-5)
-                {
-                    ip+=2;
-                    bitStream = MEM_readLE32(ip) >> bitCount;
-                }
-                else
-                {
-                    bitStream >>= 16;
-                    bitCount+=16;
-                }
-            }
-            while ((bitStream & 3) == 3)
-            {
-                n0+=3;
-                bitStream>>=2;
-                bitCount+=2;
-            }
-            n0 += bitStream & 3;
-            bitCount += 2;
-            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
-            while (charnum < n0) normalizedCounter[charnum++] = 0;
-            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
-            {
-                ip += bitCount>>3;
-                bitCount &= 7;
-                bitStream = MEM_readLE32(ip) >> bitCount;
-            }
-            else
-                bitStream >>= 2;
-        }
-        {
-            const short max = (short)((2*threshold-1)-remaining);
-            short count;
-
-            if ((bitStream & (threshold-1)) < (U32)max)
-            {
-                count = (short)(bitStream & (threshold-1));
-                bitCount   += nbBits-1;
-            }
-            else
-            {
-                count = (short)(bitStream & (2*threshold-1));
-                if (count >= threshold) count -= max;
-                bitCount   += nbBits;
-            }
-
-            count--;   /* extra accuracy */
-            remaining -= FSE_abs(count);
-            normalizedCounter[charnum++] = count;
-            previous0 = !count;
-            while (remaining < threshold)
-            {
-                nbBits--;
-                threshold >>= 1;
-            }
-
-            {
-                if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
-                {
-                    ip += bitCount>>3;
-                    bitCount &= 7;
-                }
-                else
-                {
-                    bitCount -= (int)(8 * (iend - 4 - ip));
-                    ip = iend - 4;
-                }
-                bitStream = MEM_readLE32(ip) >> (bitCount & 31);
-            }
-        }
-    }
-    if (remaining != 1) return ERROR(GENERIC);
-    *maxSVPtr = charnum-1;
-
-    ip += (bitCount+7)>>3;
-    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
-    return ip-istart;
-}
-
-
-/*********************************************************
-*  Decompression (Byte symbols)
-*********************************************************/
-static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
-{
-    void* ptr = dt;
-    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
-    void* dPtr = dt + 1;
-    FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
-
-    DTableH->tableLog = 0;
-    DTableH->fastMode = 0;
-
-    cell->newState = 0;
-    cell->symbol = symbolValue;
-    cell->nbBits = 0;
-
-    return 0;
-}
-
-
-static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
-{
-    void* ptr = dt;
-    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
-    void* dPtr = dt + 1;
-    FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
-    const unsigned tableSize = 1 << nbBits;
-    const unsigned tableMask = tableSize - 1;
-    const unsigned maxSymbolValue = tableMask;
-    unsigned s;
-
-    /* Sanity checks */
-    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
-
-    /* Build Decoding Table */
-    DTableH->tableLog = (U16)nbBits;
-    DTableH->fastMode = 1;
-    for (s=0; s<=maxSymbolValue; s++)
-    {
-        dinfo[s].newState = 0;
-        dinfo[s].symbol = (BYTE)s;
-        dinfo[s].nbBits = (BYTE)nbBits;
-    }
-
-    return 0;
-}
-
-FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
-          void* dst, size_t maxDstSize,
-    const void* cSrc, size_t cSrcSize,
-    const FSE_DTable* dt, const unsigned fast)
-{
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* op = ostart;
-    BYTE* const omax = op + maxDstSize;
-    BYTE* const olimit = omax-3;
-
-    BIT_DStream_t bitD;
-    FSE_DState_t state1;
-    FSE_DState_t state2;
-    size_t errorCode;
-
-    /* Init */
-    errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressed Size */
-    if (FSE_isError(errorCode)) return errorCode;
-
-    FSE_initDState(&state1, &bitD, dt);
-    FSE_initDState(&state2, &bitD, dt);
-
-#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
-
-    /* 4 symbols per loop */
-    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
-    {
-        op[0] = FSE_GETSYMBOL(&state1);
-
-        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            BIT_reloadDStream(&bitD);
-
-        op[1] = FSE_GETSYMBOL(&state2);
-
-        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
-
-        op[2] = FSE_GETSYMBOL(&state1);
-
-        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            BIT_reloadDStream(&bitD);
-
-        op[3] = FSE_GETSYMBOL(&state2);
-    }
-
-    /* tail */
-    /* note : BIT_reloadDStream(&bitD) >= BIT_DStream_endOfBuffer; ends at exactly BIT_DStream_completed */
-    while (1)
-    {
-        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
-            break;
-
-        *op++ = FSE_GETSYMBOL(&state1);
-
-        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
-            break;
-
-        *op++ = FSE_GETSYMBOL(&state2);
-    }
-
-    /* end ? */
-    if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
-        return op-ostart;
-
-    if (op==omax) return ERROR(dstSize_tooSmall);   /* dst buffer is full, but cSrc unfinished */
-
-    return ERROR(corruption_detected);
-}
-
-
-static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
-                            const void* cSrc, size_t cSrcSize,
-                            const FSE_DTable* dt)
-{
-    FSE_DTableHeader DTableH;
-    U32 fastMode;
-
-    memcpy(&DTableH, dt, sizeof(DTableH));
-    fastMode = DTableH.fastMode;
-
-    /* select fast mode (static) */
-    if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
-    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
-}
-
-
-static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
-{
-    const BYTE* const istart = (const BYTE*)cSrc;
-    const BYTE* ip = istart;
-    short counting[FSE_MAX_SYMBOL_VALUE+1];
-    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
-    unsigned tableLog;
-    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
-    size_t errorCode;
-
-    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */
-
-    /* normal FSE decoding mode */
-    errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
-    if (FSE_isError(errorCode)) return errorCode;
-    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */
-    ip += errorCode;
-    cSrcSize -= errorCode;
-
-    errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
-    if (FSE_isError(errorCode)) return errorCode;
-
-    /* always return, even if it is an error code */
-    return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
-}
-
-
-
-#endif   /* FSE_COMMONDEFS_ONLY */
-
-
-/* ******************************************************************
-   Huff0 : Huffman coder, part of New Generation Entropy library
-   header file
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef HUFF0_H
-#define HUFF0_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/* ****************************************
-*  Dependency
-******************************************/
-#include <stddef.h>    /* size_t */
-
-
-/* ****************************************
-*  Huff0 simple functions
-******************************************/
-static size_t HUF_decompress(void* dst,  size_t dstSize,
-                const void* cSrc, size_t cSrcSize);
-/*!
-HUF_decompress():
-    Decompress Huff0 data from buffer 'cSrc', of size 'cSrcSize',
-    into already allocated destination buffer 'dst', of size 'dstSize'.
-    'dstSize' must be the exact size of original (uncompressed) data.
-    Note : in contrast with FSE, HUF_decompress can regenerate RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, because it knows the size to regenerate.
-    @return : size of regenerated data (== dstSize)
-              or an error code, which can be tested using HUF_isError()
-*/
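A minimal (hypothetical) call site for the contract above might look as follows; 'originalSize' must be the exact uncompressed size, and HUF_isError() is declared just below.

/* Hypothetical sketch, not part of the original source. */
static size_t HUF_decodeBlock_sketch(void* dst, size_t originalSize,
                                     const void* cSrc, size_t cSrcSize)
{
    size_t const result = HUF_decompress(dst, originalSize, cSrc, cSrcSize);
    if (HUF_isError(result)) return result;   /* corruption, or sizes don't match */
    return result;                            /* == originalSize on success */
}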
-
-
-/* ****************************************
-*  Tool functions
-******************************************/
-/* Error Management */
-static unsigned    HUF_isError(size_t code);        /* tells if a return value is an error code */
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif   /* HUFF0_H */
-
-
-/* ******************************************************************
-   Huff0 : Huffman coder, part of New Generation Entropy library
-   header file for static linking (only)
-   Copyright (C) 2013-2015, Yann Collet
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef HUFF0_STATIC_H
-#define HUFF0_STATIC_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-
-/* ****************************************
-*  Static allocation macros
-******************************************/
-/* static allocation of Huff0's DTable */
-#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))  /* nb Cells; use unsigned short for X2, unsigned int for X4 */
-#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
-        unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
-#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
-        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
-#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
-        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
-
-
-/* ****************************************
-*  Advanced decompression functions
-******************************************/
-static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
-static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */
-
-
-/* ****************************************
-*  Huff0 detailed API
-******************************************/
-/*!
-HUF_decompress() does the following:
-1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics
-2. build the Huffman table from its saved representation, using HUF_readDTableXn()
-3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable
-
-*/
-static size_t HUF_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);
-static size_t HUF_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);
-
-static size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
-static size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
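For illustration, a hedged sketch of the two-step flow above, fixed to the single-symbol (X2) decoder; HUF_decodeX2_sketch is a hypothetical name, and HUF_CREATE_STATIC_DTABLEX2 / HUF_MAX_TABLELOG come from this header and the constants section below.

/* Hypothetical sketch, not part of the original source. */
static size_t HUF_decodeX2_sketch(void* dst, size_t maxDstSize,
                                  const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);

    /* build the Huffman table from its saved representation */
    size_t const hSize = HUF_readDTableX2(DTable, cSrc, cSrcSize);
    if (HUF_isError(hSize)) return hSize;

    /* decode the 4 interleaved segments that follow the table */
    return HUF_decompress4X2_usingDTable(dst, maxDstSize,
                                         (const BYTE*)cSrc + hSize,
                                         cSrcSize - hSize, DTable);
}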
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* HUFF0_STATIC_H */
-
-
-
-/* ******************************************************************
-   Huff0 : Huffman coder, part of New Generation Entropy library
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-
-/* **************************************************************
-*  Compiler specifics
-****************************************************************/
-#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-/* inline is defined */
-#elif defined(_MSC_VER)
-#  define inline __inline
-#else
-#  define inline /* disable inline */
-#endif
-
-
-#ifdef _MSC_VER    /* Visual Studio */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#endif
-
-
-/* **************************************************************
-*  Includes
-****************************************************************/
-#include <stdlib.h>     /* malloc, free, qsort */
-#include <string.h>     /* memcpy, memset */
-#include <stdio.h>      /* printf (debug) */
-
-
-/* **************************************************************
-*  Constants
-****************************************************************/
-#define HUF_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
-#define HUF_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
-#define HUF_DEFAULT_TABLELOG  HUF_MAX_TABLELOG   /* tableLog by default, when not specified */
-#define HUF_MAX_SYMBOL_VALUE 255
-#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
-#  error "HUF_MAX_TABLELOG is too large !"
-#endif
-
-
-/* **************************************************************
-*  Error Management
-****************************************************************/
-static unsigned HUF_isError(size_t code) { return ERR_isError(code); }
-#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
-
-
-
-/*-*******************************************************
-*  Huff0 : Huffman block decompression
-*********************************************************/
-typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */
-
-typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */
-
-typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
-
-/*! HUF_readStats
-    Read compact Huffman tree, saved by HUF_writeCTable
-    @huffWeight : destination buffer
-    @return : size read from `src`
-*/
-static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
-                            U32* nbSymbolsPtr, U32* tableLogPtr,
-                            const void* src, size_t srcSize)
-{
-    U32 weightTotal;
-    U32 tableLog;
-    const BYTE* ip = (const BYTE*) src;
-    size_t iSize;
-    size_t oSize;
-    U32 n;
-
-    if (!srcSize) return ERROR(srcSize_wrong);
-    iSize = ip[0];
-    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzer complain ... */
-
-    if (iSize >= 128)  /* special header */
-    {
-        if (iSize >= (242))   /* RLE */
-        {
-            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
-            oSize = l[iSize-242];
-            memset(huffWeight, 1, hwSize);
-            iSize = 0;
-        }
-        else   /* Incompressible */
-        {
-            oSize = iSize - 127;
-            iSize = ((oSize+1)/2);
-            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-            if (oSize >= hwSize) return ERROR(corruption_detected);
-            ip += 1;
-            for (n=0; n<oSize; n+=2)
-            {
-                huffWeight[n]   = ip[n/2] >> 4;
-                huffWeight[n+1] = ip[n/2] & 15;
-            }
-        }
-    }
-    else  /* header compressed with FSE (normal case) */
-    {
-        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-        oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */
-        if (FSE_isError(oSize)) return oSize;
-    }
-
-    /* collect weight stats */
-    memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
-    weightTotal = 0;
-    for (n=0; n<oSize; n++)
-    {
-        if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
-        rankStats[huffWeight[n]]++;
-        weightTotal += (1 << huffWeight[n]) >> 1;
-    }
-    if (weightTotal == 0) return ERROR(corruption_detected);
-
-    /* get last non-null symbol weight (implied, total must be 2^n) */
-    tableLog = BIT_highbit32(weightTotal) + 1;
-    if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
-    {
-        U32 total = 1 << tableLog;
-        U32 rest = total - weightTotal;
-        U32 verif = 1 << BIT_highbit32(rest);
-        U32 lastWeight = BIT_highbit32(rest) + 1;
-        if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
-        huffWeight[oSize] = (BYTE)lastWeight;
-        rankStats[lastWeight]++;
-    }
-
-    /* check tree construction validity */
-    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
-
-    /* results */
-    *nbSymbolsPtr = (U32)(oSize+1);
-    *tableLogPtr = tableLog;
-    return iSize+1;
-}
-
-
-/**************************/
-/* single-symbol decoding */
-/**************************/
-
-static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
-{
-    BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
-    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */
-    U32 tableLog = 0;
-    size_t iSize;
-    U32 nbSymbols = 0;
-    U32 n;
-    U32 nextRankStart;
-    void* const dtPtr = DTable + 1;
-    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
-
-    HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */
-    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzer complain ... */
-
-    iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
-    if (HUF_isError(iSize)) return iSize;
-
-    /* check result */
-    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */
-    DTable[0] = (U16)tableLog;   /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */
-
-    /* Prepare ranks */
-    nextRankStart = 0;
-    for (n=1; n<=tableLog; n++)
-    {
-        U32 current = nextRankStart;
-        nextRankStart += (rankVal[n] << (n-1));
-        rankVal[n] = current;
-    }
-
-    /* fill DTable */
-    for (n=0; n<nbSymbols; n++)
-    {
-        const U32 w = huffWeight[n];
-        const U32 length = (1 << w) >> 1;
-        U32 i;
-        HUF_DEltX2 D;
-        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
-        for (i = rankVal[w]; i < rankVal[w] + length; i++)
-            dt[i] = D;
-        rankVal[w] += length;
-    }
-
-    return iSize;
-}
-
-static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
-{
-        const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
-        const BYTE c = dt[val].byte;
-        BIT_skipBits(Dstream, dt[val].nbBits);
-        return c;
-}
-
-#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
-    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
-        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 4 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
-    {
-        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-    }
-
-    /* closer to the end */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-
-    /* no more data to retrieve from bitstream, hence no need to reload */
-    while (p < pEnd)
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
-
-    return pEnd-pStart;
-}
-
-
-static size_t HUF_decompress4X2_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const U16* DTable)
-{
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-
-    {
-        const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable;
-        const HUF_DEltX2* const dt = ((const HUF_DEltX2*)dtPtr) +1;
-        const U32 dtLog = DTable[0];
-        size_t errorCode;
-
-        /* Init */
-        BIT_DStream_t bitD1;
-        BIT_DStream_t bitD2;
-        BIT_DStream_t bitD3;
-        BIT_DStream_t bitD4;
-        const size_t length1 = MEM_readLE16(istart);
-        const size_t length2 = MEM_readLE16(istart+2);
-        const size_t length3 = MEM_readLE16(istart+4);
-        size_t length4;
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        const size_t segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-
-        length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        errorCode = BIT_initDStream(&bitD1, istart1, length1);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD2, istart2, length2);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD3, istart3, length3);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD4, istart4, length4);
-        if (HUF_isError(errorCode)) return errorCode;
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
-        {
-            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
-
-            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 supposed already verified within main loop */
-
-        /* finish bitStreams one by one */
-        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
-        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
-        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
-        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
-        if (!endSignal) return ERROR(corruption_detected);
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-    size_t errorCode;
-
-    errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
-    if (HUF_isError(errorCode)) return errorCode;
-    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += errorCode;
-    cSrcSize -= errorCode;
-
-    return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-
-/***************************/
-/* double-symbols decoding */
-/***************************/
-
-static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
-                           const U32* rankValOrigin, const int minWeight,
-                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
-                           U32 nbBitsBaseline, U16 baseSeq)
-{
-    HUF_DEltX4 DElt;
-    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
-    U32 s;
-
-    /* get pre-calculated rankVal */
-    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
-    /* fill skipped values */
-    if (minWeight>1)
-    {
-        U32 i, skipSize = rankVal[minWeight];
-        MEM_writeLE16(&(DElt.sequence), baseSeq);
-        DElt.nbBits   = (BYTE)(consumed);
-        DElt.length   = 1;
-        for (i = 0; i < skipSize; i++)
-            DTable[i] = DElt;
-    }
-
-    /* fill DTable */
-    for (s=0; s<sortedListSize; s++)   /* note : sortedSymbols already skipped */
-    {
-        const U32 symbol = sortedSymbols[s].symbol;
-        const U32 weight = sortedSymbols[s].weight;
-        const U32 nbBits = nbBitsBaseline - weight;
-        const U32 length = 1 << (sizeLog-nbBits);
-        const U32 start = rankVal[weight];
-        U32 i = start;
-        const U32 end = start + length;
-
-        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
-        DElt.nbBits = (BYTE)(nbBits + consumed);
-        DElt.length = 2;
-        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
-
-        rankVal[weight] += length;
-    }
-}
-
-typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
-
-static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
-                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
-                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
-                           const U32 nbBitsBaseline)
-{
-    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
-    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
-    const U32 minBits  = nbBitsBaseline - maxWeight;
-    U32 s;
-
-    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
-    /* fill DTable */
-    for (s=0; s<sortedListSize; s++)
-    {
-        const U16 symbol = sortedList[s].symbol;
-        const U32 weight = sortedList[s].weight;
-        const U32 nbBits = nbBitsBaseline - weight;
-        const U32 start = rankVal[weight];
-        const U32 length = 1 << (targetLog-nbBits);
-
-        if (targetLog-nbBits >= minBits)   /* enough room for a second symbol */
-        {
-            U32 sortedRank;
-            int minWeight = nbBits + scaleLog;
-            if (minWeight < 1) minWeight = 1;
-            sortedRank = rankStart[minWeight];
-            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
-                           rankValOrigin[nbBits], minWeight,
-                           sortedList+sortedRank, sortedListSize-sortedRank,
-                           nbBitsBaseline, symbol);
-        }
-        else
-        {
-            U32 i;
-            const U32 end = start + length;
-            HUF_DEltX4 DElt;
-
-            MEM_writeLE16(&(DElt.sequence), symbol);
-            DElt.nbBits   = (BYTE)(nbBits);
-            DElt.length   = 1;
-            for (i = start; i < end; i++)
-                DTable[i] = DElt;
-        }
-        rankVal[weight] += length;
-    }
-}
-
-static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
-{
-    BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
-    sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
-    U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
-    U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
-    U32* const rankStart = rankStart0+1;
-    rankVal_t rankVal;
-    U32 tableLog, maxW, sizeOfSort, nbSymbols;
-    const U32 memLog = DTable[0];
-    size_t iSize;
-    void* dtPtr = DTable;
-    HUF_DEltX4* const dt = ((HUF_DEltX4*)dtPtr) + 1;
-
-    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32));   /* if compilation fails here, assertion is false */
-    if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
-    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzer complain ... */
-
-    iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
-    if (HUF_isError(iSize)) return iSize;
-
-    /* check result */
-    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
-
-    /* find maxWeight */
-    for (maxW = tableLog; rankStats[maxW]==0; maxW--)
-        { if (!maxW) return ERROR(GENERIC); }  /* necessarily finds a solution before maxW==0 */
-
-    /* Get start index of each weight */
-    {
-        U32 w, nextRankStart = 0;
-        for (w=1; w<=maxW; w++)
-        {
-            U32 current = nextRankStart;
-            nextRankStart += rankStats[w];
-            rankStart[w] = current;
-        }
-        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/
-        sizeOfSort = nextRankStart;
-    }
-
-    /* sort symbols by weight */
-    {
-        U32 s;
-        for (s=0; s<nbSymbols; s++)
-        {
-            U32 w = weightList[s];
-            U32 r = rankStart[w]++;
-            sortedSymbol[r].symbol = (BYTE)s;
-            sortedSymbol[r].weight = (BYTE)w;
-        }
-        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
-    }
-
-    /* Build rankVal */
-    {
-        const U32 minBits = tableLog+1 - maxW;
-        U32 nextRankVal = 0;
-        U32 w, consumed;
-        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */
-        U32* rankVal0 = rankVal[0];
-        for (w=1; w<=maxW; w++)
-        {
-            U32 current = nextRankVal;
-            nextRankVal += rankStats[w] << (w+rescale);
-            rankVal0[w] = current;
-        }
-        for (consumed = minBits; consumed <= memLog - minBits; consumed++)
-        {
-            U32* rankValPtr = rankVal[consumed];
-            for (w = 1; w <= maxW; w++)
-            {
-                rankValPtr[w] = rankVal0[w] >> consumed;
-            }
-        }
-    }
-
-    HUF_fillDTableX4(dt, memLog,
-                   sortedSymbol, sizeOfSort,
-                   rankStart0, rankVal, maxW,
-                   tableLog+1);
-
-    return iSize;
-}
-
-
-static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
-{
-    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 2);
-    BIT_skipBits(DStream, dt[val].nbBits);
-    return dt[val].length;
-}
-
-static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
-{
-    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 1);
-    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
-    else
-    {
-        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
-        {
-            BIT_skipBits(DStream, dt[val].nbBits);
-            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
-                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
-        }
-    }
-    return 1;
-}
-
-
-#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
-    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
-        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 8 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
-    {
-        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
-    }
-
-    /* closer to the end */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
-
-    while (p <= pEnd-2)
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
-
-    if (p < pEnd)
-        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
-
-    return p-pStart;
-}
-
-static size_t HUF_decompress4X4_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const U32* DTable)
-{
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-
-    {
-        const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable;
-        const HUF_DEltX4* const dt = ((const HUF_DEltX4*)dtPtr) +1;
-        const U32 dtLog = DTable[0];
-        size_t errorCode;
-
-        /* Init */
-        BIT_DStream_t bitD1;
-        BIT_DStream_t bitD2;
-        BIT_DStream_t bitD3;
-        BIT_DStream_t bitD4;
-        const size_t length1 = MEM_readLE16(istart);
-        const size_t length2 = MEM_readLE16(istart+2);
-        const size_t length3 = MEM_readLE16(istart+4);
-        size_t length4;
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        const size_t segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-
-        length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        errorCode = BIT_initDStream(&bitD1, istart1, length1);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD2, istart2, length2);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD3, istart3, length3);
-        if (HUF_isError(errorCode)) return errorCode;
-        errorCode = BIT_initDStream(&bitD4, istart4, length4);
-        if (HUF_isError(errorCode)) return errorCode;
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
-        {
-            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
-
-            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 supposed already verified within main loop */
-
-        /* finish bitStreams one by one */
-        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
-        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
-        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
-        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
-        if (!endSignal) return ERROR(corruption_detected);
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
-    if (HUF_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize;
-    cSrcSize -= hSize;
-
-    return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-
-/**********************************/
-/* Generic decompression selector */
-/**********************************/
-
-typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
-static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
-{
-    /* single, double, quad */
-    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
-    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
-    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
-    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
-    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
-    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
-    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
-    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
-    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
-    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
-    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
-    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
-    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
-    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
-    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
-    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
-};
-
-typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-
-static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, NULL };
-    /* estimate decompression time */
-    U32 Q;
-    const U32 D256 = (U32)(dstSize >> 8);
-    U32 Dtime[3];
-    U32 algoNb = 0;
-    int n;
-
-    /* validation checks */
-    if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
-    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
-    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
-
-    /* decoder timing evaluation */
-    Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */
-    for (n=0; n<3; n++)
-        Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
-
-    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
-
-    if (Dtime[1] < Dtime[0]) algoNb = 1;
-
-    return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
-
-    //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */
-    //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */
-    //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */
-}
-
-
-
-#endif   /* ZSTD_CCOMMON_H_MODULE */
-
-
-/*
-    zstd - decompression module for v0.4 legacy format
-    Copyright (C) 2015-2016, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd source repository : https://github.com/Cyan4973/zstd
-    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-/* ***************************************************************
-*  Tuning parameters
-*****************************************************************/
-/*!
- * HEAPMODE :
- * Select how default decompression function ZSTD_decompress() will allocate memory,
- * in memory stack (0), or in memory heap (1, requires malloc())
- */
-#ifndef ZSTD_HEAPMODE
-#  define ZSTD_HEAPMODE 1
-#endif
-
-
-/* *******************************************************
-*  Includes
-*********************************************************/
-#include <stdlib.h>      /* calloc */
-#include <string.h>      /* memcpy, memmove */
-#include <stdio.h>       /* debug : printf */
-
-
-/* *******************************************************
-*  Compiler specifics
-*********************************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
-#endif
-
-
-/* *************************************
-*  Local types
-***************************************/
-typedef struct
-{
-    blockType_t blockType;
-    U32 origSize;
-} blockProperties_t;
-
-
-/* *******************************************************
-*  Memory operations
-**********************************************************/
-static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
-
-
-/* *************************************
-*  Error Management
-***************************************/
-
-/*! ZSTD_isError
-*   tells if a return value is an error code */
-static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
-
-
-/* *************************************************************
-*   Context management
-***************************************************************/
-typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
-               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock } ZSTD_dStage;
-
-struct ZSTDv04_Dctx_s
-{
-    U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
-    U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
-    U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
-    const void* previousDstEnd;
-    const void* base;
-    const void* vBase;
-    const void* dictEnd;
-    size_t expected;
-    size_t headerSize;
-    ZSTD_parameters params;
-    blockType_t bType;
-    ZSTD_dStage stage;
-    const BYTE* litPtr;
-    size_t litSize;
-    BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
-    BYTE headerBuffer[ZSTD_frameHeaderSize_max];
-};  /* typedef'd to ZSTD_DCtx within "zstd_static.h" */
-
-static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
-{
-    dctx->expected = ZSTD_frameHeaderSize_min;
-    dctx->stage = ZSTDds_getFrameHeaderSize;
-    dctx->previousDstEnd = NULL;
-    dctx->base = NULL;
-    dctx->vBase = NULL;
-    dctx->dictEnd = NULL;
-    return 0;
-}
-
-static ZSTD_DCtx* ZSTD_createDCtx(void)
-{
-    ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
-    if (dctx==NULL) return NULL;
-    ZSTD_resetDCtx(dctx);
-    return dctx;
-}
-
-static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
-{
-    free(dctx);
-    return 0;
-}
-
-
-/* *************************************************************
-*   Decompression section
-***************************************************************/
-/** ZSTD_decodeFrameHeader_Part1
-*   decode the 1st part of the Frame Header, which tells Frame Header size.
-*   srcSize must be == ZSTD_frameHeaderSize_min
-*   @return : the full size of the Frame Header */
-static size_t ZSTD_decodeFrameHeader_Part1(ZSTD_DCtx* zc, const void* src, size_t srcSize)
-{
-    U32 magicNumber;
-    if (srcSize != ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong);
-    magicNumber = MEM_readLE32(src);
-    if (magicNumber != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);
-    zc->headerSize = ZSTD_frameHeaderSize_min;
-    return zc->headerSize;
-}
-
-
-static size_t ZSTD_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize)
-{
-    U32 magicNumber;
-    if (srcSize < ZSTD_frameHeaderSize_min) return ZSTD_frameHeaderSize_max;
-    magicNumber = MEM_readLE32(src);
-    if (magicNumber != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);
-    memset(params, 0, sizeof(*params));
-    params->windowLog = (((const BYTE*)src)[4] & 15) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
-    if ((((const BYTE*)src)[4] >> 4) != 0) return ERROR(frameParameter_unsupported);   /* reserved bits */
-    return 0;
-}
-
-/** ZSTD_decodeFrameHeader_Part2
-*   decode the full Frame Header
-*   srcSize must be the size provided by ZSTD_decodeFrameHeader_Part1
-*   @return : 0, or an error code, which can be tested using ZSTD_isError() */
-static size_t ZSTD_decodeFrameHeader_Part2(ZSTD_DCtx* zc, const void* src, size_t srcSize)
-{
-    size_t result;
-    if (srcSize != zc->headerSize) return ERROR(srcSize_wrong);
-    result = ZSTD_getFrameParams(&(zc->params), src, srcSize);
-    if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
-    return result;
-}
-
-
-static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
-{
-    const BYTE* const in = (const BYTE* const)src;
-    BYTE headerFlags;
-    U32 cSize;
-
-    if (srcSize < 3) return ERROR(srcSize_wrong);
-
-    headerFlags = *in;
-    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
-
-    bpPtr->blockType = (blockType_t)(headerFlags >> 6);
-    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
-
-    if (bpPtr->blockType == bt_end) return 0;
-    if (bpPtr->blockType == bt_rle) return 1;
-    return cSize;
-}
-
-static size_t ZSTD_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
-    memcpy(dst, src, srcSize);
-    return srcSize;
-}
-
-
-/** ZSTD_decompressLiterals
-    @return : nb of bytes read from src, or an error code*/
-static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
-                                const void* src, size_t srcSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-
-    const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
-    const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
-
-    if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
-    if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
-
-    if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
-
-    *maxDstSizePtr = litSize;
-    return litCSize + 5;
-}
-
-
-/** ZSTD_decodeLiteralsBlock
-    @return : nb of bytes read from src (< srcSize ) */
-static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
-                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
-{
-    const BYTE* const istart = (const BYTE*) src;
-
-    /* any compressed block with literals segment must be at least this size */
-    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
-
-    switch(*istart & 3)
-    {
-    /* compressed */
-    case 0:
-        {
-            size_t litSize = BLOCKSIZE;
-            const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            memset(dctx->litBuffer + dctx->litSize, 0, 8);
-            return readSize;   /* works if it's an error too */
-        }
-    case IS_RAW:
-        {
-            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
-            if (litSize > srcSize-11)   /* risk of reading too far with wildcopy */
-            {
-                if (litSize > srcSize-3) return ERROR(corruption_detected);
-                memcpy(dctx->litBuffer, istart, litSize);
-                dctx->litPtr = dctx->litBuffer;
-                dctx->litSize = litSize;
-                memset(dctx->litBuffer + dctx->litSize, 0, 8);
-                return litSize+3;
-            }
-            /* direct reference into compressed stream */
-            dctx->litPtr = istart+3;
-            dctx->litSize = litSize;
-            return litSize+3;        }
-    case IS_RLE:
-        {
-            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
-            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
-            memset(dctx->litBuffer, istart[3], litSize + 8);
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            return 4;
-        }
-    default:
-        return ERROR(corruption_detected);   /* forbidden nominal case */
-    }
-}
-
-
-static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
-                         FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
-                         const void* src, size_t srcSize)
-{
-    const BYTE* const istart = (const BYTE* const)src;
-    const BYTE* ip = istart;
-    const BYTE* const iend = istart + srcSize;
-    U32 LLtype, Offtype, MLtype;
-    U32 LLlog, Offlog, MLlog;
-    size_t dumpsLength;
-
-    /* check */
-    if (srcSize < 5) return ERROR(srcSize_wrong);
-
-    /* SeqHead */
-    *nbSeq = MEM_readLE16(ip); ip+=2;
-    LLtype  = *ip >> 6;
-    Offtype = (*ip >> 4) & 3;
-    MLtype  = (*ip >> 2) & 3;
-    if (*ip & 2)
-    {
-        dumpsLength  = ip[2];
-        dumpsLength += ip[1] << 8;
-        ip += 3;
-    }
-    else
-    {
-        dumpsLength  = ip[1];
-        dumpsLength += (ip[0] & 1) << 8;
-        ip += 2;
-    }
-    *dumpsPtr = ip;
-    ip += dumpsLength;
-    *dumpsLengthPtr = dumpsLength;
-
-    /* check */
-    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
-
-    /* sequences */
-    {
-        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL >= MaxOff */
-        size_t headerSize;
-
-        /* Build DTables */
-        switch(LLtype)
-        {
-        case bt_rle :
-            LLlog = 0;
-            FSE_buildDTable_rle(DTableLL, *ip++); break;
-        case bt_raw :
-            LLlog = LLbits;
-            FSE_buildDTable_raw(DTableLL, LLbits); break;
-        default :
-            {   U32 max = MaxLL;
-                headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
-                if (FSE_isError(headerSize)) return ERROR(GENERIC);
-                if (LLlog > LLFSELog) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSE_buildDTable(DTableLL, norm, max, LLlog);
-        }   }
-
-        switch(Offtype)
-        {
-        case bt_rle :
-            Offlog = 0;
-            if (ip > iend-2) return ERROR(srcSize_wrong);   /* min : "raw", hence no header, but at least xxLog bits */
-            FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
-            break;
-        case bt_raw :
-            Offlog = Offbits;
-            FSE_buildDTable_raw(DTableOffb, Offbits); break;
-        default :
-            {   U32 max = MaxOff;
-                headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
-                if (FSE_isError(headerSize)) return ERROR(GENERIC);
-                if (Offlog > OffFSELog) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSE_buildDTable(DTableOffb, norm, max, Offlog);
-        }   }
-
-        switch(MLtype)
-        {
-        case bt_rle :
-            MLlog = 0;
-            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
-            FSE_buildDTable_rle(DTableML, *ip++); break;
-        case bt_raw :
-            MLlog = MLbits;
-            FSE_buildDTable_raw(DTableML, MLbits); break;
-        default :
-            {   U32 max = MaxML;
-                headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
-                if (FSE_isError(headerSize)) return ERROR(GENERIC);
-                if (MLlog > MLFSELog) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSE_buildDTable(DTableML, norm, max, MLlog);
-    }   }   }
-
-    return ip-istart;
-}
-
-
-typedef struct {
-    size_t litLength;
-    size_t offset;
-    size_t matchLength;
-} seq_t;
-
-typedef struct {
-    BIT_DStream_t DStream;
-    FSE_DState_t stateLL;
-    FSE_DState_t stateOffb;
-    FSE_DState_t stateML;
-    size_t prevOffset;
-    const BYTE* dumps;
-    const BYTE* dumpsEnd;
-} seqState_t;
-
-
-static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
-{
-    size_t litLength;
-    size_t prevOffset;
-    size_t offset;
-    size_t matchLength;
-    const BYTE* dumps = seqState->dumps;
-    const BYTE* const de = seqState->dumpsEnd;
-
-    /* Literal length */
-    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
-    prevOffset = litLength ? seq->offset : seqState->prevOffset;
-    if (litLength == MaxLL) {
-        const U32 add = dumps<de ? *dumps++ : 0;
-        if (add < 255) litLength += add;
-        else if (dumps + 3 <= de) {
-            litLength = MEM_readLE24(dumps);
-            dumps += 3;
-        }
-        if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */
-    }
-
-    /* Offset */
-    {   static const U32 offsetPrefix[MaxOff+1] = {
-                1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
-                512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
-                524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
-        U32 offsetCode, nbBits;
-        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));   /* <= maxOff, by table construction */
-        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
-        nbBits = offsetCode - 1;
-        if (offsetCode==0) nbBits = 0;   /* cmove */
-        offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
-        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
-        if (offsetCode==0) offset = prevOffset;   /* cmove */
-        if (offsetCode | !litLength) seqState->prevOffset = seq->offset;   /* cmove */
-    }
-
-    /* MatchLength */
-    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
-    if (matchLength == MaxML) {
-        const U32 add = dumps<de ? *dumps++ : 0;
-        if (add < 255) matchLength += add;
-        else if (dumps + 3 <= de){
-            matchLength = MEM_readLE24(dumps);
-            dumps += 3;
-        }
-        if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */
-    }
-    matchLength += MINMATCH;
-
-    /* save result */
-    seq->litLength = litLength;
-    seq->offset = offset;
-    seq->matchLength = matchLength;
-    seqState->dumps = dumps;
-}
-
-
-static size_t ZSTD_execSequence(BYTE* op,
-                                BYTE* const oend, seq_t sequence,
-                                const BYTE** litPtr, const BYTE* const litLimit,
-                                const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
-{
-    static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
-    BYTE* const oLitEnd = op + sequence.litLength;
-    const size_t sequenceLength = sequence.litLength + sequence.matchLength;
-    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
-    BYTE* const oend_8 = oend-8;
-    const BYTE* const litEnd = *litPtr + sequence.litLength;
-    const BYTE* match = oLitEnd - sequence.offset;
-
-    /* check */
-    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */
-    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
-    if (litEnd > litLimit) return ERROR(corruption_detected);   /* risk read beyond lit buffer */
-
-    /* copy Literals */
-    ZSTD_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
-    op = oLitEnd;
-    *litPtr = litEnd;   /* update for next sequence */
-
-    /* copy Match */
-    if (sequence.offset > (size_t)(oLitEnd - base))
-    {
-        /* offset beyond prefix */
-        if (sequence.offset > (size_t)(oLitEnd - vBase))
-            return ERROR(corruption_detected);
-        match = dictEnd - (base-match);
-        if (match + sequence.matchLength <= dictEnd)
-        {
-            memmove(oLitEnd, match, sequence.matchLength);
-            return sequenceLength;
-        }
-        /* span extDict & currentPrefixSegment */
-        {
-            size_t length1 = dictEnd - match;
-            memmove(oLitEnd, match, length1);
-            op = oLitEnd + length1;
-            sequence.matchLength -= length1;
-            match = base;
-            if (op > oend_8 || sequence.matchLength < MINMATCH) {
-              while (op < oMatchEnd) *op++ = *match++;
-              return sequenceLength;
-            }
-        }
-    }
-    /* Requirement: op <= oend_8 */
-
-    /* match within prefix */
-    if (sequence.offset < 8) {
-        /* close range match, overlap */
-        const int sub2 = dec64table[sequence.offset];
-        op[0] = match[0];
-        op[1] = match[1];
-        op[2] = match[2];
-        op[3] = match[3];
-        match += dec32table[sequence.offset];
-        ZSTD_copy4(op+4, match);
-        match -= sub2;
-    } else {
-        ZSTD_copy8(op, match);
-    }
-    op += 8; match += 8;
-
-    if (oMatchEnd > oend-(16-MINMATCH))
-    {
-        if (op < oend_8)
-        {
-            ZSTD_wildcopy(op, match, oend_8 - op);
-            match += oend_8 - op;
-            op = oend_8;
-        }
-        while (op < oMatchEnd) *op++ = *match++;
-    }
-    else
-    {
-        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8, but must be signed */
-    }
-    return sequenceLength;
-}
-
-
-static size_t ZSTD_decompressSequences(
-                               ZSTD_DCtx* dctx,
-                               void* dst, size_t maxDstSize,
-                         const void* seqStart, size_t seqSize)
-{
-    const BYTE* ip = (const BYTE*)seqStart;
-    const BYTE* const iend = ip + seqSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* op = ostart;
-    BYTE* const oend = ostart + maxDstSize;
-    size_t errorCode, dumpsLength;
-    const BYTE* litPtr = dctx->litPtr;
-    const BYTE* const litEnd = litPtr + dctx->litSize;
-    int nbSeq;
-    const BYTE* dumps;
-    U32* DTableLL = dctx->LLTable;
-    U32* DTableML = dctx->MLTable;
-    U32* DTableOffb = dctx->OffTable;
-    const BYTE* const base = (const BYTE*) (dctx->base);
-    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
-    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
-
-    /* Build Decoding Tables */
-    errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
-                                      DTableLL, DTableML, DTableOffb,
-                                      ip, iend-ip);
-    if (ZSTD_isError(errorCode)) return errorCode;
-    ip += errorCode;
-
-    /* Regen sequences */
-    {
-        seq_t sequence;
-        seqState_t seqState;
-
-        memset(&sequence, 0, sizeof(sequence));
-        sequence.offset = 4;
-        seqState.dumps = dumps;
-        seqState.dumpsEnd = dumps + dumpsLength;
-        seqState.prevOffset = 4;
-        errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
-        if (ERR_isError(errorCode)) return ERROR(corruption_detected);
-        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
-        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
-        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
-
-        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; )
-        {
-            size_t oneSeqSize;
-            nbSeq--;
-            ZSTD_decodeSequence(&sequence, &seqState);
-            oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
-            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
-            op += oneSeqSize;
-        }
-
-        /* check if reached exact end */
-        if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* DStream should be entirely and exactly consumed; otherwise data is corrupted */
-
-        /* last literal segment */
-        {
-            size_t lastLLSize = litEnd - litPtr;
-            if (litPtr > litEnd) return ERROR(corruption_detected);
-            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
-            if (op != litPtr) memcpy(op, litPtr, lastLLSize);
-            op += lastLLSize;
-        }
-    }
-
-    return op-ostart;
-}
-
-
-static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
-{
-    if (dst != dctx->previousDstEnd)   /* not contiguous */
-    {
-        dctx->dictEnd = dctx->previousDstEnd;
-        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
-        dctx->base = dst;
-        dctx->previousDstEnd = dst;
-    }
-}
-
-
-static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
-                            void* dst, size_t maxDstSize,
-                      const void* src, size_t srcSize)
-{
-    /* blockType == blockCompressed */
-    const BYTE* ip = (const BYTE*)src;
-
-    /* Decode literals sub-block */
-    size_t litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
-    if (ZSTD_isError(litCSize)) return litCSize;
-    ip += litCSize;
-    srcSize -= litCSize;
-
-    return ZSTD_decompressSequences(dctx, dst, maxDstSize, ip, srcSize);
-}
-
-
-static size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx,
-                                 void* dst, size_t maxDstSize,
-                                 const void* src, size_t srcSize,
-                                 const void* dict, size_t dictSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-    const BYTE* iend = ip + srcSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* op = ostart;
-    BYTE* const oend = ostart + maxDstSize;
-    size_t remainingSize = srcSize;
-    blockProperties_t blockProperties;
-
-    /* init */
-    ZSTD_resetDCtx(ctx);
-    if (dict)
-    {
-        ZSTD_decompress_insertDictionary(ctx, dict, dictSize);
-        ctx->dictEnd = ctx->previousDstEnd;
-        ctx->vBase = (const char*)dst - ((const char*)(ctx->previousDstEnd) - (const char*)(ctx->base));
-        ctx->base = dst;
-    }
-    else
-    {
-        ctx->vBase = ctx->base = ctx->dictEnd = dst;
-    }
-
-    /* Frame Header */
-    {
-        size_t frameHeaderSize;
-        if (srcSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
-        frameHeaderSize = ZSTD_decodeFrameHeader_Part1(ctx, src, ZSTD_frameHeaderSize_min);
-        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
-        if (srcSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
-        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
-        frameHeaderSize = ZSTD_decodeFrameHeader_Part2(ctx, src, frameHeaderSize);
-        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
-    }
-
-    /* Loop on each block */
-    while (1)
-    {
-        size_t decodedSize=0;
-        size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
-        if (ZSTD_isError(cBlockSize)) return cBlockSize;
-
-        ip += ZSTD_blockHeaderSize;
-        remainingSize -= ZSTD_blockHeaderSize;
-        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
-
-        switch(blockProperties.blockType)
-        {
-        case bt_compressed:
-            decodedSize = ZSTD_decompressBlock_internal(ctx, op, oend-op, ip, cBlockSize);
-            break;
-        case bt_raw :
-            decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
-            break;
-        case bt_rle :
-            return ERROR(GENERIC);   /* not yet supported */
-            break;
-        case bt_end :
-            /* end of frame */
-            if (remainingSize) return ERROR(srcSize_wrong);
-            break;
-        default:
-            return ERROR(GENERIC);   /* impossible */
-        }
-        if (cBlockSize == 0) break;   /* bt_end */
-
-        if (ZSTD_isError(decodedSize)) return decodedSize;
-        op += decodedSize;
-        ip += cBlockSize;
-        remainingSize -= cBlockSize;
-    }
-
-    return op-ostart;
-}
-
-/* ZSTD_errorFrameSizeInfoLegacy() :
-   assumes `cSize` and `dBound` are _not_ NULL */
-static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
-{
-    *cSize = ret;
-    *dBound = ZSTD_CONTENTSIZE_ERROR;
-}
-
-void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
-{
-    const BYTE* ip = (const BYTE*)src;
-    size_t remainingSize = srcSize;
-    size_t nbBlocks = 0;
-    blockProperties_t blockProperties;
-
-    /* Frame Header */
-    if (srcSize < ZSTD_frameHeaderSize_min) {
-        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-        return;
-    }
-    if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) {
-        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
-        return;
-    }
-    ip += ZSTD_frameHeaderSize_min; remainingSize -= ZSTD_frameHeaderSize_min;
-
-    /* Loop on each block */
-    while (1)
-    {
-        size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
-        if (ZSTD_isError(cBlockSize)) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
-            return;
-        }
-
-        ip += ZSTD_blockHeaderSize;
-        remainingSize -= ZSTD_blockHeaderSize;
-        if (cBlockSize > remainingSize) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-            return;
-        }
-
-        if (cBlockSize == 0) break;   /* bt_end */
-
-        ip += cBlockSize;
-        remainingSize -= cBlockSize;
-        nbBlocks++;
-    }
-
-    *cSize = ip - (const BYTE*)src;
-    *dBound = nbBlocks * BLOCKSIZE;
-}
-
-/* ******************************
-*  Streaming Decompression API
-********************************/
-static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
-{
-    return dctx->expected;
-}
-
-static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    /* Sanity check */
-    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
-    ZSTD_checkContinuity(ctx, dst);
-
-    /* Decompress : frame header; part 1 */
-    switch (ctx->stage)
-    {
-    case ZSTDds_getFrameHeaderSize :
-        /* get frame header size */
-        if (srcSize != ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong);   /* impossible */
-        ctx->headerSize = ZSTD_decodeFrameHeader_Part1(ctx, src, ZSTD_frameHeaderSize_min);
-        if (ZSTD_isError(ctx->headerSize)) return ctx->headerSize;
-        memcpy(ctx->headerBuffer, src, ZSTD_frameHeaderSize_min);
-        if (ctx->headerSize > ZSTD_frameHeaderSize_min) return ERROR(GENERIC);   /* impossible */
-        ctx->expected = 0;   /* not necessary to copy more */
-        /* fallthrough */
-    case ZSTDds_decodeFrameHeader:
-        /* get frame header */
-        {   size_t const result = ZSTD_decodeFrameHeader_Part2(ctx, ctx->headerBuffer, ctx->headerSize);
-            if (ZSTD_isError(result)) return result;
-            ctx->expected = ZSTD_blockHeaderSize;
-            ctx->stage = ZSTDds_decodeBlockHeader;
-            return 0;
-        }
-    case ZSTDds_decodeBlockHeader:
-        /* Decode block header */
-        {   blockProperties_t bp;
-            size_t const blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
-            if (ZSTD_isError(blockSize)) return blockSize;
-            if (bp.blockType == bt_end)
-            {
-                ctx->expected = 0;
-                ctx->stage = ZSTDds_getFrameHeaderSize;
-            }
-            else
-            {
-                ctx->expected = blockSize;
-                ctx->bType = bp.blockType;
-                ctx->stage = ZSTDds_decompressBlock;
-            }
-            return 0;
-        }
-    case ZSTDds_decompressBlock:
-        {
-            /* Decompress : block content */
-            size_t rSize;
-            switch(ctx->bType)
-            {
-            case bt_compressed:
-                rSize = ZSTD_decompressBlock_internal(ctx, dst, maxDstSize, src, srcSize);
-                break;
-            case bt_raw :
-                rSize = ZSTD_copyRawBlock(dst, maxDstSize, src, srcSize);
-                break;
-            case bt_rle :
-                return ERROR(GENERIC);   /* not yet handled */
-                break;
-            case bt_end :   /* should never happen (filtered at phase 1) */
-                rSize = 0;
-                break;
-            default:
-                return ERROR(GENERIC);
-            }
-            ctx->stage = ZSTDds_decodeBlockHeader;
-            ctx->expected = ZSTD_blockHeaderSize;
-            ctx->previousDstEnd = (char*)dst + rSize;
-            return rSize;
-        }
-    default:
-        return ERROR(GENERIC);   /* impossible */
-    }
-}
-
-
-static void ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* dict, size_t dictSize)
-{
-    ctx->dictEnd = ctx->previousDstEnd;
-    ctx->vBase = (const char*)dict - ((const char*)(ctx->previousDstEnd) - (const char*)(ctx->base));
-    ctx->base = dict;
-    ctx->previousDstEnd = (const char*)dict + dictSize;
-}
-
-
-
-/*
-    Buffered version of Zstd compression library
-    Copyright (C) 2015, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd source repository : https://github.com/Cyan4973/zstd
-    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-/* The objects defined in this file should be considered experimental.
- * They are not labelled stable, as their prototypes may change in the future.
- * You can use them for tests, to provide feedback, or if you can endure the risk of future changes.
- */
-
-/* *************************************
-*  Includes
-***************************************/
-#include <stdlib.h>
-
-
-/** ************************************************
-*  Streaming decompression
-*
-*  A ZBUFF_DCtx object is required to track streaming operation.
-*  Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
-*  Use ZBUFF_decompressInit() to start a new decompression operation.
-*  ZBUFF_DCtx objects can be reused multiple times.
-*
-*  Use ZBUFF_decompressContinue() repetitively to consume your input.
-*  *srcSizePtr and *maxDstSizePtr can be any size.
-*  The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
-*  Note that it may not consume the entire input, in which case it's up to the caller to call the function again with the remaining input.
-*  The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters, or change dst.
-*  return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)
-*            or 0 when a frame is completely decoded
-*            or an error code, which can be tested using ZBUFF_isError().
-*
-*  Hint : recommended buffer sizes (not compulsory)
-*  output : 128 KB block size is the internal unit, it ensures it's always possible to write a full block when it's decoded.
-*  input : just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
-* **************************************************/
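Editor's note: the block comment above documents the buffered streaming contract implemented below. As an illustrative sketch only (not part of the deleted source), the public ZBUFFv04_* wrappers declared at the end of this file could be driven as shown here; the helper name, buffer handling, and single-frame assumption are hypothetical.

```c
#include <stddef.h>
#include "zstd_v04.h"   /* removed alongside this file; declares the ZBUFFv04_* API */

/* Decompress one v0.4 frame held entirely in memory, feeding it through the
 * buffered API chunk by chunk. Returns the number of bytes written to dst,
 * or 0 on error (error handling simplified for brevity). */
static size_t decompress_v04_buffered(void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
{
    size_t read = 0, written = 0;
    ZBUFFv04_DCtx* const dctx = ZBUFFv04_createDCtx();
    if (dctx == NULL) return 0;
    ZBUFFv04_decompressInit(dctx);

    while (read < srcSize) {
        size_t srcChunk = srcSize - read;          /* in: available input, out: input consumed */
        size_t dstChunk = dstCapacity - written;   /* in: room left,      out: bytes regenerated */
        size_t const hint = ZBUFFv04_decompressContinue(dctx,
                                (char*)dst + written, &dstChunk,
                                (const char*)src + read, &srcChunk);
        if (ZBUFFv04_isError(hint)) { written = 0; break; }
        read    += srcChunk;
        written += dstChunk;
        if (hint == 0) break;                      /* frame fully decoded */
    }
    ZBUFFv04_freeDCtx(dctx);
    return written;
}
```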
-
-typedef enum { ZBUFFds_init, ZBUFFds_readHeader, ZBUFFds_loadHeader, ZBUFFds_decodeHeader,
-               ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFF_dStage;
-
-/* *** Resource management *** */
-
-#define ZSTD_frameHeaderSize_max 5   /* too magical, should come from reference */
-struct ZBUFFv04_DCtx_s {
-    ZSTD_DCtx* zc;
-    ZSTD_parameters params;
-    char* inBuff;
-    size_t inBuffSize;
-    size_t inPos;
-    char* outBuff;
-    size_t outBuffSize;
-    size_t outStart;
-    size_t outEnd;
-    size_t hPos;
-    const char* dict;
-    size_t dictSize;
-    ZBUFF_dStage stage;
-    unsigned char headerBuffer[ZSTD_frameHeaderSize_max];
-};   /* typedef'd to ZBUFF_DCtx within "zstd_buffered.h" */
-
-typedef ZBUFFv04_DCtx ZBUFF_DCtx;
-
-
-static ZBUFF_DCtx* ZBUFF_createDCtx(void)
-{
-    ZBUFF_DCtx* zbc = (ZBUFF_DCtx*)malloc(sizeof(ZBUFF_DCtx));
-    if (zbc==NULL) return NULL;
-    memset(zbc, 0, sizeof(*zbc));
-    zbc->zc = ZSTD_createDCtx();
-    zbc->stage = ZBUFFds_init;
-    return zbc;
-}
-
-static size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbc)
-{
-    if (zbc==NULL) return 0;   /* support free on null */
-    ZSTD_freeDCtx(zbc->zc);
-    free(zbc->inBuff);
-    free(zbc->outBuff);
-    free(zbc);
-    return 0;
-}
-
-
-/* *** Initialization *** */
-
-static size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbc)
-{
-    zbc->stage = ZBUFFds_readHeader;
-    zbc->hPos = zbc->inPos = zbc->outStart = zbc->outEnd = zbc->dictSize = 0;
-    return ZSTD_resetDCtx(zbc->zc);
-}
-
-
-static size_t ZBUFF_decompressWithDictionary(ZBUFF_DCtx* zbc, const void* src, size_t srcSize)
-{
-    zbc->dict = (const char*)src;
-    zbc->dictSize = srcSize;
-    return 0;
-}
-
-static size_t ZBUFF_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    size_t length = MIN(maxDstSize, srcSize);
-    memcpy(dst, src, length);
-    return length;
-}
-
-/* *** Decompression *** */
-
-static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
-{
-    const char* const istart = (const char*)src;
-    const char* ip = istart;
-    const char* const iend = istart + *srcSizePtr;
-    char* const ostart = (char*)dst;
-    char* op = ostart;
-    char* const oend = ostart + *maxDstSizePtr;
-    U32 notDone = 1;
-
-    DEBUGLOG(5, "ZBUFF_decompressContinue");
-    while (notDone)
-    {
-        switch(zbc->stage)
-        {
-
-        case ZBUFFds_init :
-            DEBUGLOG(5, "ZBUFF_decompressContinue: stage==ZBUFFds_init => ERROR(init_missing)");
-            return ERROR(init_missing);
-
-        case ZBUFFds_readHeader :
-            /* read header from src */
-            {   size_t const headerSize = ZSTD_getFrameParams(&(zbc->params), src, *srcSizePtr);
-                if (ZSTD_isError(headerSize)) return headerSize;
-                if (headerSize) {
-                    /* not enough input to decode header : tell how many bytes would be necessary */
-                    memcpy(zbc->headerBuffer+zbc->hPos, src, *srcSizePtr);
-                    zbc->hPos += *srcSizePtr;
-                    *maxDstSizePtr = 0;
-                    zbc->stage = ZBUFFds_loadHeader;
-                    return headerSize - zbc->hPos;
-                }
-                zbc->stage = ZBUFFds_decodeHeader;
-                break;
-            }
-
-        case ZBUFFds_loadHeader:
-            /* complete header from src */
-            {   size_t headerSize = ZBUFF_limitCopy(
-                    zbc->headerBuffer + zbc->hPos, ZSTD_frameHeaderSize_max - zbc->hPos,
-                    src, *srcSizePtr);
-                zbc->hPos += headerSize;
-                ip += headerSize;
-                headerSize = ZSTD_getFrameParams(&(zbc->params), zbc->headerBuffer, zbc->hPos);
-                if (ZSTD_isError(headerSize)) return headerSize;
-                if (headerSize) {
-                    /* not enough input to decode header : tell how many bytes would be necessary */
-                    *maxDstSizePtr = 0;
-                    return headerSize - zbc->hPos;
-            }   }
-            /* intentional fallthrough */
-
-        case ZBUFFds_decodeHeader:
-                /* apply header to create / resize buffers */
-                {   size_t const neededOutSize = (size_t)1 << zbc->params.windowLog;
-                    size_t const neededInSize = BLOCKSIZE;   /* a block is never > BLOCKSIZE */
-                    if (zbc->inBuffSize < neededInSize) {
-                        free(zbc->inBuff);
-                        zbc->inBuffSize = neededInSize;
-                        zbc->inBuff = (char*)malloc(neededInSize);
-                        if (zbc->inBuff == NULL) return ERROR(memory_allocation);
-                    }
-                    if (zbc->outBuffSize < neededOutSize) {
-                        free(zbc->outBuff);
-                        zbc->outBuffSize = neededOutSize;
-                        zbc->outBuff = (char*)malloc(neededOutSize);
-                        if (zbc->outBuff == NULL) return ERROR(memory_allocation);
-                }   }
-                if (zbc->dictSize)
-                    ZSTD_decompress_insertDictionary(zbc->zc, zbc->dict, zbc->dictSize);
-                if (zbc->hPos) {
-                    /* some data already loaded into headerBuffer : transfer into inBuff */
-                    memcpy(zbc->inBuff, zbc->headerBuffer, zbc->hPos);
-                    zbc->inPos = zbc->hPos;
-                    zbc->hPos = 0;
-                    zbc->stage = ZBUFFds_load;
-                    break;
-                }
-                zbc->stage = ZBUFFds_read;
-            /* fall-through */
-        case ZBUFFds_read:
-            {
-                size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);
-                if (neededInSize==0)   /* end of frame */
-                {
-                    zbc->stage = ZBUFFds_init;
-                    notDone = 0;
-                    break;
-                }
-                if ((size_t)(iend-ip) >= neededInSize)
-                {
-                    /* directly decode from src */
-                    size_t decodedSize = ZSTD_decompressContinue(zbc->zc,
-                        zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
-                        ip, neededInSize);
-                    if (ZSTD_isError(decodedSize)) return decodedSize;
-                    ip += neededInSize;
-                    if (!decodedSize) break;   /* this was just a header */
-                    zbc->outEnd = zbc->outStart +  decodedSize;
-                    zbc->stage = ZBUFFds_flush;
-                    break;
-                }
-                if (ip==iend) { notDone = 0; break; }   /* no more input */
-                zbc->stage = ZBUFFds_load;
-            }
-            /* fall-through */
-        case ZBUFFds_load:
-            {
-                size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);
-                size_t toLoad = neededInSize - zbc->inPos;   /* should always be <= remaining space within inBuff */
-                size_t loadedSize;
-                if (toLoad > zbc->inBuffSize - zbc->inPos) return ERROR(corruption_detected);   /* should never happen */
-                loadedSize = ZBUFF_limitCopy(zbc->inBuff + zbc->inPos, toLoad, ip, iend-ip);
-                ip += loadedSize;
-                zbc->inPos += loadedSize;
-                if (loadedSize < toLoad) { notDone = 0; break; }   /* not enough input, wait for more */
-                {
-                    size_t decodedSize = ZSTD_decompressContinue(zbc->zc,
-                        zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
-                        zbc->inBuff, neededInSize);
-                    if (ZSTD_isError(decodedSize)) return decodedSize;
-                    zbc->inPos = 0;   /* input is consumed */
-                    if (!decodedSize) { zbc->stage = ZBUFFds_read; break; }   /* this was just a header */
-                    zbc->outEnd = zbc->outStart +  decodedSize;
-                    zbc->stage = ZBUFFds_flush;
-                    /* ZBUFFds_flush follows */
-                }
-            }
-            /* fall-through */
-        case ZBUFFds_flush:
-            {
-                size_t toFlushSize = zbc->outEnd - zbc->outStart;
-                size_t flushedSize = ZBUFF_limitCopy(op, oend-op, zbc->outBuff + zbc->outStart, toFlushSize);
-                op += flushedSize;
-                zbc->outStart += flushedSize;
-                if (flushedSize == toFlushSize)
-                {
-                    zbc->stage = ZBUFFds_read;
-                    if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize)
-                        zbc->outStart = zbc->outEnd = 0;
-                    break;
-                }
-                /* cannot flush everything */
-                notDone = 0;
-                break;
-            }
-        default: return ERROR(GENERIC);   /* impossible */
-        }
-    }
-
-    *srcSizePtr = ip-istart;
-    *maxDstSizePtr = op-ostart;
-
-    {
-        size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zbc->zc);
-        if (nextSrcSizeHint > 3) nextSrcSizeHint+= 3;   /* get the next block header while at it */
-        nextSrcSizeHint -= zbc->inPos;   /* already loaded*/
-        return nextSrcSizeHint;
-    }
-}
-
-
-/* *************************************
-*  Tool functions
-***************************************/
-unsigned ZBUFFv04_isError(size_t errorCode) { return ERR_isError(errorCode); }
-const char* ZBUFFv04_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
-
-size_t ZBUFFv04_recommendedDInSize()  { return BLOCKSIZE + 3; }
-size_t ZBUFFv04_recommendedDOutSize() { return BLOCKSIZE; }
-
-
-
-/*- ========================================================================= -*/
-
-/* final wrapping stage */
-
-size_t ZSTDv04_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    return ZSTD_decompress_usingDict(dctx, dst, maxDstSize, src, srcSize, NULL, 0);
-}
-
-size_t ZSTDv04_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE==1)
-    size_t regenSize;
-    ZSTD_DCtx* dctx = ZSTD_createDCtx();
-    if (dctx==NULL) return ERROR(memory_allocation);
-    regenSize = ZSTDv04_decompressDCtx(dctx, dst, maxDstSize, src, srcSize);
-    ZSTD_freeDCtx(dctx);
-    return regenSize;
-#else
-    ZSTD_DCtx dctx;
-    return ZSTDv04_decompressDCtx(&dctx, dst, maxDstSize, src, srcSize);
-#endif
-}
-
-size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx) { return ZSTD_resetDCtx(dctx); }
-
-size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx)
-{
-    return ZSTD_nextSrcSizeToDecompress(dctx);
-}
-
-size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    return ZSTD_decompressContinue(dctx, dst, maxDstSize, src, srcSize);
-}
-
-
-
-ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void) { return ZBUFF_createDCtx(); }
-size_t ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx) { return ZBUFF_freeDCtx(dctx); }
-
-size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx) { return ZBUFF_decompressInit(dctx); }
-size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* src, size_t srcSize)
-{ return ZBUFF_decompressWithDictionary(dctx, src, srcSize); }
-
-size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
-{
-    DEBUGLOG(5, "ZBUFFv04_decompressContinue");
-    return ZBUFF_decompressContinue(dctx, dst, maxDstSizePtr, src, srcSizePtr);
-}
-
-ZSTD_DCtx* ZSTDv04_createDCtx(void) { return ZSTD_createDCtx(); }
-size_t ZSTDv04_freeDCtx(ZSTD_DCtx* dctx) { return ZSTD_freeDCtx(dctx); }
diff --git a/vendor/github.com/DataDog/zstd/zstd_v04.h b/vendor/github.com/DataDog/zstd/zstd_v04.h
deleted file mode 100644
index bb5f3b7..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_v04.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_V04_H_91868324769238
-#define ZSTD_V04_H_91868324769238
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* *************************************
-*  Includes
-***************************************/
-#include <stddef.h>   /* size_t */
-
-
-/* *************************************
-*  Simple one-step function
-***************************************/
-/**
-ZSTDv04_decompress() : decompress ZSTD frames compliant with v0.4.x format
-    compressedSize : is the exact source size
-    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
-                      It must be equal to or larger than originalSize, otherwise decompression will fail.
-    return : the number of bytes decompressed into the destination buffer (originalSize)
-             or an errorCode if it fails (which can be tested using ZSTDv04_isError())
-*/
-size_t ZSTDv04_decompress( void* dst, size_t maxOriginalSize,
-                     const void* src, size_t compressedSize);
-
- /**
- ZSTDv04_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.4.x format
-     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
-     cSize (output parameter)  : the number of bytes that would be read to decompress this frame
-                                 or an error code if it fails (which can be tested using ZSTDv04_isError())
-     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
-                                 or ZSTD_CONTENTSIZE_ERROR if an error occurs
-
-    note : assumes `cSize` and `dBound` are _not_ NULL.
- */
- void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
-                                      size_t* cSize, unsigned long long* dBound);
-
-/**
-ZSTDv04_isError() : tells if the result of ZSTDv04_decompress() is an error
-*/
-unsigned ZSTDv04_isError(size_t code);
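An illustrative, editorial sketch (not part of the deleted header): sizing the destination buffer from the frame itself with ZSTDv04_findFrameSizeInfoLegacy(), then decompressing in one shot. ZSTD_CONTENTSIZE_ERROR is assumed to be visible to the caller, as the comment above references it; all names other than the declared functions are hypothetical.

```c
#include <stdlib.h>
#include "zstd_v04.h"

/* Allocate an upper-bound buffer from dBound, then decompress the frame.
 * Returns a malloc'd buffer (caller frees) and sets *outSize, or NULL on error. */
static void* decompress_whole_v04_frame(const void* src, size_t srcSize, size_t* outSize)
{
    size_t cSize;
    unsigned long long dBound;
    void* dst;

    ZSTDv04_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
    if (ZSTDv04_isError(cSize) || dBound == ZSTD_CONTENTSIZE_ERROR) return NULL;

    dst = malloc((size_t)dBound);
    if (dst == NULL) return NULL;

    *outSize = ZSTDv04_decompress(dst, (size_t)dBound, src, cSize);
    if (ZSTDv04_isError(*outSize)) { free(dst); return NULL; }
    return dst;
}
```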
-
-
-/* *************************************
-*  Advanced functions
-***************************************/
-typedef struct ZSTDv04_Dctx_s ZSTDv04_Dctx;
-ZSTDv04_Dctx* ZSTDv04_createDCtx(void);
-size_t ZSTDv04_freeDCtx(ZSTDv04_Dctx* dctx);
-
-size_t ZSTDv04_decompressDCtx(ZSTDv04_Dctx* dctx,
-                              void* dst, size_t maxOriginalSize,
-                        const void* src, size_t compressedSize);
-
-
-/* *************************************
-*  Direct Streaming
-***************************************/
-size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx);
-
-size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx);
-size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
-/**
-  Use the above functions alternately.
-  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
-  ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block.
-  Result is the number of bytes regenerated within 'dst'.
-  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
-*/
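Editorial sketch of the request/feed loop the comment above describes (not part of the deleted header). It assumes the whole compressed frame is in memory and that dst is large enough to keep all regenerated data contiguous, as the bufferless mode requires.

```c
#include "zstd_v04.h"

/* Bufferless streaming decode: feed exactly the number of bytes announced by
 * ZSTDv04_nextSrcSizeToDecompress(), and keep previously regenerated data in
 * place in one contiguous output buffer. Returns bytes written, or 0 on error. */
static size_t decompress_v04_direct(void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    ZSTDv04_Dctx* const dctx = ZSTDv04_createDCtx();
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    size_t remaining = srcSize;
    if (dctx == NULL) return 0;
    ZSTDv04_resetDCtx(dctx);

    for (;;) {
        size_t const toRead = ZSTDv04_nextSrcSizeToDecompress(dctx);
        size_t produced;
        if (toRead == 0) break;                              /* frame fully decoded */
        if (toRead > remaining) { op = (char*)dst; break; }  /* truncated input */
        produced = ZSTDv04_decompressContinue(dctx, op,
                        dstCapacity - (size_t)(op - (char*)dst), ip, toRead);
        if (ZSTDv04_isError(produced)) { op = (char*)dst; break; }
        ip += toRead;  remaining -= toRead;
        op += produced;                                      /* 0 when only a header was decoded */
    }
    ZSTDv04_freeDCtx(dctx);
    return (size_t)(op - (char*)dst);
}
```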
-
-
-/* *************************************
-*  Buffered Streaming
-***************************************/
-typedef struct ZBUFFv04_DCtx_s ZBUFFv04_DCtx;
-ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void);
-size_t         ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx);
-
-size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx);
-size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* dict, size_t dictSize);
-
-size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr);
-
-/** ************************************************
-*  Streaming decompression
-*
-*  A ZBUFF_DCtx object is required to track streaming operation.
-*  Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
-*  Use ZBUFF_decompressInit() to start a new decompression operation.
-*  ZBUFF_DCtx objects can be reused multiple times.
-*
-*  Optionally, a reference to a static dictionary can be set, using ZBUFF_decompressWithDictionary()
-*  It must be the same content as the one set during compression phase.
-*  Dictionary content must remain accessible during the decompression process.
-*
-*  Use ZBUFF_decompressContinue() repetitively to consume your input.
-*  *srcSizePtr and *maxDstSizePtr can be any size.
-*  The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
-*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
-*  The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters or change dst.
-*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)
-*            or 0 when a frame is completely decoded
-*            or an error code, which can be tested using ZBUFF_isError().
-*
-*  Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize / ZBUFF_recommendedDOutSize
-*  output : ZBUFF_recommendedDOutSize==128 KB block size is the internal unit, it ensures it's always possible to write a full block when it's decoded.
-*  input : ZBUFF_recommendedDInSize==128Kb+3; just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
-* **************************************************/
-unsigned ZBUFFv04_isError(size_t errorCode);
-const char* ZBUFFv04_getErrorName(size_t errorCode);
-
-
-/** The below functions provide recommended buffer sizes for Compression or Decompression operations.
-*   These sizes are not compulsory, they just tend to offer better latency */
-size_t ZBUFFv04_recommendedDInSize(void);
-size_t ZBUFFv04_recommendedDOutSize(void);
-
-
-/* *************************************
-*  Prefix - version detection
-***************************************/
-#define ZSTDv04_magicNumber 0xFD2FB524   /* v0.4 */
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_V04_H_91868324769238 */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v05.c b/vendor/github.com/DataDog/zstd/zstd_v05.c
deleted file mode 100644
index a7ea606..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_v05.c
+++ /dev/null
@@ -1,4043 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-/*- Dependencies -*/
-#include "zstd_v05.h"
-#include "error_private.h"
-
-
-/* ******************************************************************
-   mem.h
-   low-level memory access routines
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSEv05 source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef MEM_H_MODULE
-#define MEM_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/*-****************************************
-*  Dependencies
-******************************************/
-#include <stddef.h>    /* size_t, ptrdiff_t */
-#include <string.h>    /* memcpy */
-
-
-/*-****************************************
-*  Compiler specifics
-******************************************/
-#if defined(__GNUC__)
-#  define MEM_STATIC static __attribute__((unused))
-#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#  define MEM_STATIC static inline
-#elif defined(_MSC_VER)
-#  define MEM_STATIC static __inline
-#else
-#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
-#endif
-
-
-/*-**************************************************************
-*  Basic Types
-*****************************************************************/
-#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-# include <stdint.h>
-  typedef  uint8_t BYTE;
-  typedef uint16_t U16;
-  typedef  int16_t S16;
-  typedef uint32_t U32;
-  typedef  int32_t S32;
-  typedef uint64_t U64;
-  typedef  int64_t S64;
-#else
-  typedef unsigned char       BYTE;
-  typedef unsigned short      U16;
-  typedef   signed short      S16;
-  typedef unsigned int        U32;
-  typedef   signed int        S32;
-  typedef unsigned long long  U64;
-  typedef   signed long long  S64;
-#endif
-
-
-/*-**************************************************************
-*  Memory I/O
-*****************************************************************/
-/* MEM_FORCE_MEMORY_ACCESS :
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The switch below allows selecting a different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
- *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method is portable but violates the C standard.
- *            It can generate buggy code on targets that depend on alignment.
- *            In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
- * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-#    define MEM_FORCE_MEMORY_ACCESS 2
-#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
-  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
-#    define MEM_FORCE_MEMORY_ACCESS 1
-#  endif
-#endif
-
-MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
-MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
-
-MEM_STATIC unsigned MEM_isLittleEndian(void)
-{
-    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
-    return one.c[0];
-}
-
-#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
-
-/* violates C standard, by lying on structure alignment.
-Only use if no other choice to achieve best performance on target platform */
-MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
-MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
-MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
-MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
-MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
-
-#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;
-
-MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
-MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
-MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
-MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; }
-
-#else
-
-/* default method, safe and standard.
-   can sometimes prove slower */
-
-MEM_STATIC U16 MEM_read16(const void* memPtr)
-{
-    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC U32 MEM_read32(const void* memPtr)
-{
-    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC U64 MEM_read64(const void* memPtr)
-{
-    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value)
-{
-    memcpy(memPtr, &value, sizeof(value));
-}
-
-MEM_STATIC void MEM_write32(void* memPtr, U32 value)
-{
-    memcpy(memPtr, &value, sizeof(value));
-}
-
-MEM_STATIC void MEM_write64(void* memPtr, U64 value)
-{
-    memcpy(memPtr, &value, sizeof(value));
-}
-
-#endif /* MEM_FORCE_MEMORY_ACCESS */
-
-
-MEM_STATIC U16 MEM_readLE16(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read16(memPtr);
-    else {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U16)(p[0] + (p[1]<<8));
-    }
-}
-
-MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
-{
-    if (MEM_isLittleEndian()) {
-        MEM_write16(memPtr, val);
-    } else {
-        BYTE* p = (BYTE*)memPtr;
-        p[0] = (BYTE)val;
-        p[1] = (BYTE)(val>>8);
-    }
-}
-
-MEM_STATIC U32 MEM_readLE24(const void* memPtr)
-{
-    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
-}
-
-MEM_STATIC U32 MEM_readLE32(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read32(memPtr);
-    else {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
-    }
-}
-
-
-MEM_STATIC U64 MEM_readLE64(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read64(memPtr);
-    else {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
-                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
-    }
-}
-
-
-MEM_STATIC size_t MEM_readLEST(const void* memPtr)
-{
-    if (MEM_32bits())
-        return (size_t)MEM_readLE32(memPtr);
-    else
-        return (size_t)MEM_readLE64(memPtr);
-}
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* MEM_H_MODULE */
-
-/*
-    zstd - standard compression library
-    Header File for static linking only
-    Copyright (C) 2014-2016, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd homepage : http://www.zstd.net
-*/
-#ifndef ZSTD_STATIC_H
-#define ZSTD_STATIC_H
-
-/* The prototypes defined within this file are considered experimental.
- * They should not be used in a DLL context, as they may change in the future.
- * Prefer static linking if you need them, to stay in control of breaking version changes.
- */
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-
-/*-*************************************
-*  Types
-***************************************/
-#define ZSTDv05_WINDOWLOG_ABSOLUTEMIN 11
-
-
-/*-*************************************
-*  Advanced functions
-***************************************/
-/*- Advanced Decompression functions -*/
-
-/*! ZSTDv05_decompress_usingPreparedDCtx() :
-*   Same as ZSTDv05_decompress_usingDict, but using a reference context `preparedDCtx`, where dictionary has been loaded.
-*   It avoids reloading the dictionary each time.
-*   `preparedDCtx` must have been properly initialized using ZSTDv05_decompressBegin_usingDict().
-*   Requires 2 contexts : 1 for reference, which will not be modified, and 1 to run the decompression operation */
-size_t ZSTDv05_decompress_usingPreparedDCtx(
-                                             ZSTDv05_DCtx* dctx, const ZSTDv05_DCtx* preparedDCtx,
-                                             void* dst, size_t dstCapacity,
-                                       const void* src, size_t srcSize);
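An editorial sketch of the two-context pattern described above (not part of the deleted source). ZSTDv05_decompressBegin_usingDict() is only named in the surrounding comments; its (dctx, dict, dictSize) signature, and the dict/dst/src variables, are assumptions.

```c
/* one-time setup : load the dictionary into a reference context */
ZSTDv05_DCtx* const ref = ZSTDv05_createDCtx();
ZSTDv05_decompressBegin_usingDict(ref, dict, dictSize);   /* signature assumed */

/* per frame : `ref` is never modified, `work` runs the actual decompression */
ZSTDv05_DCtx* const work = ZSTDv05_createDCtx();
size_t const rSize = ZSTDv05_decompress_usingPreparedDCtx(work, ref,
                                                          dst, dstCapacity,
                                                          src, srcSize);
/* rSize is the regenerated size, or an error code testable with ZSTDv05_isError() */
```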
-
-
-/* **************************************
-*  Streaming functions (direct mode)
-****************************************/
-size_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx);
-
-/*
-  Streaming decompression, direct mode (bufferless)
-
-  A ZSTDv05_DCtx object is required to track streaming operations.
-  Use ZSTDv05_createDCtx() / ZSTDv05_freeDCtx() to manage it.
-  A ZSTDv05_DCtx object can be re-used multiple times.
-
-  First typical operation is to retrieve frame parameters, using ZSTDv05_getFrameParams().
-  This operation is independent, and just needs enough input data to properly decode the frame header.
-  The objective is to retrieve params.windowLog, to know the minimum amount of memory required during decoding.
-  Result : 0 when successful, meaning the ZSTDv05_parameters structure has been filled.
-           >0 : there is not enough data in src. The value is the expected size needed to successfully decode the header.
-           or an errorCode, which can be tested using ZSTDv05_isError()
-
-  Start decompression, with ZSTDv05_decompressBegin() or ZSTDv05_decompressBegin_usingDict()
-  Alternatively, you can copy a prepared context, using ZSTDv05_copyDCtx()
-
-  Then use ZSTDv05_nextSrcSizeToDecompress() and ZSTDv05_decompressContinue() alternatively.
-  ZSTDv05_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv05_decompressContinue().
-  ZSTDv05_decompressContinue() requires this exact amount of bytes, or it will fail.
-  ZSTDv05_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog).
-  They should preferably be located contiguously, prior to current block. Alternatively, a round buffer is also possible.
-
-  @result of ZSTDv05_decompressContinue() is the number of bytes regenerated within 'dst'.
-  It can be zero, which is not an error; it just means ZSTDv05_decompressContinue() has decoded some header.
-
-  A frame is fully decoded when ZSTDv05_nextSrcSizeToDecompress() returns zero.
-  Context can then be reset to start a new decompression.
-*/
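Editorial sketch of the first step in the sequence above (not part of the deleted source): probing the frame header before committing buffers. ZSTDv05_getFrameParams() and ZSTDv05_parameters are named in the comment above; the exact signature and the windowLog field name (mirroring the v0.4 code earlier in this diff) are assumptions.

```c
#include "zstd_v05.h"

/* Returns 0 once the header is decoded (and sets *windowSize), a positive byte
 * count when more input is needed, or an error code. */
static size_t probe_v05_header(const void* src, size_t srcSize, size_t* windowSize)
{
    ZSTDv05_parameters params;
    size_t const hSize = ZSTDv05_getFrameParams(&params, src, srcSize);
    if (ZSTDv05_isError(hSize)) return hSize;      /* corrupted or unknown header */
    if (hSize != 0) return hSize;                  /* need hSize bytes to decode the header */
    *windowSize = (size_t)1 << params.windowLog;   /* history that must stay addressable (field name assumed) */
    return 0;
}
```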
-
-
-/* **************************************
-*  Block functions
-****************************************/
-/*! Block functions produce and decode raw zstd blocks, without frame metadata.
-    The user is responsible for keeping track of the information required to regenerate the data, such as block sizes.
-
-    A few rules to respect :
-    - Uncompressed block size must be <= 128 KB
-    - Compressing or decompressing requires a context structure
-      + Use ZSTDv05_createCCtx() and ZSTDv05_createDCtx()
-    - It is necessary to init context before starting
-      + compression : ZSTDv05_compressBegin()
-      + decompression : ZSTDv05_decompressBegin()
-      + variants _usingDict() are also allowed
-      + copyCCtx() and copyDCtx() work too
-    - When a block is considered not compressible enough, ZSTDv05_compressBlock() result will be zero.
-      In that case, nothing is produced into `dst`.
-      + The user must test for such an outcome and deal directly with the uncompressed data
-      + ZSTDv05_decompressBlock() doesn't accept uncompressed data as input!
-*/
-
-size_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
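Editorial sketch only (not part of the deleted source): decoding a single block whose boundaries and "compressed" status are already known to the caller, per the rules listed above. The blockPtr/blockSize names are hypothetical.

```c
#include "zstd_v05.h"

/* Decode one compressed block. Uncompressed blocks must be copied by the
 * caller instead; ZSTDv05_decompressBlock() will not accept them. */
static size_t decode_one_v05_block(void* dst, size_t dstCapacity,
                                   const void* blockPtr, size_t blockSize)
{
    size_t rSize;
    ZSTDv05_DCtx* const dctx = ZSTDv05_createDCtx();
    if (dctx == NULL) return 0;
    ZSTDv05_decompressBegin(dctx);                 /* mandatory context init */
    rSize = ZSTDv05_decompressBlock(dctx, dst, dstCapacity, blockPtr, blockSize);
    ZSTDv05_freeDCtx(dctx);
    return rSize;                                  /* regenerated size, or an error code */
}
```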
-
-
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif  /* ZSTD_STATIC_H */
-
-
-/*
-    zstd_internal - common functions to include
-    Header File for include
-    Copyright (C) 2014-2016, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd source repository : https://github.com/Cyan4973/zstd
-*/
-#ifndef ZSTD_CCOMMON_H_MODULE
-#define ZSTD_CCOMMON_H_MODULE
-
-
-
-/*-*************************************
-*  Common macros
-***************************************/
-#define MIN(a,b) ((a)<(b) ? (a) : (b))
-#define MAX(a,b) ((a)>(b) ? (a) : (b))
-
-
-/*-*************************************
-*  Common constants
-***************************************/
-#define ZSTDv05_DICT_MAGIC  0xEC30A435
-
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
-
-#define BLOCKSIZE (128 KB)                 /* define, for static allocation */
-
-static const size_t ZSTDv05_blockHeaderSize = 3;
-static const size_t ZSTDv05_frameHeaderSize_min = 5;
-#define ZSTDv05_frameHeaderSize_max 5         /* define, for static allocation */
-
-#define BITv057 128
-#define BITv056  64
-#define BITv055  32
-#define BITv054  16
-#define BITv051   2
-#define BITv050   1
-
-#define IS_HUFv05 0
-#define IS_PCH 1
-#define IS_RAW 2
-#define IS_RLE 3
-
-#define MINMATCH 4
-#define REPCODE_STARTVALUE 1
-
-#define Litbits  8
-#define MLbits   7
-#define LLbits   6
-#define Offbits  5
-#define MaxLit ((1<<Litbits) - 1)
-#define MaxML  ((1<<MLbits) - 1)
-#define MaxLL  ((1<<LLbits) - 1)
-#define MaxOff ((1<<Offbits)- 1)
-#define MLFSEv05Log   10
-#define LLFSEv05Log   10
-#define OffFSEv05Log   9
-#define MaxSeq MAX(MaxLL, MaxML)
-
-#define FSEv05_ENCODING_RAW     0
-#define FSEv05_ENCODING_RLE     1
-#define FSEv05_ENCODING_STATIC  2
-#define FSEv05_ENCODING_DYNAMIC 3
-
-
-#define HufLog 12
-
-#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
-#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */
-
-#define WILDCOPY_OVERLENGTH 8
-
-#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
-
-typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
-
-
-/*-*******************************************
-*  Shared functions to include for inlining
-*********************************************/
-static void ZSTDv05_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
-
-#define COPY8(d,s) { ZSTDv05_copy8(d,s); d+=8; s+=8; }
-
-/*! ZSTDv05_wildcopy() :
-*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
-MEM_STATIC void ZSTDv05_wildcopy(void* dst, const void* src, ptrdiff_t length)
-{
-    const BYTE* ip = (const BYTE*)src;
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + length;
-    do
-        COPY8(op, ip)
-    while (op < oend);
-}
-
-
-/*-*******************************************
-*  Private interfaces
-*********************************************/
-typedef struct {
-    void* buffer;
-    U32*  offsetStart;
-    U32*  offset;
-    BYTE* offCodeStart;
-    BYTE* offCode;
-    BYTE* litStart;
-    BYTE* lit;
-    BYTE* litLengthStart;
-    BYTE* litLength;
-    BYTE* matchLengthStart;
-    BYTE* matchLength;
-    BYTE* dumpsStart;
-    BYTE* dumps;
-    /* opt */
-    U32* matchLengthFreq;
-    U32* litLengthFreq;
-    U32* litFreq;
-    U32* offCodeFreq;
-    U32  matchLengthSum;
-    U32  litLengthSum;
-    U32  litSum;
-    U32  offCodeSum;
-} seqStore_t;
-
-
-
-#endif   /* ZSTD_CCOMMON_H_MODULE */
-/* ******************************************************************
-   FSEv05 : Finite State Entropy coder
-   header file
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef FSEv05_H
-#define FSEv05_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/* *****************************************
-*  Includes
-******************************************/
-#include <stddef.h>    /* size_t, ptrdiff_t */
-
-
-/*-****************************************
-*  FSEv05 simple functions
-******************************************/
-size_t FSEv05_decompress(void* dst,  size_t maxDstSize,
-                const void* cSrc, size_t cSrcSize);
-/*!
-FSEv05_decompress():
-    Decompress FSEv05 data from buffer 'cSrc', of size 'cSrcSize',
-    into already allocated destination buffer 'dst', of size 'maxDstSize'.
-    return : size of regenerated data (<= maxDstSize)
-             or an error code, which can be tested using FSEv05_isError()
-
-    ** Important ** : FSEv05_decompress() doesn't decompress incompressible or RLE data!
-    Why? Making this distinction requires a header.
-    Header management is intentionally delegated to the user layer, which can better manage special cases.
-*/
-
-
-/* *****************************************
-*  Tool functions
-******************************************/
-/* Error Management */
-unsigned    FSEv05_isError(size_t code);        /* tells if a return value is an error code */
-const char* FSEv05_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
-
-
-
-
-/* *****************************************
-*  FSEv05 detailed API
-******************************************/
-/* *** DECOMPRESSION *** */
-
-/*!
-FSEv05_readNCount():
-   Read compactly saved 'normalizedCounter' from 'rBuffer'.
-   return : size read from 'rBuffer'
-            or an errorCode, which can be tested using FSEv05_isError()
-            maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
-size_t FSEv05_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
-
-/*!
-Constructor and Destructor of type FSEv05_DTable
-    Note that its size depends on 'tableLog' */
-typedef unsigned FSEv05_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
-FSEv05_DTable* FSEv05_createDTable(unsigned tableLog);
-void        FSEv05_freeDTable(FSEv05_DTable* dt);
-
-/*!
-FSEv05_buildDTable():
-   Builds 'dt', which must be already allocated, using FSEv05_createDTable()
-   @return : 0,
-             or an errorCode, which can be tested using FSEv05_isError() */
-size_t FSEv05_buildDTable (FSEv05_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
-
-/*!
-FSEv05_decompress_usingDTable():
-   Decompress compressed source @cSrc of size @cSrcSize using `dt`
-   into `dst` which must be already allocated.
-   @return : size of regenerated data (necessarily <= @dstCapacity)
-             or an errorCode, which can be tested using FSEv05_isError() */
-size_t FSEv05_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv05_DTable* dt);
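Editorial sketch of the read-header / build-table / decode sequence documented above (not part of the deleted source), assuming the FSEv05 declarations above are in scope; the 256-symbol bound and the omitted allocation-failure handling are simplifications.

```c
/* Decode an FSEv05-compressed buffer whose normalized counter header sits at
 * the front of cSrc. Returns the regenerated size, or an error code. */
static size_t fse05_decode(void* dst, size_t dstCapacity,
                           const void* cSrc, size_t cSrcSize)
{
    short norm[256];                               /* large enough for an 8-bit symbol alphabet */
    unsigned maxSymbol = 255, tableLog = 0;
    size_t const hSize = FSEv05_readNCount(norm, &maxSymbol, &tableLog, cSrc, cSrcSize);
    if (FSEv05_isError(hSize)) return hSize;

    {   FSEv05_DTable* const dt = FSEv05_createDTable(tableLog);   /* NULL check omitted for brevity */
        size_t dSize = FSEv05_buildDTable(dt, norm, maxSymbol, tableLog);
        if (!FSEv05_isError(dSize))
            dSize = FSEv05_decompress_usingDTable(dst, dstCapacity,
                        (const char*)cSrc + hSize, cSrcSize - hSize, dt);
        FSEv05_freeDTable(dt);
        return dSize;
    }
}
```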
-
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif  /* FSEv05_H */
-/* ******************************************************************
-   bitstream
-   Part of FSEv05 library
-   header file (to include)
-   Copyright (C) 2013-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-#ifndef BITv05STREAM_H_MODULE
-#define BITv05STREAM_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/*
-*  This API consists of small unitary functions, which benefit greatly from being inlined.
-*  Since link-time optimization is not available for all compilers,
-*  these functions are defined in a .h to be included.
-*/
-
-
-
-/*-********************************************
-*  bitStream decoding API (read backward)
-**********************************************/
-typedef struct
-{
-    size_t   bitContainer;
-    unsigned bitsConsumed;
-    const char* ptr;
-    const char* start;
-} BITv05_DStream_t;
-
-typedef enum { BITv05_DStream_unfinished = 0,
-               BITv05_DStream_endOfBuffer = 1,
-               BITv05_DStream_completed = 2,
-               BITv05_DStream_overflow = 3 } BITv05_DStream_status;  /* result of BITv05_reloadDStream() */
-               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
-
-MEM_STATIC size_t   BITv05_initDStream(BITv05_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
-MEM_STATIC size_t   BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits);
-MEM_STATIC BITv05_DStream_status BITv05_reloadDStream(BITv05_DStream_t* bitD);
-MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* bitD);
-
-
-/*-****************************************
-*  unsafe API
-******************************************/
-MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits);
-/* faster, but works only if nbBits >= 1 */
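Editorial sketch (not part of the deleted source) of the backward bitstream API declared above: initialize, read a fixed-width field, reload, and verify the stream was fully consumed. The nbFields count is hypothetical; real decoders interleave reads and reloads per decoded symbol.

```c
/* Sum nbFields 4-bit values stored in a backward bitstream. Returns 0 on a
 * malformed stream or when the stream is not fully consumed. */
static unsigned sum_4bit_fields(const void* srcBuffer, size_t srcSize, unsigned nbFields)
{
    BITv05_DStream_t bitD;
    unsigned n, total = 0;
    if (FSEv05_isError(BITv05_initDStream(&bitD, srcBuffer, srcSize)))
        return 0;                                  /* stream too small, or end mark missing */
    for (n = 0; n < nbFields; n++) {
        total += (unsigned)BITv05_readBits(&bitD, 4);
        BITv05_reloadDStream(&bitD);               /* refill the bit container regularly */
    }
    return BITv05_endOfDStream(&bitD) ? total : 0;
}
```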
-
-
-
-/*-**************************************************************
-*  Helper functions
-****************************************************************/
-MEM_STATIC unsigned BITv05_highbit32 (U32 val)
-{
-#   if defined(_MSC_VER)   /* Visual */
-    unsigned long r=0;
-    _BitScanReverse ( &r, val );
-    return (unsigned) r;
-#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
-    return 31 - __builtin_clz (val);
-#   else   /* Software version */
-    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
-    U32 v = val;
-    unsigned r;
-    v |= v >> 1;
-    v |= v >> 2;
-    v |= v >> 4;
-    v |= v >> 8;
-    v |= v >> 16;
-    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
-    return r;
-#   endif
-}
-
-
-
-/*-********************************************************
-* bitStream decoding
-**********************************************************/
-/*!BITv05_initDStream
-*  Initialize a BITv05_DStream_t.
-*  @bitD : a pointer to an already allocated BITv05_DStream_t structure
-*  @srcBuffer must point at the beginning of a bitStream
-*  @srcSize must be the exact size of the bitStream
-*  @result : size of stream (== srcSize) or an errorCode if a problem is detected
-*/
-MEM_STATIC size_t BITv05_initDStream(BITv05_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
-{
-    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
-
-    if (srcSize >=  sizeof(size_t)) {  /* normal case */
-        U32 contain32;
-        bitD->start = (const char*)srcBuffer;
-        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);
-        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
-        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
-        bitD->bitsConsumed = 8 - BITv05_highbit32(contain32);
-    } else {
-        U32 contain32;
-        bitD->start = (const char*)srcBuffer;
-        bitD->ptr   = bitD->start;
-        bitD->bitContainer = *(const BYTE*)(bitD->start);
-        switch(srcSize)
-        {
-            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
-            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
-            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
-            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
-            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
-            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8; /* fall-through */
-            default: break;
-        }
-        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
-        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
-        bitD->bitsConsumed = 8 - BITv05_highbit32(contain32);
-        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
-    }
-
-    return srcSize;
-}
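
The end-mark arithmetic above is easy to miss: the last byte of the stream carries a final `1` bit just past the payload, so `8 - BITv05_highbit32(lastByte)` is the number of padding bits plus the mark itself that must be skipped before real payload bits are read. A tiny standalone sketch of that arithmetic (toy byte value, not taken from the diff):

```c
#include <stdio.h>
#include <stdint.h>

static unsigned highbit32(uint32_t v) { unsigned r = 0; while (v >>= 1) r++; return r; }

int main(void)
{
    /* toy last byte: 0010 1000 -> highest set bit (the end mark) at position 5 */
    const uint8_t lastByte = 0x28;
    if (lastByte == 0) { puts("endMark not present -> GENERIC error"); return 1; }
    const unsigned bitsConsumed = 8 - highbit32(lastByte);    /* 2 padding zeros + the mark = 3 */
    printf("lastByte=0x%02X -> initial bitsConsumed=%u\n", (unsigned)lastByte, bitsConsumed);
    return 0;
}
```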
-
-MEM_STATIC size_t BITv05_lookBits(BITv05_DStream_t* bitD, U32 nbBits)
-{
-    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
-    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
-}
-
-/*! BITv05_lookBitsFast :
-*   unsafe version; works only if nbBits >= 1 */
-MEM_STATIC size_t BITv05_lookBitsFast(BITv05_DStream_t* bitD, U32 nbBits)
-{
-    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
-    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
-}
-
-MEM_STATIC void BITv05_skipBits(BITv05_DStream_t* bitD, U32 nbBits)
-{
-    bitD->bitsConsumed += nbBits;
-}
-
-MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits)
-{
-    size_t value = BITv05_lookBits(bitD, nbBits);
-    BITv05_skipBits(bitD, nbBits);
-    return value;
-}
-
-/*!BITv05_readBitsFast :
-*  unsafe version; works only if nbBits >= 1 */
-MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits)
-{
-    size_t value = BITv05_lookBitsFast(bitD, nbBits);
-    BITv05_skipBits(bitD, nbBits);
-    return value;
-}
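
Both the look and read variants extract the next `nbBits` from the top of the little-endian container: shift out what has already been consumed, then keep only the top bits. A minimal standalone sketch of the same shift arithmetic, assuming a 64-bit container:

```c
#include <stdio.h>
#include <stdint.h>

/* same shift arithmetic as BITv05_lookBitsFast, for a 64-bit container;
 * like the "fast" variants above, it assumes nbBits >= 1 */
static uint64_t look_bits_fast(uint64_t container, unsigned consumed, unsigned nbBits)
{
    const unsigned bitMask = 64 - 1;
    return (container << (consumed & bitMask)) >> ((64 - nbBits) & bitMask);
}

int main(void)
{
    uint64_t container = 0xABCD000000000000ULL;   /* bits are consumed from the high end */
    unsigned consumed = 0;

    unsigned first4 = (unsigned)look_bits_fast(container, consumed, 4); consumed += 4;   /* 0xA  */
    unsigned next8  = (unsigned)look_bits_fast(container, consumed, 8); consumed += 8;   /* 0xBC */

    printf("first4=0x%X next8=0x%X consumed=%u\n", first4, next8, consumed);
    return 0;
}
```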
-
-MEM_STATIC BITv05_DStream_status BITv05_reloadDStream(BITv05_DStream_t* bitD)
-{
-    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
-        return BITv05_DStream_overflow;
-
-    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
-        bitD->ptr -= bitD->bitsConsumed >> 3;
-        bitD->bitsConsumed &= 7;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);
-        return BITv05_DStream_unfinished;
-    }
-    if (bitD->ptr == bitD->start) {
-        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv05_DStream_endOfBuffer;
-        return BITv05_DStream_completed;
-    }
-    {
-        U32 nbBytes = bitD->bitsConsumed >> 3;
-        BITv05_DStream_status result = BITv05_DStream_unfinished;
-        if (bitD->ptr - nbBytes < bitD->start) {
-            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
-            result = BITv05_DStream_endOfBuffer;
-        }
-        bitD->ptr -= nbBytes;
-        bitD->bitsConsumed -= nbBytes*8;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
-        return result;
-    }
-}
-
-/*! BITv05_endOfDStream
-*   @return Tells if DStream has reached its exact end
-*/
-MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* DStream)
-{
-    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
-}
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* BITv05STREAM_H_MODULE */
-/* ******************************************************************
-   FSEv05 : Finite State Entropy coder
-   header file for static linking (only)
-   Copyright (C) 2013-2015, Yann Collet
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef FSEv05_STATIC_H
-#define FSEv05_STATIC_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-
-/* *****************************************
-*  Static allocation
-*******************************************/
-/* It is possible to statically allocate FSEv05 CTable/DTable as a table of unsigned using below macros */
-#define FSEv05_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
-
-
-/* *****************************************
-*  FSEv05 advanced API
-*******************************************/
-size_t FSEv05_buildDTable_raw (FSEv05_DTable* dt, unsigned nbBits);
-/* build a fake FSEv05_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
-
-size_t FSEv05_buildDTable_rle (FSEv05_DTable* dt, unsigned char symbolValue);
-/* build a fake FSEv05_DTable, designed to always generate the same symbolValue */
-
-
-
-/* *****************************************
-*  FSEv05 symbol decompression API
-*******************************************/
-typedef struct
-{
-    size_t      state;
-    const void* table;   /* precise table may vary, depending on U16 */
-} FSEv05_DState_t;
-
-
-static void     FSEv05_initDState(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD, const FSEv05_DTable* dt);
-
-static unsigned char FSEv05_decodeSymbol(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD);
-
-static unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr);
-
-
-
-/* *****************************************
-*  FSEv05 unsafe API
-*******************************************/
-static unsigned char FSEv05_decodeSymbolFast(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD);
-/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
-
-
-/* *****************************************
-*  Implementation of inlined functions
-*******************************************/
-/* decompression */
-
-typedef struct {
-    U16 tableLog;
-    U16 fastMode;
-} FSEv05_DTableHeader;   /* sizeof U32 */
-
-typedef struct
-{
-    unsigned short newState;
-    unsigned char  symbol;
-    unsigned char  nbBits;
-} FSEv05_decode_t;   /* size == U32 */
-
-MEM_STATIC void FSEv05_initDState(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD, const FSEv05_DTable* dt)
-{
-    const void* ptr = dt;
-    const FSEv05_DTableHeader* const DTableH = (const FSEv05_DTableHeader*)ptr;
-    DStatePtr->state = BITv05_readBits(bitD, DTableH->tableLog);
-    BITv05_reloadDStream(bitD);
-    DStatePtr->table = dt + 1;
-}
-
-MEM_STATIC BYTE FSEv05_peakSymbol(FSEv05_DState_t* DStatePtr)
-{
-    const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    return DInfo.symbol;
-}
-
-MEM_STATIC BYTE FSEv05_decodeSymbol(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD)
-{
-    const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    const U32  nbBits = DInfo.nbBits;
-    BYTE symbol = DInfo.symbol;
-    size_t lowBits = BITv05_readBits(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-MEM_STATIC BYTE FSEv05_decodeSymbolFast(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD)
-{
-    const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    const U32 nbBits = DInfo.nbBits;
-    BYTE symbol = DInfo.symbol;
-    size_t lowBits = BITv05_readBitsFast(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-MEM_STATIC unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr)
-{
-    return DStatePtr->state == 0;
-}
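
An FSE decoder is a table-driven state machine: the current state indexes a cell holding (symbol, nbBits, newState); each step emits the symbol, reads `nbBits` more bits, and jumps to `newState + lowBits`. The toy program below uses a hand-written 4-cell table (not one produced by FSEv05_buildDTable) purely to illustrate that stepping mechanics:

```c
#include <stdio.h>

typedef struct { unsigned short newState; unsigned char symbol; unsigned char nbBits; } decode_cell;

int main(void)
{
    /* hand-written 4-cell table: (newState, symbol, nbBits) per state */
    const decode_cell table[4] = {
        { 0, 'A', 2 }, { 0, 'B', 2 }, { 2, 'A', 1 }, { 2, 'C', 1 },
    };
    /* pretend bitstream: the low bits "read" at each step (each value < 1<<nbBits of the current cell) */
    const unsigned lowBits[5] = { 1, 3, 0, 1, 1 };

    unsigned state = 0;
    for (unsigned i = 0; i < 5; i++) {
        const decode_cell c = table[state];
        putchar(c.symbol);                   /* emit the symbol attached to the current state */
        state = c.newState + lowBits[i];     /* consume c.nbBits bits and jump */
    }
    putchar('\n');                           /* prints ABCAC */
    return 0;
}
```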
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif  /* FSEv05_STATIC_H */
-/* ******************************************************************
-   FSEv05 : Finite State Entropy coder
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSEv05 source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-#ifndef FSEv05_COMMONDEFS_ONLY
-
-/* **************************************************************
-*  Tuning parameters
-****************************************************************/
-/*!MEMORY_USAGE :
-*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
-*  Increasing memory usage improves compression ratio
-*  Reduced memory usage can improve speed, due to cache effect
-*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
-#define FSEv05_MAX_MEMORY_USAGE 14
-#define FSEv05_DEFAULT_MEMORY_USAGE 13
-
-/*!FSEv05_MAX_SYMBOL_VALUE :
-*  Maximum symbol value authorized.
-*  Required for proper stack allocation */
-#define FSEv05_MAX_SYMBOL_VALUE 255
-
-
-/* **************************************************************
-*  template functions type & suffix
-****************************************************************/
-#define FSEv05_FUNCTION_TYPE BYTE
-#define FSEv05_FUNCTION_EXTENSION
-#define FSEv05_DECODE_TYPE FSEv05_decode_t
-
-
-#endif   /* !FSEv05_COMMONDEFS_ONLY */
-
-/* **************************************************************
-*  Compiler specifics
-****************************************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  define FORCE_INLINE static __forceinline
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
-#else
-#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#    ifdef __GNUC__
-#      define FORCE_INLINE static inline __attribute__((always_inline))
-#    else
-#      define FORCE_INLINE static inline
-#    endif
-#  else
-#    define FORCE_INLINE static
-#  endif /* __STDC_VERSION__ */
-#endif
-
-
-/* **************************************************************
-*  Includes
-****************************************************************/
-#include <stdlib.h>     /* malloc, free, qsort */
-#include <string.h>     /* memcpy, memset */
-#include <stdio.h>      /* printf (debug) */
-
-
-
-/* ***************************************************************
-*  Constants
-*****************************************************************/
-#define FSEv05_MAX_TABLELOG  (FSEv05_MAX_MEMORY_USAGE-2)
-#define FSEv05_MAX_TABLESIZE (1U<<FSEv05_MAX_TABLELOG)
-#define FSEv05_MAXTABLESIZE_MASK (FSEv05_MAX_TABLESIZE-1)
-#define FSEv05_DEFAULT_TABLELOG (FSEv05_DEFAULT_MEMORY_USAGE-2)
-#define FSEv05_MIN_TABLELOG 5
-
-#define FSEv05_TABLELOG_ABSOLUTE_MAX 15
-#if FSEv05_MAX_TABLELOG > FSEv05_TABLELOG_ABSOLUTE_MAX
-#error "FSEv05_MAX_TABLELOG > FSEv05_TABLELOG_ABSOLUTE_MAX is not supported"
-#endif
-
-
-/* **************************************************************
-*  Error Management
-****************************************************************/
-#define FSEv05_STATIC_ASSERT(c) { enum { FSEv05_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
-
-
-/* **************************************************************
-*  Complex types
-****************************************************************/
-typedef unsigned DTable_max_t[FSEv05_DTABLE_SIZE_U32(FSEv05_MAX_TABLELOG)];
-
-
-/* **************************************************************
-*  Templates
-****************************************************************/
-/*
-  designed to be included
-  for type-specific functions (template emulation in C)
-  Objective is to write these functions only once, for improved maintenance
-*/
-
-/* safety checks */
-#ifndef FSEv05_FUNCTION_EXTENSION
-#  error "FSEv05_FUNCTION_EXTENSION must be defined"
-#endif
-#ifndef FSEv05_FUNCTION_TYPE
-#  error "FSEv05_FUNCTION_TYPE must be defined"
-#endif
-
-/* Function names */
-#define FSEv05_CAT(X,Y) X##Y
-#define FSEv05_FUNCTION_NAME(X,Y) FSEv05_CAT(X,Y)
-#define FSEv05_TYPE_NAME(X,Y) FSEv05_CAT(X,Y)
-
-
-/* Function templates */
-static U32 FSEv05_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
-
-
-
-FSEv05_DTable* FSEv05_createDTable (unsigned tableLog)
-{
-    if (tableLog > FSEv05_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv05_TABLELOG_ABSOLUTE_MAX;
-    return (FSEv05_DTable*)malloc( FSEv05_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
-}
-
-void FSEv05_freeDTable (FSEv05_DTable* dt)
-{
-    free(dt);
-}
-
-size_t FSEv05_buildDTable(FSEv05_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
-    FSEv05_DTableHeader DTableH;
-    void* const tdPtr = dt+1;   /* because dt is unsigned, 32-bits aligned on 32-bits */
-    FSEv05_DECODE_TYPE* const tableDecode = (FSEv05_DECODE_TYPE*) (tdPtr);
-    const U32 tableSize = 1 << tableLog;
-    const U32 tableMask = tableSize-1;
-    const U32 step = FSEv05_tableStep(tableSize);
-    U16 symbolNext[FSEv05_MAX_SYMBOL_VALUE+1];
-    U32 position = 0;
-    U32 highThreshold = tableSize-1;
-    const S16 largeLimit= (S16)(1 << (tableLog-1));
-    U32 noLarge = 1;
-    U32 s;
-
-    /* Sanity Checks */
-    if (maxSymbolValue > FSEv05_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
-    if (tableLog > FSEv05_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
-
-    /* Init, lay down lowprob symbols */
-    memset(tableDecode, 0, sizeof(FSEv05_FUNCTION_TYPE) * (maxSymbolValue+1) );   /* useless init, but keep static analyzer happy, and we don't need to performance optimize legacy decoders */
-    DTableH.tableLog = (U16)tableLog;
-    for (s=0; s<=maxSymbolValue; s++) {
-        if (normalizedCounter[s]==-1) {
-            tableDecode[highThreshold--].symbol = (FSEv05_FUNCTION_TYPE)s;
-            symbolNext[s] = 1;
-        } else {
-            if (normalizedCounter[s] >= largeLimit) noLarge=0;
-            symbolNext[s] = normalizedCounter[s];
-    }   }
-
-    /* Spread symbols */
-    for (s=0; s<=maxSymbolValue; s++) {
-        int i;
-        for (i=0; i<normalizedCounter[s]; i++) {
-            tableDecode[position].symbol = (FSEv05_FUNCTION_TYPE)s;
-            position = (position + step) & tableMask;
-            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
-    }   }
-
-    if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
-
-    /* Build Decoding table */
-    {
-        U32 i;
-        for (i=0; i<tableSize; i++) {
-            FSEv05_FUNCTION_TYPE symbol = (FSEv05_FUNCTION_TYPE)(tableDecode[i].symbol);
-            U16 nextState = symbolNext[symbol]++;
-            tableDecode[i].nbBits = (BYTE) (tableLog - BITv05_highbit32 ((U32)nextState) );
-            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
-    }   }
-
-    DTableH.fastMode = (U16)noLarge;
-    memcpy(dt, &DTableH, sizeof(DTableH));
-    return 0;
-}
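
The spread loop above relies on the step returned by FSEv05_tableStep being odd, hence coprime with the power-of-two table size, so `position = (position + step) & tableMask` walks every cell exactly once before returning to 0. A small standalone check of that property for a 64-cell table:

```c
#include <stdio.h>
#include <string.h>

#define TABLE_LOG 6                               /* 64-cell table, small enough to check */

int main(void)
{
    const unsigned tableSize = 1u << TABLE_LOG;
    const unsigned tableMask = tableSize - 1;
    const unsigned step = (tableSize >> 1) + (tableSize >> 3) + 3;   /* same formula as FSEv05_tableStep */

    unsigned char seen[1u << TABLE_LOG];
    memset(seen, 0, sizeof(seen));

    unsigned position = 0;
    for (unsigned i = 0; i < tableSize; i++) {
        seen[position] = 1;
        position = (position + step) & tableMask;
    }

    unsigned covered = 0;
    for (unsigned i = 0; i < tableSize; i++) covered += seen[i];
    printf("step=%u covered=%u/%u back_at_zero=%s\n",
           step, covered, tableSize, position == 0 ? "yes" : "no");
    return 0;
}
```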
-
-
-#ifndef FSEv05_COMMONDEFS_ONLY
-/*-****************************************
-*  FSEv05 helper functions
-******************************************/
-unsigned FSEv05_isError(size_t code) { return ERR_isError(code); }
-
-const char* FSEv05_getErrorName(size_t code) { return ERR_getErrorName(code); }
-
-
-/*-**************************************************************
-*  FSEv05 NCount encoding-decoding
-****************************************************************/
-static short FSEv05_abs(short a) { return a<0 ? -a : a; }
-
-
-size_t FSEv05_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
-                 const void* headerBuffer, size_t hbSize)
-{
-    const BYTE* const istart = (const BYTE*) headerBuffer;
-    const BYTE* const iend = istart + hbSize;
-    const BYTE* ip = istart;
-    int nbBits;
-    int remaining;
-    int threshold;
-    U32 bitStream;
-    int bitCount;
-    unsigned charnum = 0;
-    int previous0 = 0;
-
-    if (hbSize < 4) return ERROR(srcSize_wrong);
-    bitStream = MEM_readLE32(ip);
-    nbBits = (bitStream & 0xF) + FSEv05_MIN_TABLELOG;   /* extract tableLog */
-    if (nbBits > FSEv05_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
-    bitStream >>= 4;
-    bitCount = 4;
-    *tableLogPtr = nbBits;
-    remaining = (1<<nbBits)+1;
-    threshold = 1<<nbBits;
-    nbBits++;
-
-    while ((remaining>1) && (charnum<=*maxSVPtr)) {
-        if (previous0) {
-            unsigned n0 = charnum;
-            while ((bitStream & 0xFFFF) == 0xFFFF) {
-                n0+=24;
-                if (ip < iend-5) {
-                    ip+=2;
-                    bitStream = MEM_readLE32(ip) >> bitCount;
-                } else {
-                    bitStream >>= 16;
-                    bitCount+=16;
-            }   }
-            while ((bitStream & 3) == 3) {
-                n0+=3;
-                bitStream>>=2;
-                bitCount+=2;
-            }
-            n0 += bitStream & 3;
-            bitCount += 2;
-            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
-            while (charnum < n0) normalizedCounter[charnum++] = 0;
-            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
-                ip += bitCount>>3;
-                bitCount &= 7;
-                bitStream = MEM_readLE32(ip) >> bitCount;
-            }
-            else
-                bitStream >>= 2;
-        }
-        {
-            const short max = (short)((2*threshold-1)-remaining);
-            short count;
-
-            if ((bitStream & (threshold-1)) < (U32)max) {
-                count = (short)(bitStream & (threshold-1));
-                bitCount   += nbBits-1;
-            } else {
-                count = (short)(bitStream & (2*threshold-1));
-                if (count >= threshold) count -= max;
-                bitCount   += nbBits;
-            }
-
-            count--;   /* extra accuracy */
-            remaining -= FSEv05_abs(count);
-            normalizedCounter[charnum++] = count;
-            previous0 = !count;
-            while (remaining < threshold) {
-                nbBits--;
-                threshold >>= 1;
-            }
-
-            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
-                ip += bitCount>>3;
-                bitCount &= 7;
-            } else {
-                bitCount -= (int)(8 * (iend - 4 - ip));
-                ip = iend - 4;
-            }
-            bitStream = MEM_readLE32(ip) >> (bitCount & 31);
-    }   }
-    if (remaining != 1) return ERROR(GENERIC);
-    *maxSVPtr = charnum-1;
-
-    ip += (bitCount+7)>>3;
-    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
-    return ip-istart;
-}
-
-
-
-/*-*******************************************************
-*  Decompression (Byte symbols)
-*********************************************************/
-size_t FSEv05_buildDTable_rle (FSEv05_DTable* dt, BYTE symbolValue)
-{
-    void* ptr = dt;
-    FSEv05_DTableHeader* const DTableH = (FSEv05_DTableHeader*)ptr;
-    void* dPtr = dt + 1;
-    FSEv05_decode_t* const cell = (FSEv05_decode_t*)dPtr;
-
-    DTableH->tableLog = 0;
-    DTableH->fastMode = 0;
-
-    cell->newState = 0;
-    cell->symbol = symbolValue;
-    cell->nbBits = 0;
-
-    return 0;
-}
-
-
-size_t FSEv05_buildDTable_raw (FSEv05_DTable* dt, unsigned nbBits)
-{
-    void* ptr = dt;
-    FSEv05_DTableHeader* const DTableH = (FSEv05_DTableHeader*)ptr;
-    void* dPtr = dt + 1;
-    FSEv05_decode_t* const dinfo = (FSEv05_decode_t*)dPtr;
-    const unsigned tableSize = 1 << nbBits;
-    const unsigned tableMask = tableSize - 1;
-    const unsigned maxSymbolValue = tableMask;
-    unsigned s;
-
-    /* Sanity checks */
-    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
-
-    /* Build Decoding Table */
-    DTableH->tableLog = (U16)nbBits;
-    DTableH->fastMode = 1;
-    for (s=0; s<=maxSymbolValue; s++) {
-        dinfo[s].newState = 0;
-        dinfo[s].symbol = (BYTE)s;
-        dinfo[s].nbBits = (BYTE)nbBits;
-    }
-
-    return 0;
-}
-
-FORCE_INLINE size_t FSEv05_decompress_usingDTable_generic(
-          void* dst, size_t maxDstSize,
-    const void* cSrc, size_t cSrcSize,
-    const FSEv05_DTable* dt, const unsigned fast)
-{
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* op = ostart;
-    BYTE* const omax = op + maxDstSize;
-    BYTE* const olimit = omax-3;
-
-    BITv05_DStream_t bitD;
-    FSEv05_DState_t state1;
-    FSEv05_DState_t state2;
-    size_t errorCode;
-
-    /* Init */
-    errorCode = BITv05_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressed Size */
-    if (FSEv05_isError(errorCode)) return errorCode;
-
-    FSEv05_initDState(&state1, &bitD, dt);
-    FSEv05_initDState(&state2, &bitD, dt);
-
-#define FSEv05_GETSYMBOL(statePtr) fast ? FSEv05_decodeSymbolFast(statePtr, &bitD) : FSEv05_decodeSymbol(statePtr, &bitD)
-
-    /* 4 symbols per loop */
-    for ( ; (BITv05_reloadDStream(&bitD)==BITv05_DStream_unfinished) && (op<olimit) ; op+=4) {
-        op[0] = FSEv05_GETSYMBOL(&state1);
-
-        if (FSEv05_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            BITv05_reloadDStream(&bitD);
-
-        op[1] = FSEv05_GETSYMBOL(&state2);
-
-        if (FSEv05_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            { if (BITv05_reloadDStream(&bitD) > BITv05_DStream_unfinished) { op+=2; break; } }
-
-        op[2] = FSEv05_GETSYMBOL(&state1);
-
-        if (FSEv05_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            BITv05_reloadDStream(&bitD);
-
-        op[3] = FSEv05_GETSYMBOL(&state2);
-    }
-
-    /* tail */
-    /* note : BITv05_reloadDStream(&bitD) >= BITv05_DStream_endOfBuffer; ends at exactly BITv05_DStream_completed */
-    while (1) {
-        if ( (BITv05_reloadDStream(&bitD)>BITv05_DStream_completed) || (op==omax) || (BITv05_endOfDStream(&bitD) && (fast || FSEv05_endOfDState(&state1))) )
-            break;
-
-        *op++ = FSEv05_GETSYMBOL(&state1);
-
-        if ( (BITv05_reloadDStream(&bitD)>BITv05_DStream_completed) || (op==omax) || (BITv05_endOfDStream(&bitD) && (fast || FSEv05_endOfDState(&state2))) )
-            break;
-
-        *op++ = FSEv05_GETSYMBOL(&state2);
-    }
-
-    /* end ? */
-    if (BITv05_endOfDStream(&bitD) && FSEv05_endOfDState(&state1) && FSEv05_endOfDState(&state2))
-        return op-ostart;
-
-    if (op==omax) return ERROR(dstSize_tooSmall);   /* dst buffer is full, but cSrc unfinished */
-
-    return ERROR(corruption_detected);
-}
-
-
-size_t FSEv05_decompress_usingDTable(void* dst, size_t originalSize,
-                            const void* cSrc, size_t cSrcSize,
-                            const FSEv05_DTable* dt)
-{
-    const void* ptr = dt;
-    const FSEv05_DTableHeader* DTableH = (const FSEv05_DTableHeader*)ptr;
-    const U32 fastMode = DTableH->fastMode;
-
-    /* select fast mode (static) */
-    if (fastMode) return FSEv05_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
-    return FSEv05_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
-}
-
-
-size_t FSEv05_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
-{
-    const BYTE* const istart = (const BYTE*)cSrc;
-    const BYTE* ip = istart;
-    short counting[FSEv05_MAX_SYMBOL_VALUE+1];
-    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
-    unsigned tableLog;
-    unsigned maxSymbolValue = FSEv05_MAX_SYMBOL_VALUE;
-    size_t errorCode;
-
-    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */
-
-    /* normal FSEv05 decoding mode */
-    errorCode = FSEv05_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
-    if (FSEv05_isError(errorCode)) return errorCode;
-    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */
-    ip += errorCode;
-    cSrcSize -= errorCode;
-
-    errorCode = FSEv05_buildDTable (dt, counting, maxSymbolValue, tableLog);
-    if (FSEv05_isError(errorCode)) return errorCode;
-
-    /* always return, even if it is an error code */
-    return FSEv05_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
-}
-
-
-
-#endif   /* FSEv05_COMMONDEFS_ONLY */
-/* ******************************************************************
-   Huff0 : Huffman coder, part of New Generation Entropy library
-   header file
-   Copyright (C) 2013-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-#ifndef HUFF0_H
-#define HUFF0_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-
-/* ****************************************
-*  Huff0 simple functions
-******************************************/
-size_t HUFv05_decompress(void* dst,  size_t dstSize,
-                const void* cSrc, size_t cSrcSize);
-/*!
-HUFv05_decompress():
-    Decompress Huff0 data from buffer 'cSrc', of size 'cSrcSize',
-    into already allocated destination buffer 'dst', of size 'dstSize'.
-    @dstSize : must be the **exact** size of original (uncompressed) data.
-    Note : in contrast with FSEv05, HUFv05_decompress can regenerate
-           RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
-           because it knows the size to regenerate.
-    @return : size of regenerated data (== dstSize)
-              or an error code, which can be tested using HUFv05_isError()
-*/
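
A hedged usage sketch of the simple API declared above: the prototypes are copied from this header, but the wrapper function (`decode_block`) and its error handling are illustrative and assume the legacy translation unit is linked in.

```c
#include <stdio.h>

/* prototypes copied from the declarations above */
size_t      HUFv05_decompress(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
unsigned    HUFv05_isError(size_t code);
const char* HUFv05_getErrorName(size_t code);

/* illustrative wrapper: dstSize must be the exact uncompressed size */
static int decode_block(void* dst, size_t originalSize, const void* cSrc, size_t cSrcSize)
{
    size_t r = HUFv05_decompress(dst, originalSize, cSrc, cSrcSize);
    if (HUFv05_isError(r)) {
        fprintf(stderr, "HUFv05_decompress failed: %s\n", HUFv05_getErrorName(r));
        return -1;
    }
    return 0;   /* on success r == originalSize */
}
```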
-
-
-/* ****************************************
-*  Tool functions
-******************************************/
-/* Error Management */
-unsigned    HUFv05_isError(size_t code);        /* tells if a return value is an error code */
-const char* HUFv05_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif   /* HUFF0_H */
-/* ******************************************************************
-   Huff0 : Huffman codec, part of New Generation Entropy library
-   header file, for static linking only
-   Copyright (C) 2013-2016, Yann Collet
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-#ifndef HUF0_STATIC_H
-#define HUF0_STATIC_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-
-/* ****************************************
-*  Static allocation
-******************************************/
-/* static allocation of Huff0's DTable */
-#define HUFv05_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))
-#define HUFv05_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
-        unsigned short DTable[HUFv05_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
-#define HUFv05_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
-        unsigned int DTable[HUFv05_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
-#define HUFv05_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
-        unsigned int DTable[HUFv05_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
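
What the X2 allocation macro expands to can be seen with a tiny standalone program (the two macro definitions are copied from the lines above): cell 0 holds maxTableLog, which HUFv05_readDTableX2 later validates and overwrites with the real tableLog.

```c
#include <stdio.h>

#define HUFv05_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))
#define HUFv05_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
        unsigned short DTable[HUFv05_DTABLE_SIZE(maxTableLog)] = { maxTableLog }

int main(void)
{
    HUFv05_CREATE_STATIC_DTABLEX2(DTable, 12);   /* 1 + 2^12 = 4097 cells */
    printf("cells=%zu header(tableLog)=%u\n",
           sizeof(DTable)/sizeof(DTable[0]), (unsigned)DTable[0]);
    return 0;
}
```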
-
-
-/* ****************************************
-*  Advanced decompression functions
-******************************************/
-size_t HUFv05_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
-size_t HUFv05_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */
-
-
-/* ****************************************
-*  Huff0 detailed API
-******************************************/
-/*!
-HUFv05_decompress() does the following:
-1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics
-2. build the Huffman table from the saved representation, using HUFv05_readDTableXn()
-3. decode 1 or 4 segments in parallel using HUFv05_decompress1Xn/4Xn_usingDTable()
-*/
-size_t HUFv05_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);
-size_t HUFv05_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);
-
-size_t HUFv05_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
-size_t HUFv05_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
-
-
-/* single stream variants */
-
-size_t HUFv05_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
-size_t HUFv05_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
-
-size_t HUFv05_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
-size_t HUFv05_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
-
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* HUF0_STATIC_H */
-/* ******************************************************************
-   Huff0 : Huffman coder, part of New Generation Entropy library
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSEv05+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-/* **************************************************************
-*  Compiler specifics
-****************************************************************/
-#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-/* inline is defined */
-#elif defined(_MSC_VER)
-#  define inline __inline
-#else
-#  define inline /* disable inline */
-#endif
-
-
-#ifdef _MSC_VER    /* Visual Studio */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#endif
-
-
-/* **************************************************************
-*  Includes
-****************************************************************/
-#include <stdlib.h>     /* malloc, free, qsort */
-#include <string.h>     /* memcpy, memset */
-#include <stdio.h>      /* printf (debug) */
-
-
-/* **************************************************************
-*  Constants
-****************************************************************/
-#define HUFv05_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUFv05_MAX_TABLELOG. Beyond that value, code does not work */
-#define HUFv05_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUFv05_ABSOLUTEMAX_TABLELOG */
-#define HUFv05_DEFAULT_TABLELOG  HUFv05_MAX_TABLELOG   /* tableLog by default, when not specified */
-#define HUFv05_MAX_SYMBOL_VALUE 255
-#if (HUFv05_MAX_TABLELOG > HUFv05_ABSOLUTEMAX_TABLELOG)
-#  error "HUFv05_MAX_TABLELOG is too large !"
-#endif
-
-
-/* **************************************************************
-*  Error Management
-****************************************************************/
-unsigned HUFv05_isError(size_t code) { return ERR_isError(code); }
-const char* HUFv05_getErrorName(size_t code) { return ERR_getErrorName(code); }
-#define HUFv05_STATIC_ASSERT(c) { enum { HUFv05_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
-
-
-/* *******************************************************
-*  Huff0 : Huffman block decompression
-*********************************************************/
-typedef struct { BYTE byte; BYTE nbBits; } HUFv05_DEltX2;   /* single-symbol decoding */
-
-typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv05_DEltX4;  /* double-symbols decoding */
-
-typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
-
-/*! HUFv05_readStats
-    Read compact Huffman tree, saved by HUFv05_writeCTable
-    @huffWeight : destination buffer
-    @return : size read from `src`
-*/
-static size_t HUFv05_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
-                            U32* nbSymbolsPtr, U32* tableLogPtr,
-                            const void* src, size_t srcSize)
-{
-    U32 weightTotal;
-    U32 tableLog;
-    const BYTE* ip = (const BYTE*) src;
-    size_t iSize;
-    size_t oSize;
-    U32 n;
-
-    if (!srcSize) return ERROR(srcSize_wrong);
-    iSize = ip[0];
-    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzers complain ... */
-
-    if (iSize >= 128)  { /* special header */
-        if (iSize >= (242)) {  /* RLE */
-            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
-            oSize = l[iSize-242];
-            memset(huffWeight, 1, hwSize);
-            iSize = 0;
-        }
-        else {   /* Incompressible */
-            oSize = iSize - 127;
-            iSize = ((oSize+1)/2);
-            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-            if (oSize >= hwSize) return ERROR(corruption_detected);
-            ip += 1;
-            for (n=0; n<oSize; n+=2) {
-                huffWeight[n]   = ip[n/2] >> 4;
-                huffWeight[n+1] = ip[n/2] & 15;
-    }   }   }
-    else  {   /* header compressed with FSEv05 (normal case) */
-        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-        oSize = FSEv05_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */
-        if (FSEv05_isError(oSize)) return oSize;
-    }
-
-    /* collect weight stats */
-    memset(rankStats, 0, (HUFv05_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
-    weightTotal = 0;
-    for (n=0; n<oSize; n++) {
-        if (huffWeight[n] >= HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
-        rankStats[huffWeight[n]]++;
-        weightTotal += (1 << huffWeight[n]) >> 1;
-    }
-    if (weightTotal == 0) return ERROR(corruption_detected);
-
-    /* get last non-null symbol weight (implied, total must be 2^n) */
-    tableLog = BITv05_highbit32(weightTotal) + 1;
-    if (tableLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
-    {   /* determine last weight */
-        U32 total = 1 << tableLog;
-        U32 rest = total - weightTotal;
-        U32 verif = 1 << BITv05_highbit32(rest);
-        U32 lastWeight = BITv05_highbit32(rest) + 1;
-        if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
-        huffWeight[oSize] = (BYTE)lastWeight;
-        rankStats[lastWeight]++;
-    }
-
-    /* check tree construction validity */
-    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
-
-    /* results */
-    *nbSymbolsPtr = (U32)(oSize+1);
-    *tableLogPtr = tableLog;
-    return iSize+1;
-}
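
The weight bookkeeping above is compact: each weight w contributes `(1<<w)>>1` to `weightTotal`, `tableLog` is `highbit(weightTotal)+1`, and the remainder up to `1<<tableLog` must itself be a power of two, which yields the implied last weight. A standalone worked example with toy weights:

```c
#include <stdio.h>
#include <stdint.h>

static unsigned highbit32(uint32_t v) { unsigned r = 0; while (v >>= 1) r++; return r; }

int main(void)
{
    /* toy weights for all symbols except the last one, which is implied */
    const unsigned weights[4] = { 3, 3, 2, 2 };
    uint32_t weightTotal = 0;
    for (unsigned i = 0; i < 4; i++)
        weightTotal += (1u << weights[i]) >> 1;               /* 4 + 4 + 2 + 2 = 12 */

    const uint32_t tableLog = highbit32(weightTotal) + 1;     /* highbit(12) = 3 -> tableLog 4 */
    const uint32_t rest  = (1u << tableLog) - weightTotal;    /* 16 - 12 = 4 */
    const uint32_t verif = 1u << highbit32(rest);
    if (verif != rest) { puts("corruption_detected: rest is not a power of two"); return 1; }

    printf("tableLog=%u lastWeight=%u\n", (unsigned)tableLog, highbit32(rest) + 1);   /* 4 and 3 */
    return 0;
}
```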
-
-
-/*-***************************/
-/*  single-symbol decoding   */
-/*-***************************/
-
-size_t HUFv05_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
-{
-    BYTE huffWeight[HUFv05_MAX_SYMBOL_VALUE + 1];
-    U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */
-    U32 tableLog = 0;
-    size_t iSize;
-    U32 nbSymbols = 0;
-    U32 n;
-    U32 nextRankStart;
-    void* const dtPtr = DTable + 1;
-    HUFv05_DEltX2* const dt = (HUFv05_DEltX2*)dtPtr;
-
-    HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */
-    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzers complain ... */
-
-    iSize = HUFv05_readStats(huffWeight, HUFv05_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
-    if (HUFv05_isError(iSize)) return iSize;
-
-    /* check result */
-    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */
-    DTable[0] = (U16)tableLog;   /* maybe should separate sizeof allocated DTable, from used size of DTable, in case of re-use */
-
-    /* Prepare ranks */
-    nextRankStart = 0;
-    for (n=1; n<=tableLog; n++) {
-        U32 current = nextRankStart;
-        nextRankStart += (rankVal[n] << (n-1));
-        rankVal[n] = current;
-    }
-
-    /* fill DTable */
-    for (n=0; n<nbSymbols; n++) {
-        const U32 w = huffWeight[n];
-        const U32 length = (1 << w) >> 1;
-        U32 i;
-        HUFv05_DEltX2 D;
-        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
-        for (i = rankVal[w]; i < rankVal[w] + length; i++)
-            dt[i] = D;
-        rankVal[w] += length;
-    }
-
-    return iSize;
-}
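
The fill loop above gives a symbol of weight w a run of `(1<<w)>>1` consecutive cells decoded in `tableLog+1-w` bits, with lower weights placed first. A standalone toy reproduction of that fill (five symbols, 16-cell table, continuing the weights from the previous sketch):

```c
#include <stdio.h>

typedef struct { unsigned char byte; unsigned char nbBits; } DEltX2;

int main(void)
{
    const unsigned tableLog = 4;
    const unsigned weights[5] = { 3, 3, 2, 2, 3 };   /* the (1<<w)>>1 lengths sum to 16 */
    DEltX2 dt[16];

    /* prepare ranks: first cell used by each weight, lower weights first */
    unsigned rankCount[5] = { 0, 0, 0, 0, 0 };
    unsigned rankVal[5]   = { 0, 0, 0, 0, 0 };
    for (unsigned n = 0; n < 5; n++) rankCount[weights[n]]++;
    unsigned next = 0;
    for (unsigned w = 1; w <= tableLog; w++) { rankVal[w] = next; next += rankCount[w] << (w - 1); }

    /* fill: symbol n of weight w takes (1<<w)>>1 cells, decoded in tableLog+1-w bits */
    for (unsigned n = 0; n < 5; n++) {
        const unsigned w = weights[n];
        const unsigned length = (1u << w) >> 1;
        for (unsigned i = rankVal[w]; i < rankVal[w] + length; i++) {
            dt[i].byte   = (unsigned char)('A' + n);
            dt[i].nbBits = (unsigned char)(tableLog + 1 - w);
        }
        rankVal[w] += length;
    }

    for (unsigned i = 0; i < 16; i++) putchar(dt[i].byte);   /* prints CCDDAAAABBBBEEEE */
    putchar('\n');
    return 0;
}
```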
-
-static BYTE HUFv05_decodeSymbolX2(BITv05_DStream_t* Dstream, const HUFv05_DEltX2* dt, const U32 dtLog)
-{
-        const size_t val = BITv05_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
-        const BYTE c = dt[val].byte;
-        BITv05_skipBits(Dstream, dt[val].nbBits);
-        return c;
-}
-
-#define HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
-    *ptr++ = HUFv05_decodeSymbolX2(DStreamPtr, dt, dtLog)
-
-#define HUFv05_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUFv05_MAX_TABLELOG<=12)) \
-        HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-#define HUFv05_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-static inline size_t HUFv05_decodeStreamX2(BYTE* p, BITv05_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv05_DEltX2* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 4 symbols at a time */
-    while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p <= pEnd-4)) {
-        HUFv05_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUFv05_DECODE_SYMBOLX2_1(p, bitDPtr);
-        HUFv05_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);
-    }
-
-    /* closer to the end */
-    while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p < pEnd))
-        HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);
-
-    /* no more data to retrieve from bitstream, hence no need to reload */
-    while (p < pEnd)
-        HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);
-
-    return pEnd-pStart;
-}
-
-size_t HUFv05_decompress1X2_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const U16* DTable)
-{
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + dstSize;
-    const U32 dtLog = DTable[0];
-    const void* dtPtr = DTable;
-    const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr)+1;
-    BITv05_DStream_t bitD;
-
-    if (dstSize <= cSrcSize) return ERROR(dstSize_tooSmall);
-    { size_t const errorCode = BITv05_initDStream(&bitD, cSrc, cSrcSize);
-      if (HUFv05_isError(errorCode)) return errorCode; }
-
-    HUFv05_decodeStreamX2(op, &bitD, oend, dt, dtLog);
-
-    /* check */
-    if (!BITv05_endOfDStream(&bitD)) return ERROR(corruption_detected);
-
-    return dstSize;
-}
-
-size_t HUFv05_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-    size_t errorCode;
-
-    errorCode = HUFv05_readDTableX2 (DTable, cSrc, cSrcSize);
-    if (HUFv05_isError(errorCode)) return errorCode;
-    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += errorCode;
-    cSrcSize -= errorCode;
-
-    return HUFv05_decompress1X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-
-size_t HUFv05_decompress4X2_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const U16* DTable)
-{
-    /* Check */
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-    {
-        const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable;
-        const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr) +1;
-        const U32 dtLog = DTable[0];
-        size_t errorCode;
-
-        /* Init */
-        BITv05_DStream_t bitD1;
-        BITv05_DStream_t bitD2;
-        BITv05_DStream_t bitD3;
-        BITv05_DStream_t bitD4;
-        const size_t length1 = MEM_readLE16(istart);
-        const size_t length2 = MEM_readLE16(istart+2);
-        const size_t length3 = MEM_readLE16(istart+4);
-        size_t length4;
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        const size_t segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-
-        length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        errorCode = BITv05_initDStream(&bitD1, istart1, length1);
-        if (HUFv05_isError(errorCode)) return errorCode;
-        errorCode = BITv05_initDStream(&bitD2, istart2, length2);
-        if (HUFv05_isError(errorCode)) return errorCode;
-        errorCode = BITv05_initDStream(&bitD3, istart3, length3);
-        if (HUFv05_isError(errorCode)) return errorCode;
-        errorCode = BITv05_initDStream(&bitD4, istart4, length4);
-        if (HUFv05_isError(errorCode)) return errorCode;
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
-        for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) {
-            HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUFv05_DECODE_SYMBOLX2_1(op1, &bitD1);
-            HUFv05_DECODE_SYMBOLX2_1(op2, &bitD2);
-            HUFv05_DECODE_SYMBOLX2_1(op3, &bitD3);
-            HUFv05_DECODE_SYMBOLX2_1(op4, &bitD4);
-            HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUFv05_DECODE_SYMBOLX2_0(op1, &bitD1);
-            HUFv05_DECODE_SYMBOLX2_0(op2, &bitD2);
-            HUFv05_DECODE_SYMBOLX2_0(op3, &bitD3);
-            HUFv05_DECODE_SYMBOLX2_0(op4, &bitD4);
-            endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 supposed already verified within main loop */
-
-        /* finish bitStreams one by one */
-        HUFv05_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
-        HUFv05_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
-        HUFv05_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
-        HUFv05_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4);
-        if (!endSignal) return ERROR(corruption_detected);
-
-        /* decoded size */
-        return dstSize;
-    }
-}
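
The init section above documents the 4-stream block layout: a 6-byte jump table of three little-endian 16-bit lengths followed by four sub-streams, the last length being whatever remains. A standalone sketch that parses such a header (toy payload bytes):

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static size_t readLE16(const uint8_t* p) { return (size_t)p[0] | ((size_t)p[1] << 8); }

int main(void)
{
    /* toy compressed block: 6-byte jump table + 4 tiny streams (contents fake) */
    const uint8_t cSrc[] = { 3,0, 2,0, 4,0,              /* length1=3, length2=2, length3=4 */
                             1,2,3,  4,5,  6,7,8,9,  10,11 };
    const size_t cSrcSize = sizeof(cSrc);

    const size_t length1 = readLE16(cSrc);
    const size_t length2 = readLE16(cSrc + 2);
    const size_t length3 = readLE16(cSrc + 4);
    const size_t length4 = cSrcSize - (length1 + length2 + length3 + 6);
    if (length4 > cSrcSize) { puts("corruption_detected"); return 1; }   /* same overflow check as above */

    const uint8_t* istart1 = cSrc + 6;
    const uint8_t* istart2 = istart1 + length1;
    const uint8_t* istart3 = istart2 + length2;
    const uint8_t* istart4 = istart3 + length3;

    printf("streams at offsets %td %td %td %td, lengths %zu %zu %zu %zu\n",
           istart1 - cSrc, istart2 - cSrc, istart3 - cSrc, istart4 - cSrc,
           length1, length2, length3, length4);
    return 0;
}
```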
-
-
-size_t HUFv05_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-    size_t errorCode;
-
-    errorCode = HUFv05_readDTableX2 (DTable, cSrc, cSrcSize);
-    if (HUFv05_isError(errorCode)) return errorCode;
-    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += errorCode;
-    cSrcSize -= errorCode;
-
-    return HUFv05_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-
-/* *************************/
-/* double-symbols decoding */
-/* *************************/
-
-static void HUFv05_fillDTableX4Level2(HUFv05_DEltX4* DTable, U32 sizeLog, const U32 consumed,
-                           const U32* rankValOrigin, const int minWeight,
-                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
-                           U32 nbBitsBaseline, U16 baseSeq)
-{
-    HUFv05_DEltX4 DElt;
-    U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1];
-    U32 s;
-
-    /* get pre-calculated rankVal */
-    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
-    /* fill skipped values */
-    if (minWeight>1) {
-        U32 i, skipSize = rankVal[minWeight];
-        MEM_writeLE16(&(DElt.sequence), baseSeq);
-        DElt.nbBits   = (BYTE)(consumed);
-        DElt.length   = 1;
-        for (i = 0; i < skipSize; i++)
-            DTable[i] = DElt;
-    }
-
-    /* fill DTable */
-    for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
-        const U32 symbol = sortedSymbols[s].symbol;
-        const U32 weight = sortedSymbols[s].weight;
-        const U32 nbBits = nbBitsBaseline - weight;
-        const U32 length = 1 << (sizeLog-nbBits);
-        const U32 start = rankVal[weight];
-        U32 i = start;
-        const U32 end = start + length;
-
-        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
-        DElt.nbBits = (BYTE)(nbBits + consumed);
-        DElt.length = 2;
-        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
-
-        rankVal[weight] += length;
-    }
-}
-
-typedef U32 rankVal_t[HUFv05_ABSOLUTEMAX_TABLELOG][HUFv05_ABSOLUTEMAX_TABLELOG + 1];
-
-static void HUFv05_fillDTableX4(HUFv05_DEltX4* DTable, const U32 targetLog,
-                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
-                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
-                           const U32 nbBitsBaseline)
-{
-    U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1];
-    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
-    const U32 minBits  = nbBitsBaseline - maxWeight;
-    U32 s;
-
-    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
-    /* fill DTable */
-    for (s=0; s<sortedListSize; s++) {
-        const U16 symbol = sortedList[s].symbol;
-        const U32 weight = sortedList[s].weight;
-        const U32 nbBits = nbBitsBaseline - weight;
-        const U32 start = rankVal[weight];
-        const U32 length = 1 << (targetLog-nbBits);
-
-        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
-            U32 sortedRank;
-            int minWeight = nbBits + scaleLog;
-            if (minWeight < 1) minWeight = 1;
-            sortedRank = rankStart[minWeight];
-            HUFv05_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
-                           rankValOrigin[nbBits], minWeight,
-                           sortedList+sortedRank, sortedListSize-sortedRank,
-                           nbBitsBaseline, symbol);
-        } else {
-            U32 i;
-            const U32 end = start + length;
-            HUFv05_DEltX4 DElt;
-
-            MEM_writeLE16(&(DElt.sequence), symbol);
-            DElt.nbBits   = (BYTE)(nbBits);
-            DElt.length   = 1;
-            for (i = start; i < end; i++)
-                DTable[i] = DElt;
-        }
-        rankVal[weight] += length;
-    }
-}
-
-size_t HUFv05_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize)
-{
-    BYTE weightList[HUFv05_MAX_SYMBOL_VALUE + 1];
-    sortedSymbol_t sortedSymbol[HUFv05_MAX_SYMBOL_VALUE + 1];
-    U32 rankStats[HUFv05_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
-    U32 rankStart0[HUFv05_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
-    U32* const rankStart = rankStart0+1;
-    rankVal_t rankVal;
-    U32 tableLog, maxW, sizeOfSort, nbSymbols;
-    const U32 memLog = DTable[0];
-    size_t iSize;
-    void* dtPtr = DTable;
-    HUFv05_DEltX4* const dt = ((HUFv05_DEltX4*)dtPtr) + 1;
-
-    HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX4) == sizeof(unsigned));   /* if compilation fails here, assertion is false */
-    if (memLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
-    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
-
-    iSize = HUFv05_readStats(weightList, HUFv05_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
-    if (HUFv05_isError(iSize)) return iSize;
-
-    /* check result */
-    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
-
-    /* find maxWeight */
-    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */
-
-    /* Get start index of each weight */
-    {
-        U32 w, nextRankStart = 0;
-        for (w=1; w<=maxW; w++) {
-            U32 current = nextRankStart;
-            nextRankStart += rankStats[w];
-            rankStart[w] = current;
-        }
-        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/
-        sizeOfSort = nextRankStart;
-    }
-
-    /* sort symbols by weight */
-    {
-        U32 s;
-        for (s=0; s<nbSymbols; s++) {
-            U32 w = weightList[s];
-            U32 r = rankStart[w]++;
-            sortedSymbol[r].symbol = (BYTE)s;
-            sortedSymbol[r].weight = (BYTE)w;
-        }
-        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
-    }
-
-    /* Build rankVal */
-    {
-        const U32 minBits = tableLog+1 - maxW;
-        U32 nextRankVal = 0;
-        U32 w, consumed;
-        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */
-        U32* rankVal0 = rankVal[0];
-        for (w=1; w<=maxW; w++) {
-            U32 current = nextRankVal;
-            nextRankVal += rankStats[w] << (w+rescale);
-            rankVal0[w] = current;
-        }
-        for (consumed = minBits; consumed <= memLog - minBits; consumed++) {
-            U32* rankValPtr = rankVal[consumed];
-            for (w = 1; w <= maxW; w++) {
-                rankValPtr[w] = rankVal0[w] >> consumed;
-    }   }   }
-
-    HUFv05_fillDTableX4(dt, memLog,
-                   sortedSymbol, sizeOfSort,
-                   rankStart0, rankVal, maxW,
-                   tableLog+1);
-
-    return iSize;
-}
-
-
-static U32 HUFv05_decodeSymbolX4(void* op, BITv05_DStream_t* DStream, const HUFv05_DEltX4* dt, const U32 dtLog)
-{
-    const size_t val = BITv05_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 2);
-    BITv05_skipBits(DStream, dt[val].nbBits);
-    return dt[val].length;
-}
-
-static U32 HUFv05_decodeLastSymbolX4(void* op, BITv05_DStream_t* DStream, const HUFv05_DEltX4* dt, const U32 dtLog)
-{
-    const size_t val = BITv05_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 1);
-    if (dt[val].length==1) BITv05_skipBits(DStream, dt[val].nbBits);
-    else {
-        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
-            BITv05_skipBits(DStream, dt[val].nbBits);
-            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
-                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
-    }   }
-    return 1;
-}
-
-
-#define HUFv05_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
-    ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUFv05_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUFv05_MAX_TABLELOG<=12)) \
-        ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUFv05_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-static inline size_t HUFv05_decodeStreamX4(BYTE* p, BITv05_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv05_DEltX4* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 8 symbols at a time */
-    while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p < pEnd-7)) {
-        HUFv05_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUFv05_DECODE_SYMBOLX4_1(p, bitDPtr);
-        HUFv05_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr);
-    }
-
-    /* closer to the end */
-    while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p <= pEnd-2))
-        HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr);
-
-    while (p <= pEnd-2)
-        HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
-
-    if (p < pEnd)
-        p += HUFv05_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
-
-    return p-pStart;
-}
-
-
-size_t HUFv05_decompress1X4_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const unsigned* DTable)
-{
-    const BYTE* const istart = (const BYTE*) cSrc;
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* const oend = ostart + dstSize;
-
-    const U32 dtLog = DTable[0];
-    const void* const dtPtr = DTable;
-    const HUFv05_DEltX4* const dt = ((const HUFv05_DEltX4*)dtPtr) +1;
-    size_t errorCode;
-
-    /* Init */
-    BITv05_DStream_t bitD;
-    errorCode = BITv05_initDStream(&bitD, istart, cSrcSize);
-    if (HUFv05_isError(errorCode)) return errorCode;
-
-    /* finish bitStreams one by one */
-    HUFv05_decodeStreamX4(ostart, &bitD, oend,     dt, dtLog);
-
-    /* check */
-    if (!BITv05_endOfDStream(&bitD)) return ERROR(corruption_detected);
-
-    /* decoded size */
-    return dstSize;
-}
-
-size_t HUFv05_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUFv05_CREATE_STATIC_DTABLEX4(DTable, HUFv05_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t hSize = HUFv05_readDTableX4 (DTable, cSrc, cSrcSize);
-    if (HUFv05_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize;
-    cSrcSize -= hSize;
-
-    return HUFv05_decompress1X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-size_t HUFv05_decompress4X4_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const unsigned* DTable)
-{
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-
-    {
-        const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable;
-        const HUFv05_DEltX4* const dt = ((const HUFv05_DEltX4*)dtPtr) +1;
-        const U32 dtLog = DTable[0];
-        size_t errorCode;
-
-        /* Init */
-        BITv05_DStream_t bitD1;
-        BITv05_DStream_t bitD2;
-        BITv05_DStream_t bitD3;
-        BITv05_DStream_t bitD4;
-        const size_t length1 = MEM_readLE16(istart);
-        const size_t length2 = MEM_readLE16(istart+2);
-        const size_t length3 = MEM_readLE16(istart+4);
-        size_t length4;
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        const size_t segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-
-        length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        errorCode = BITv05_initDStream(&bitD1, istart1, length1);
-        if (HUFv05_isError(errorCode)) return errorCode;
-        errorCode = BITv05_initDStream(&bitD2, istart2, length2);
-        if (HUFv05_isError(errorCode)) return errorCode;
-        errorCode = BITv05_initDStream(&bitD3, istart3, length3);
-        if (HUFv05_isError(errorCode)) return errorCode;
-        errorCode = BITv05_initDStream(&bitD4, istart4, length4);
-        if (HUFv05_isError(errorCode)) return errorCode;
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
-        for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) {
-            HUFv05_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUFv05_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUFv05_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUFv05_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUFv05_DECODE_SYMBOLX4_1(op1, &bitD1);
-            HUFv05_DECODE_SYMBOLX4_1(op2, &bitD2);
-            HUFv05_DECODE_SYMBOLX4_1(op3, &bitD3);
-            HUFv05_DECODE_SYMBOLX4_1(op4, &bitD4);
-            HUFv05_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUFv05_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUFv05_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUFv05_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUFv05_DECODE_SYMBOLX4_0(op1, &bitD1);
-            HUFv05_DECODE_SYMBOLX4_0(op2, &bitD2);
-            HUFv05_DECODE_SYMBOLX4_0(op3, &bitD3);
-            HUFv05_DECODE_SYMBOLX4_0(op4, &bitD4);
-
-            endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 is already verified within the main loop */
-
-        /* finish bitStreams one by one */
-        HUFv05_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
-        HUFv05_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
-        HUFv05_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
-        HUFv05_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4);
-        if (!endSignal) return ERROR(corruption_detected);
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-size_t HUFv05_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUFv05_CREATE_STATIC_DTABLEX4(DTable, HUFv05_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t hSize = HUFv05_readDTableX4 (DTable, cSrc, cSrcSize);
-    if (HUFv05_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize;
-    cSrcSize -= hSize;
-
-    return HUFv05_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-
-/* ********************************/
-/* Generic decompression selector */
-/* ********************************/
-
-typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
-static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
-{
-    /* single, double, quad */
-    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
-    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
-    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
-    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
-    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
-    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
-    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
-    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
-    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
-    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
-    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
-    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
-    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
-    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
-    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
-    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
-};
-
-typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-
-size_t HUFv05_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    static const decompressionAlgo decompress[3] = { HUFv05_decompress4X2, HUFv05_decompress4X4, NULL };
-    /* estimate decompression time */
-    U32 Q;
-    const U32 D256 = (U32)(dstSize >> 8);
-    U32 Dtime[3];
-    U32 algoNb = 0;
-    int n;
-
-    /* validation checks */
-    if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if (cSrcSize >= dstSize) return ERROR(corruption_detected);   /* invalid, or not compressed; the uncompressed case should have been handled earlier */
-    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
-
-    /* decoder timing evaluation */
-    Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */
-    for (n=0; n<3; n++)
-        Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
-
-    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
-
-    if (Dtime[1] < Dtime[0]) algoNb = 1;
-
-    return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
-
-    //return HUFv05_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */
-    //return HUFv05_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */
-    //return HUFv05_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */
-}
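
As a worked example of the heuristic above: for dstSize = 65536 and cSrcSize = 32768, Q = 32768*16/65536 = 8 and D256 = 65536>>8 = 256, so Dtime[0] = 926 + 128*256 = 33694 and Dtime[1] = 1613 + 75*256 = 20813; after the cache-eviction penalty (Dtime[1] += Dtime[1]>>4, giving 22113) the double-symbols decoder is still the cheaper estimate, so algoNb becomes 1 and HUFv05_decompress4X4 is selected. Note that the quad-symbols column is only ever estimated, never chosen: decompress[2] is NULL and the code compares Dtime[1] against Dtime[0] alone.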
-/*
-    zstd - standard compression library
-    Copyright (C) 2014-2016, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd source repository : https://github.com/Cyan4973/zstd
-*/
-
-/* ***************************************************************
-*  Tuning parameters
-*****************************************************************/
-/*!
- * HEAPMODE :
- * Select how the default decompression function ZSTDv05_decompress() allocates its working memory:
- * on the stack (0), or on the heap (1, requires malloc())
- */
-#ifndef ZSTDv05_HEAPMODE
-#  define ZSTDv05_HEAPMODE 1
-#endif
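
This knob only affects how the convenience entry point ZSTDv05_decompress(), near the end of this file, obtains its ZSTDv05_DCtx. Since the context embeds a block-sized literal buffer, the stack variant needs a correspondingly large stack. A build that prefers it could pin the macro before this unit is compiled; a minimal sketch, equivalent to passing -DZSTDv05_HEAPMODE=0 to the compiler:

```c
/* Sketch : pick the stack-allocated context path of ZSTDv05_decompress().
 * 0 = stack object, 1 = heap via malloc() (the default set above). */
#define ZSTDv05_HEAPMODE 0
```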
-
-
-/*-*******************************************************
-*  Dependencies
-*********************************************************/
-#include <stdlib.h>      /* calloc */
-#include <string.h>      /* memcpy, memmove */
-#include <stdio.h>       /* debug only : printf */
-
-
-/*-*******************************************************
-*  Compiler specifics
-*********************************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
-#endif
-
-
-/*-*************************************
-*  Local types
-***************************************/
-typedef struct
-{
-    blockType_t blockType;
-    U32 origSize;
-} blockProperties_t;
-
-
-/* *******************************************************
-*  Memory operations
-**********************************************************/
-static void ZSTDv05_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
-
-
-/* *************************************
-*  Error Management
-***************************************/
-/*! ZSTDv05_isError() :
-*   tells if a return value is an error code */
-unsigned ZSTDv05_isError(size_t code) { return ERR_isError(code); }
-
-
-/*! ZSTDv05_getErrorName() :
-*   provides error code string (useful for debugging) */
-const char* ZSTDv05_getErrorName(size_t code) { return ERR_getErrorName(code); }
-
-
-/* *************************************************************
-*   Context management
-***************************************************************/
-typedef enum { ZSTDv05ds_getFrameHeaderSize, ZSTDv05ds_decodeFrameHeader,
-               ZSTDv05ds_decodeBlockHeader, ZSTDv05ds_decompressBlock } ZSTDv05_dStage;
-
-struct ZSTDv05_DCtx_s
-{
-    FSEv05_DTable LLTable[FSEv05_DTABLE_SIZE_U32(LLFSEv05Log)];
-    FSEv05_DTable OffTable[FSEv05_DTABLE_SIZE_U32(OffFSEv05Log)];
-    FSEv05_DTable MLTable[FSEv05_DTABLE_SIZE_U32(MLFSEv05Log)];
-    unsigned   hufTableX4[HUFv05_DTABLE_SIZE(HufLog)];
-    const void* previousDstEnd;
-    const void* base;
-    const void* vBase;
-    const void* dictEnd;
-    size_t expected;
-    size_t headerSize;
-    ZSTDv05_parameters params;
-    blockType_t bType;   /* used in ZSTDv05_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
-    ZSTDv05_dStage stage;
-    U32 flagStaticTables;
-    const BYTE* litPtr;
-    size_t litSize;
-    BYTE litBuffer[BLOCKSIZE + WILDCOPY_OVERLENGTH];
-    BYTE headerBuffer[ZSTDv05_frameHeaderSize_max];
-};  /* typedef'd to ZSTDv05_DCtx within "zstd_static.h" */
-
-size_t ZSTDv05_sizeofDCtx (void); /* Hidden declaration */
-size_t ZSTDv05_sizeofDCtx (void) { return sizeof(ZSTDv05_DCtx); }
-
-size_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx)
-{
-    dctx->expected = ZSTDv05_frameHeaderSize_min;
-    dctx->stage = ZSTDv05ds_getFrameHeaderSize;
-    dctx->previousDstEnd = NULL;
-    dctx->base = NULL;
-    dctx->vBase = NULL;
-    dctx->dictEnd = NULL;
-    dctx->hufTableX4[0] = HufLog;
-    dctx->flagStaticTables = 0;
-    return 0;
-}
-
-ZSTDv05_DCtx* ZSTDv05_createDCtx(void)
-{
-    ZSTDv05_DCtx* dctx = (ZSTDv05_DCtx*)malloc(sizeof(ZSTDv05_DCtx));
-    if (dctx==NULL) return NULL;
-    ZSTDv05_decompressBegin(dctx);
-    return dctx;
-}
-
-size_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx)
-{
-    free(dctx);
-    return 0;   /* reserved as a potential error code in the future */
-}
-
-void ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx)
-{
-    memcpy(dstDCtx, srcDCtx,
-           sizeof(ZSTDv05_DCtx) - (BLOCKSIZE+WILDCOPY_OVERLENGTH + ZSTDv05_frameHeaderSize_max));  /* no need to copy workspace */
-}
-
-
-/* *************************************************************
-*   Decompression section
-***************************************************************/
-
-/* Frame format description
-   Frame Header -  [ Block Header - Block ] - Frame End
-   1) Frame Header
-      - 4 bytes - Magic Number : ZSTDv05_MAGICNUMBER (defined within zstd_internal.h)
-      - 1 byte  - Window Descriptor
-   2) Block Header
-      - 3 bytes, starting with a 2-bit descriptor
-                 Uncompressed, Compressed, Frame End, unused
-   3) Block
-      See Block Format Description
-   4) Frame End
-      - 3 bytes, compatible with Block Header
-*/
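
The five header bytes described above are what ZSTDv05_getFrameParams(), defined further down in this file, decodes. A minimal caller sketch, assuming the legacy ZSTDv05_* declarations are in scope (the helper name printFrameParams is illustrative only):

```c
#include <stdio.h>
#include <stddef.h>

/* Minimal sketch : inspect the frame header of a legacy v0.5 frame. */
static int printFrameParams(const void* src, size_t srcSize)
{
    ZSTDv05_parameters params;
    size_t const r = ZSTDv05_getFrameParams(&params, src, srcSize);
    if (r == 0) {                              /* header fully decoded */
        printf("windowLog = %u\n", (unsigned)params.windowLog);
        return 0;
    }
    if (ZSTDv05_isError(r)) {                  /* e.g. prefix_unknown on a bad magic number */
        printf("error : %s\n", ZSTDv05_getErrorName(r));
        return -1;
    }
    return 1;   /* srcSize too small : r is an upper bound of the header size to supply */
}
```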
-
-/* Block format description
-
-   Block = Literal Section - Sequences Section
-   Prerequisite : size of (compressed) block, maximum size of regenerated data
-
-   1) Literal Section
-
-   1.1) Header : 1-5 bytes
-        flags: 2 bits
-            00 compressed by Huff0
-            01 unused
-            10 is Raw (uncompressed)
-            11 is Rle
-            Note : using 01 => Huff0 with precomputed table ?
-            Note : delta map ? => compressed ?
-
-   1.1.1) Huff0-compressed literal block : 3-5 bytes
-            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
-            srcSize < 1 KB => 3 bytes (2-2-10-10) => 4 streams
-            srcSize < 16KB => 4 bytes (2-2-14-14)
-            else           => 5 bytes (2-2-18-18)
-            big endian convention
-
-   1.1.2) Raw (uncompressed) literal block header : 1-3 bytes
-        size :  5 bits: (IS_RAW<<6) + (0<<4) + size
-               12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)
-                        size&255
-               20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)
-                        size>>8&255
-                        size&255
-
-   1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes
-        size :  5 bits: (IS_RLE<<6) + (0<<4) + size
-               12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)
-                        size&255
-               20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)
-                        size>>8&255
-                        size&255
-
-   1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes
-            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
-            srcSize < 1 KB => 3 bytes (2-2-10-10) => 4 streams
-            srcSize < 16KB => 4 bytes (2-2-14-14)
-            else           => 5 bytes (2-2-18-18)
-            big endian convention
-
-        1- CTable available (stored into workspace ?)
-        2- Small input (fast heuristic ? Full comparison ? depend on clevel ?)
-
-
-   1.2) Literal block content
-
-   1.2.1) Huff0 block, using sizes from header
-        See Huff0 format
-
-   1.2.2) Huff0 block, using prepared table
-
-   1.2.3) Raw content
-
-   1.2.4) single byte
-
-
-   2) Sequences section
-      TO DO
-*/
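
The Huff0 literal-section header layout listed above (3/4/5 bytes, big-endian packing) is what ZSTDv05_decodeLiteralsBlock() further down decodes. A self-contained sketch of just that size decoding, mirroring the lhSize cases in that function (the helper name decodeHuff0LiteralHeader is illustrative only):

```c
#include <stddef.h>

/* Sketch : decode (litSize, litCSize) from the 3-5 byte header of a
 * Huff0-compressed literal section. The caller must guarantee at least
 * 5 readable bytes at istart. Returns the header size in bytes, or 0 if
 * the 2-bit flags field does not say "compressed by Huff0". */
static size_t decodeHuff0LiteralHeader(const unsigned char* istart,
                                       size_t* litSize, size_t* litCSize)
{
    if ((istart[0] >> 6) != 0) return 0;      /* 00 = compressed by Huff0 */
    switch ((istart[0] >> 4) & 3)
    {
    case 0: case 1: default:   /* 2 - 2 - 10 - 10 ; bit 4 is the single-stream flag */
        *litSize  = (size_t)((istart[0] & 15) << 6) + (istart[1] >> 2);
        *litCSize = (size_t)((istart[1] &  3) << 8) +  istart[2];
        return 3;
    case 2:                    /* 2 - 2 - 14 - 14 */
        *litSize  = (size_t)((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);
        *litCSize = (size_t)((istart[2] & 63) <<  8) +  istart[3];
        return 4;
    case 3:                    /* 2 - 2 - 18 - 18 */
        *litSize  = (size_t)((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);
        *litCSize = (size_t)((istart[2] &  3) << 16) + (istart[3] << 8) + istart[4];
        return 5;
    }
}
```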
-
-
-/** ZSTDv05_decodeFrameHeader_Part1() :
-*   decode the 1st part of the Frame Header, which tells Frame Header size.
-*   srcSize must be == ZSTDv05_frameHeaderSize_min.
-*   @return : the full size of the Frame Header */
-static size_t ZSTDv05_decodeFrameHeader_Part1(ZSTDv05_DCtx* zc, const void* src, size_t srcSize)
-{
-    U32 magicNumber;
-    if (srcSize != ZSTDv05_frameHeaderSize_min)
-        return ERROR(srcSize_wrong);
-    magicNumber = MEM_readLE32(src);
-    if (magicNumber != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown);
-    zc->headerSize = ZSTDv05_frameHeaderSize_min;
-    return zc->headerSize;
-}
-
-
-size_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize)
-{
-    U32 magicNumber;
-    if (srcSize < ZSTDv05_frameHeaderSize_min) return ZSTDv05_frameHeaderSize_max;
-    magicNumber = MEM_readLE32(src);
-    if (magicNumber != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown);
-    memset(params, 0, sizeof(*params));
-    params->windowLog = (((const BYTE*)src)[4] & 15) + ZSTDv05_WINDOWLOG_ABSOLUTEMIN;
-    if ((((const BYTE*)src)[4] >> 4) != 0) return ERROR(frameParameter_unsupported);   /* reserved bits */
-    return 0;
-}
-
-/** ZSTDv05_decodeFrameHeader_Part2() :
-*   decode the full Frame Header.
-*   srcSize must be the size provided by ZSTDv05_decodeFrameHeader_Part1().
-*   @return : 0, or an error code, which can be tested using ZSTDv05_isError() */
-static size_t ZSTDv05_decodeFrameHeader_Part2(ZSTDv05_DCtx* zc, const void* src, size_t srcSize)
-{
-    size_t result;
-    if (srcSize != zc->headerSize)
-        return ERROR(srcSize_wrong);
-    result = ZSTDv05_getFrameParams(&(zc->params), src, srcSize);
-    if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
-    return result;
-}
-
-
-static size_t ZSTDv05_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
-{
-    const BYTE* const in = (const BYTE* const)src;
-    BYTE headerFlags;
-    U32 cSize;
-
-    if (srcSize < 3)
-        return ERROR(srcSize_wrong);
-
-    headerFlags = *in;
-    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
-
-    bpPtr->blockType = (blockType_t)(headerFlags >> 6);
-    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
-
-    if (bpPtr->blockType == bt_end) return 0;
-    if (bpPtr->blockType == bt_rle) return 1;
-    return cSize;
-}
-
-
-static size_t ZSTDv05_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    if (dst==NULL) return ERROR(dstSize_tooSmall);
-    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
-    memcpy(dst, src, srcSize);
-    return srcSize;
-}
-
-
-/*! ZSTDv05_decodeLiteralsBlock() :
-    @return : nb of bytes read from src (< srcSize ) */
-static size_t ZSTDv05_decodeLiteralsBlock(ZSTDv05_DCtx* dctx,
-                                    const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
-{
-    const BYTE* const istart = (const BYTE*) src;
-
-    /* any compressed block with literals segment must be at least this size */
-    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
-
-    switch(istart[0]>> 6)
-    {
-    case IS_HUFv05:
-        {
-            size_t litSize, litCSize, singleStream=0;
-            U32 lhSize = ((istart[0]) >> 4) & 3;
-            if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
-            switch(lhSize)
-            {
-            case 0: case 1: default:   /* note : default is impossible, since lhSize is in [0..3] */
-                /* 2 - 2 - 10 - 10 */
-                lhSize=3;
-                singleStream = istart[0] & 16;
-                litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);
-                litCSize = ((istart[1] &  3) << 8) + istart[2];
-                break;
-            case 2:
-                /* 2 - 2 - 14 - 14 */
-                lhSize=4;
-                litSize  = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);
-                litCSize = ((istart[2] & 63) <<  8) + istart[3];
-                break;
-            case 3:
-                /* 2 - 2 - 18 - 18 */
-                lhSize=5;
-                litSize  = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);
-                litCSize = ((istart[2] &  3) << 16) + (istart[3] << 8) + istart[4];
-                break;
-            }
-            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
-            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
-
-            if (HUFv05_isError(singleStream ?
-                            HUFv05_decompress1X2(dctx->litBuffer, litSize, istart+lhSize, litCSize) :
-                            HUFv05_decompress   (dctx->litBuffer, litSize, istart+lhSize, litCSize) ))
-                return ERROR(corruption_detected);
-
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
-            return litCSize + lhSize;
-        }
-    case IS_PCH:
-        {
-            size_t errorCode;
-            size_t litSize, litCSize;
-            U32 lhSize = ((istart[0]) >> 4) & 3;
-            if (lhSize != 1)  /* only case supported for now : small litSize, single stream */
-                return ERROR(corruption_detected);
-            if (!dctx->flagStaticTables)
-                return ERROR(dictionary_corrupted);
-
-            /* 2 - 2 - 10 - 10 */
-            lhSize=3;
-            litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);
-            litCSize = ((istart[1] &  3) << 8) + istart[2];
-            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
-
-            errorCode = HUFv05_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTableX4);
-            if (HUFv05_isError(errorCode)) return ERROR(corruption_detected);
-
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
-            return litCSize + lhSize;
-        }
-    case IS_RAW:
-        {
-            size_t litSize;
-            U32 lhSize = ((istart[0]) >> 4) & 3;
-            switch(lhSize)
-            {
-            case 0: case 1: default:   /* note : default is impossible, since lhSize is in [0..3] */
-                lhSize=1;
-                litSize = istart[0] & 31;
-                break;
-            case 2:
-                litSize = ((istart[0] & 15) << 8) + istart[1];
-                break;
-            case 3:
-                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
-                break;
-            }
-
-            if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
-                if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
-                memcpy(dctx->litBuffer, istart+lhSize, litSize);
-                dctx->litPtr = dctx->litBuffer;
-                dctx->litSize = litSize;
-                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
-                return lhSize+litSize;
-            }
-            /* direct reference into compressed stream */
-            dctx->litPtr = istart+lhSize;
-            dctx->litSize = litSize;
-            return lhSize+litSize;
-        }
-    case IS_RLE:
-        {
-            size_t litSize;
-            U32 lhSize = ((istart[0]) >> 4) & 3;
-            switch(lhSize)
-            {
-            case 0: case 1: default:   /* note : default is impossible, since lhSize is in [0..3] */
-                lhSize = 1;
-                litSize = istart[0] & 31;
-                break;
-            case 2:
-                litSize = ((istart[0] & 15) << 8) + istart[1];
-                break;
-            case 3:
-                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
-                if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
-                break;
-            }
-            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
-            memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            return lhSize+1;
-        }
-    default:
-        return ERROR(corruption_detected);   /* impossible */
-    }
-}
-
-
-static size_t ZSTDv05_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
-                         FSEv05_DTable* DTableLL, FSEv05_DTable* DTableML, FSEv05_DTable* DTableOffb,
-                         const void* src, size_t srcSize, U32 flagStaticTable)
-{
-    const BYTE* const istart = (const BYTE* const)src;
-    const BYTE* ip = istart;
-    const BYTE* const iend = istart + srcSize;
-    U32 LLtype, Offtype, MLtype;
-    unsigned LLlog, Offlog, MLlog;
-    size_t dumpsLength;
-
-    /* check */
-    if (srcSize < MIN_SEQUENCES_SIZE)
-        return ERROR(srcSize_wrong);
-
-    /* SeqHead */
-    *nbSeq = *ip++;
-    if (*nbSeq==0) return 1;
-    if (*nbSeq >= 128) {
-        if (ip >= iend) return ERROR(srcSize_wrong);
-        *nbSeq = ((nbSeq[0]-128)<<8) + *ip++;
-    }
-
-    if (ip >= iend) return ERROR(srcSize_wrong);
-    LLtype  = *ip >> 6;
-    Offtype = (*ip >> 4) & 3;
-    MLtype  = (*ip >> 2) & 3;
-    if (*ip & 2) {
-        if (ip+3 > iend) return ERROR(srcSize_wrong);
-        dumpsLength  = ip[2];
-        dumpsLength += ip[1] << 8;
-        ip += 3;
-    } else {
-        if (ip+2 > iend) return ERROR(srcSize_wrong);
-        dumpsLength  = ip[1];
-        dumpsLength += (ip[0] & 1) << 8;
-        ip += 2;
-    }
-    *dumpsPtr = ip;
-    ip += dumpsLength;
-    *dumpsLengthPtr = dumpsLength;
-
-    /* check */
-    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
-
-    /* sequences */
-    {
-        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL >= MaxOff */
-        size_t headerSize;
-
-        /* Build DTables */
-        switch(LLtype)
-        {
-        case FSEv05_ENCODING_RLE :
-            LLlog = 0;
-            FSEv05_buildDTable_rle(DTableLL, *ip++);
-            break;
-        case FSEv05_ENCODING_RAW :
-            LLlog = LLbits;
-            FSEv05_buildDTable_raw(DTableLL, LLbits);
-            break;
-        case FSEv05_ENCODING_STATIC:
-            if (!flagStaticTable) return ERROR(corruption_detected);
-            break;
-        case FSEv05_ENCODING_DYNAMIC :
-        default :   /* impossible */
-            {   unsigned max = MaxLL;
-                headerSize = FSEv05_readNCount(norm, &max, &LLlog, ip, iend-ip);
-                if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
-                if (LLlog > LLFSEv05Log) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSEv05_buildDTable(DTableLL, norm, max, LLlog);
-        }   }
-
-        switch(Offtype)
-        {
-        case FSEv05_ENCODING_RLE :
-            Offlog = 0;
-            if (ip > iend-2) return ERROR(srcSize_wrong);   /* min : "raw", hence no header, but at least xxLog bits */
-            FSEv05_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
-            break;
-        case FSEv05_ENCODING_RAW :
-            Offlog = Offbits;
-            FSEv05_buildDTable_raw(DTableOffb, Offbits);
-            break;
-        case FSEv05_ENCODING_STATIC:
-            if (!flagStaticTable) return ERROR(corruption_detected);
-            break;
-        case FSEv05_ENCODING_DYNAMIC :
-        default :   /* impossible */
-            {   unsigned max = MaxOff;
-                headerSize = FSEv05_readNCount(norm, &max, &Offlog, ip, iend-ip);
-                if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
-                if (Offlog > OffFSEv05Log) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSEv05_buildDTable(DTableOffb, norm, max, Offlog);
-        }   }
-
-        switch(MLtype)
-        {
-        case FSEv05_ENCODING_RLE :
-            MLlog = 0;
-            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
-            FSEv05_buildDTable_rle(DTableML, *ip++);
-            break;
-        case FSEv05_ENCODING_RAW :
-            MLlog = MLbits;
-            FSEv05_buildDTable_raw(DTableML, MLbits);
-            break;
-        case FSEv05_ENCODING_STATIC:
-            if (!flagStaticTable) return ERROR(corruption_detected);
-            break;
-        case FSEv05_ENCODING_DYNAMIC :
-        default :   /* impossible */
-            {   unsigned max = MaxML;
-                headerSize = FSEv05_readNCount(norm, &max, &MLlog, ip, iend-ip);
-                if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
-                if (MLlog > MLFSEv05Log) return ERROR(corruption_detected);
-                ip += headerSize;
-                FSEv05_buildDTable(DTableML, norm, max, MLlog);
-    }   }   }
-
-    return ip-istart;
-}
-
-
-typedef struct {
-    size_t litLength;
-    size_t matchLength;
-    size_t offset;
-} seq_t;
-
-typedef struct {
-    BITv05_DStream_t DStream;
-    FSEv05_DState_t stateLL;
-    FSEv05_DState_t stateOffb;
-    FSEv05_DState_t stateML;
-    size_t prevOffset;
-    const BYTE* dumps;
-    const BYTE* dumpsEnd;
-} seqState_t;
-
-
-
-static void ZSTDv05_decodeSequence(seq_t* seq, seqState_t* seqState)
-{
-    size_t litLength;
-    size_t prevOffset;
-    size_t offset;
-    size_t matchLength;
-    const BYTE* dumps = seqState->dumps;
-    const BYTE* const de = seqState->dumpsEnd;
-
-    /* Literal length */
-    litLength = FSEv05_peakSymbol(&(seqState->stateLL));
-    prevOffset = litLength ? seq->offset : seqState->prevOffset;
-    if (litLength == MaxLL) {
-        const U32 add = *dumps++;
-        if (add < 255) litLength += add;
-        else if (dumps + 3 <= de) {
-            litLength = MEM_readLE24(dumps);
-            if (litLength&1) litLength>>=1, dumps += 3;
-            else litLength = (U16)(litLength)>>1, dumps += 2;
-        }
-        if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */
-    }
-
-    /* Offset */
-    {
-        static const U32 offsetPrefix[MaxOff+1] = {
-                1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
-                512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
-                524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
-        U32 offsetCode = FSEv05_peakSymbol(&(seqState->stateOffb));   /* <= maxOff, by table construction */
-        U32 nbBits = offsetCode - 1;
-        if (offsetCode==0) nbBits = 0;   /* cmove */
-        offset = offsetPrefix[offsetCode] + BITv05_readBits(&(seqState->DStream), nbBits);
-        if (MEM_32bits()) BITv05_reloadDStream(&(seqState->DStream));
-        if (offsetCode==0) offset = prevOffset;   /* repcode, cmove */
-        if (offsetCode | !litLength) seqState->prevOffset = seq->offset;   /* cmove */
-        FSEv05_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));    /* update */
-    }
-
-    /* Literal length update */
-    FSEv05_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));   /* update */
-    if (MEM_32bits()) BITv05_reloadDStream(&(seqState->DStream));
-
-    /* MatchLength */
-    matchLength = FSEv05_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
-    if (matchLength == MaxML) {
-        const U32 add = dumps<de ? *dumps++ : 0;
-        if (add < 255) matchLength += add;
-        else if (dumps + 3 <= de) {
-            matchLength = MEM_readLE24(dumps);
-            if (matchLength&1) matchLength>>=1, dumps += 3;
-            else matchLength = (U16)(matchLength)>>1, dumps += 2;
-        }
-        if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */
-    }
-    matchLength += MINMATCH;
-
-    /* save result */
-    seq->litLength = litLength;
-    seq->offset = offset;
-    seq->matchLength = matchLength;
-    seqState->dumps = dumps;
-
-#if 0   /* debug */
-    {
-        static U64 totalDecoded = 0;
-        printf("pos %6u : %3u literals & match %3u bytes at distance %6u \n",
-           (U32)(totalDecoded), (U32)litLength, (U32)matchLength, (U32)offset);
-        totalDecoded += litLength + matchLength;
-    }
-#endif
-}
-
-
-static size_t ZSTDv05_execSequence(BYTE* op,
-                                BYTE* const oend, seq_t sequence,
-                                const BYTE** litPtr, const BYTE* const litLimit,
-                                const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
-{
-    static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
-    BYTE* const oLitEnd = op + sequence.litLength;
-    const size_t sequenceLength = sequence.litLength + sequence.matchLength;
-    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
-    BYTE* const oend_8 = oend-8;
-    const BYTE* const litEnd = *litPtr + sequence.litLength;
-    const BYTE* match = oLitEnd - sequence.offset;
-
-    /* check */
-    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */
-    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
-    if (litEnd > litLimit) return ERROR(corruption_detected);   /* risk read beyond lit buffer */
-
-    /* copy Literals */
-    ZSTDv05_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
-    op = oLitEnd;
-    *litPtr = litEnd;   /* update for next sequence */
-
-    /* copy Match */
-    if (sequence.offset > (size_t)(oLitEnd - base)) {
-        /* offset beyond prefix */
-        if (sequence.offset > (size_t)(oLitEnd - vBase))
-            return ERROR(corruption_detected);
-        match = dictEnd - (base-match);
-        if (match + sequence.matchLength <= dictEnd) {
-            memmove(oLitEnd, match, sequence.matchLength);
-            return sequenceLength;
-        }
-        /* span extDict & currentPrefixSegment */
-        {
-            size_t length1 = dictEnd - match;
-            memmove(oLitEnd, match, length1);
-            op = oLitEnd + length1;
-            sequence.matchLength -= length1;
-            match = base;
-            if (op > oend_8 || sequence.matchLength < MINMATCH) {
-              while (op < oMatchEnd) *op++ = *match++;
-              return sequenceLength;
-            }
-    }   }
-    /* Requirement: op <= oend_8 */
-
-    /* match within prefix */
-    if (sequence.offset < 8) {
-        /* close range match, overlap */
-        const int sub2 = dec64table[sequence.offset];
-        op[0] = match[0];
-        op[1] = match[1];
-        op[2] = match[2];
-        op[3] = match[3];
-        match += dec32table[sequence.offset];
-        ZSTDv05_copy4(op+4, match);
-        match -= sub2;
-    } else {
-        ZSTDv05_copy8(op, match);
-    }
-    op += 8; match += 8;
-
-    if (oMatchEnd > oend-(16-MINMATCH)) {
-        if (op < oend_8) {
-            ZSTDv05_wildcopy(op, match, oend_8 - op);
-            match += oend_8 - op;
-            op = oend_8;
-        }
-        while (op < oMatchEnd)
-            *op++ = *match++;
-    } else {
-        ZSTDv05_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
-    }
-    return sequenceLength;
-}
-
-
-static size_t ZSTDv05_decompressSequences(
-                               ZSTDv05_DCtx* dctx,
-                               void* dst, size_t maxDstSize,
-                         const void* seqStart, size_t seqSize)
-{
-    const BYTE* ip = (const BYTE*)seqStart;
-    const BYTE* const iend = ip + seqSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* op = ostart;
-    BYTE* const oend = ostart + maxDstSize;
-    size_t errorCode, dumpsLength=0;
-    const BYTE* litPtr = dctx->litPtr;
-    const BYTE* const litEnd = litPtr + dctx->litSize;
-    int nbSeq=0;
-    const BYTE* dumps = NULL;
-    unsigned* DTableLL = dctx->LLTable;
-    unsigned* DTableML = dctx->MLTable;
-    unsigned* DTableOffb = dctx->OffTable;
-    const BYTE* const base = (const BYTE*) (dctx->base);
-    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
-    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
-
-    /* Build Decoding Tables */
-    errorCode = ZSTDv05_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
-                                      DTableLL, DTableML, DTableOffb,
-                                      ip, seqSize, dctx->flagStaticTables);
-    if (ZSTDv05_isError(errorCode)) return errorCode;
-    ip += errorCode;
-
-    /* Regen sequences */
-    if (nbSeq) {
-        seq_t sequence;
-        seqState_t seqState;
-
-        memset(&sequence, 0, sizeof(sequence));
-        sequence.offset = REPCODE_STARTVALUE;
-        seqState.dumps = dumps;
-        seqState.dumpsEnd = dumps + dumpsLength;
-        seqState.prevOffset = REPCODE_STARTVALUE;
-        errorCode = BITv05_initDStream(&(seqState.DStream), ip, iend-ip);
-        if (ERR_isError(errorCode)) return ERROR(corruption_detected);
-        FSEv05_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
-        FSEv05_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
-        FSEv05_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
-
-        for ( ; (BITv05_reloadDStream(&(seqState.DStream)) <= BITv05_DStream_completed) && nbSeq ; ) {
-            size_t oneSeqSize;
-            nbSeq--;
-            ZSTDv05_decodeSequence(&sequence, &seqState);
-            oneSeqSize = ZSTDv05_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
-            if (ZSTDv05_isError(oneSeqSize)) return oneSeqSize;
-            op += oneSeqSize;
-        }
-
-        /* check if reached exact end */
-        if (nbSeq) return ERROR(corruption_detected);
-    }
-
-    /* last literal segment */
-    {
-        size_t lastLLSize = litEnd - litPtr;
-        if (litPtr > litEnd) return ERROR(corruption_detected);   /* too many literals already used */
-        if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
-        memcpy(op, litPtr, lastLLSize);
-        op += lastLLSize;
-    }
-
-    return op-ostart;
-}
-
-
-static void ZSTDv05_checkContinuity(ZSTDv05_DCtx* dctx, const void* dst)
-{
-    if (dst != dctx->previousDstEnd) {   /* not contiguous */
-        dctx->dictEnd = dctx->previousDstEnd;
-        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
-        dctx->base = dst;
-        dctx->previousDstEnd = dst;
-    }
-}
-
-
-static size_t ZSTDv05_decompressBlock_internal(ZSTDv05_DCtx* dctx,
-                            void* dst, size_t dstCapacity,
-                      const void* src, size_t srcSize)
-{   /* blockType == blockCompressed */
-    const BYTE* ip = (const BYTE*)src;
-    size_t litCSize;
-
-    if (srcSize >= BLOCKSIZE) return ERROR(srcSize_wrong);
-
-    /* Decode literals sub-block */
-    litCSize = ZSTDv05_decodeLiteralsBlock(dctx, src, srcSize);
-    if (ZSTDv05_isError(litCSize)) return litCSize;
-    ip += litCSize;
-    srcSize -= litCSize;
-
-    return ZSTDv05_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
-}
-
-
-size_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx,
-                            void* dst, size_t dstCapacity,
-                      const void* src, size_t srcSize)
-{
-    ZSTDv05_checkContinuity(dctx, dst);
-    return ZSTDv05_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
-}
-
-
-/*! ZSTDv05_decompress_continueDCtx
-*   dctx must have been properly initialized */
-static size_t ZSTDv05_decompress_continueDCtx(ZSTDv05_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                                 const void* src, size_t srcSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-    const BYTE* iend = ip + srcSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* op = ostart;
-    BYTE* const oend = ostart + maxDstSize;
-    size_t remainingSize = srcSize;
-    blockProperties_t blockProperties;
-    memset(&blockProperties, 0, sizeof(blockProperties));
-
-    /* Frame Header */
-    {   size_t frameHeaderSize;
-        if (srcSize < ZSTDv05_frameHeaderSize_min+ZSTDv05_blockHeaderSize) return ERROR(srcSize_wrong);
-        frameHeaderSize = ZSTDv05_decodeFrameHeader_Part1(dctx, src, ZSTDv05_frameHeaderSize_min);
-        if (ZSTDv05_isError(frameHeaderSize)) return frameHeaderSize;
-        if (srcSize < frameHeaderSize+ZSTDv05_blockHeaderSize) return ERROR(srcSize_wrong);
-        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
-        frameHeaderSize = ZSTDv05_decodeFrameHeader_Part2(dctx, src, frameHeaderSize);
-        if (ZSTDv05_isError(frameHeaderSize)) return frameHeaderSize;
-    }
-
-    /* Loop on each block */
-    while (1)
-    {
-        size_t decodedSize=0;
-        size_t cBlockSize = ZSTDv05_getcBlockSize(ip, iend-ip, &blockProperties);
-        if (ZSTDv05_isError(cBlockSize)) return cBlockSize;
-
-        ip += ZSTDv05_blockHeaderSize;
-        remainingSize -= ZSTDv05_blockHeaderSize;
-        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
-
-        switch(blockProperties.blockType)
-        {
-        case bt_compressed:
-            decodedSize = ZSTDv05_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);
-            break;
-        case bt_raw :
-            decodedSize = ZSTDv05_copyRawBlock(op, oend-op, ip, cBlockSize);
-            break;
-        case bt_rle :
-            return ERROR(GENERIC);   /* not yet supported */
-            break;
-        case bt_end :
-            /* end of frame */
-            if (remainingSize) return ERROR(srcSize_wrong);
-            break;
-        default:
-            return ERROR(GENERIC);   /* impossible */
-        }
-        if (cBlockSize == 0) break;   /* bt_end */
-
-        if (ZSTDv05_isError(decodedSize)) return decodedSize;
-        op += decodedSize;
-        ip += cBlockSize;
-        remainingSize -= cBlockSize;
-    }
-
-    return op-ostart;
-}
-
-
-size_t ZSTDv05_decompress_usingPreparedDCtx(ZSTDv05_DCtx* dctx, const ZSTDv05_DCtx* refDCtx,
-                                         void* dst, size_t maxDstSize,
-                                   const void* src, size_t srcSize)
-{
-    ZSTDv05_copyDCtx(dctx, refDCtx);
-    ZSTDv05_checkContinuity(dctx, dst);
-    return ZSTDv05_decompress_continueDCtx(dctx, dst, maxDstSize, src, srcSize);
-}
-
-
-size_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                                 const void* src, size_t srcSize,
-                                 const void* dict, size_t dictSize)
-{
-    ZSTDv05_decompressBegin_usingDict(dctx, dict, dictSize);
-    ZSTDv05_checkContinuity(dctx, dst);
-    return ZSTDv05_decompress_continueDCtx(dctx, dst, maxDstSize, src, srcSize);
-}
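
For reference, a one-shot call of the dictionary entry point above could look like the sketch below; all buffers are caller-supplied placeholders and the helper name decompressWithDict is illustrative only. Whether the dictionary contributes entropy tables is decided by the ZSTDv05_DICT_MAGIC check in ZSTDv05_decompress_insertDictionary() further down; a dictionary without that magic is simply referenced as raw prefix content.

```c
#include <stddef.h>

/* Sketch : one-shot decompression with an optional dictionary.
 * Assumes the ZSTDv05_* legacy declarations are in scope. Returns 0 on
 * success and stores the regenerated size in *written. */
static int decompressWithDict(void* dst, size_t dstCapacity, size_t* written,
                              const void* cSrc, size_t cSrcSize,
                              const void* dict, size_t dictSize)
{
    size_t r;
    ZSTDv05_DCtx* const dctx = ZSTDv05_createDCtx();
    if (dctx == NULL) return -1;                 /* allocation failure */
    r = ZSTDv05_decompress_usingDict(dctx, dst, dstCapacity,
                                     cSrc, cSrcSize, dict, dictSize);
    ZSTDv05_freeDCtx(dctx);
    if (ZSTDv05_isError(r)) return -1;           /* e.g. dictionary_corrupted */
    *written = r;
    return 0;
}
```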
-
-
-size_t ZSTDv05_decompressDCtx(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    return ZSTDv05_decompress_usingDict(dctx, dst, maxDstSize, src, srcSize, NULL, 0);
-}
-
-size_t ZSTDv05_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-#if defined(ZSTDv05_HEAPMODE) && (ZSTDv05_HEAPMODE==1)
-    size_t regenSize;
-    ZSTDv05_DCtx* dctx = ZSTDv05_createDCtx();
-    if (dctx==NULL) return ERROR(memory_allocation);
-    regenSize = ZSTDv05_decompressDCtx(dctx, dst, maxDstSize, src, srcSize);
-    ZSTDv05_freeDCtx(dctx);
-    return regenSize;
-#else
-    ZSTDv05_DCtx dctx;
-    return ZSTDv05_decompressDCtx(&dctx, dst, maxDstSize, src, srcSize);
-#endif
-}
-
-/* ZSTD_errorFrameSizeInfoLegacy() :
-   assumes `cSize` and `dBound` are _not_ NULL */
-static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
-{
-    *cSize = ret;
-    *dBound = ZSTD_CONTENTSIZE_ERROR;
-}
-
-void ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
-{
-    const BYTE* ip = (const BYTE*)src;
-    size_t remainingSize = srcSize;
-    size_t nbBlocks = 0;
-    blockProperties_t blockProperties;
-
-    /* Frame Header */
-    if (srcSize < ZSTDv05_frameHeaderSize_min) {
-        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-        return;
-    }
-    if (MEM_readLE32(src) != ZSTDv05_MAGICNUMBER) {
-        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
-        return;
-    }
-    ip += ZSTDv05_frameHeaderSize_min; remainingSize -= ZSTDv05_frameHeaderSize_min;
-
-    /* Loop on each block */
-    while (1)
-    {
-        size_t cBlockSize = ZSTDv05_getcBlockSize(ip, remainingSize, &blockProperties);
-        if (ZSTDv05_isError(cBlockSize)) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
-            return;
-        }
-
-        ip += ZSTDv05_blockHeaderSize;
-        remainingSize -= ZSTDv05_blockHeaderSize;
-        if (cBlockSize > remainingSize) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-            return;
-        }
-
-        if (cBlockSize == 0) break;   /* bt_end */
-
-        ip += cBlockSize;
-        remainingSize -= cBlockSize;
-        nbBlocks++;
-    }
-
-    *cSize = ip - (const BYTE*)src;
-    *dBound = nbBlocks * BLOCKSIZE;
-}
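
ZSTDv05_findFrameSizeInfoLegacy() combined with ZSTDv05_decompress() above is enough for a simple one-shot decode of a single legacy frame: walk the frame once to learn its exact compressed size and an upper bound on the regenerated size, then decompress. A sketch, assuming the ZSTDv05_* declarations are in scope (the helper name decompressLegacyFrame is illustrative only):

```c
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch : decompress one legacy v0.5 frame, sizing the destination from
 * the frame walk above. Returns a malloc'd buffer (caller frees) or NULL. */
static void* decompressLegacyFrame(const void* cSrc, size_t cSrcSize, size_t* dstSize)
{
    size_t cSize;
    unsigned long long dBound;
    void* dst;
    size_t r;

    ZSTDv05_findFrameSizeInfoLegacy(cSrc, cSrcSize, &cSize, &dBound);
    if (ZSTDv05_isError(cSize)) return NULL;      /* truncated or foreign data */
    if (dBound > SIZE_MAX) return NULL;           /* would not fit in memory on this target */
    dst = malloc(dBound ? (size_t)dBound : 1);
    if (dst == NULL) return NULL;

    /* pass cSize, not cSrcSize : the frame must not carry trailing bytes */
    r = ZSTDv05_decompress(dst, (size_t)dBound, cSrc, cSize);
    if (ZSTDv05_isError(r)) { free(dst); return NULL; }
    *dstSize = r;
    return dst;
}
```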
-
-/* ******************************
-*  Streaming Decompression API
-********************************/
-size_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx)
-{
-    return dctx->expected;
-}
-
-size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    /* Sanity check */
-    if (srcSize != dctx->expected) return ERROR(srcSize_wrong);
-    ZSTDv05_checkContinuity(dctx, dst);
-
-    /* Decompress : frame header; part 1 */
-    switch (dctx->stage)
-    {
-    case ZSTDv05ds_getFrameHeaderSize :
-        /* get frame header size */
-        if (srcSize != ZSTDv05_frameHeaderSize_min) return ERROR(srcSize_wrong);   /* impossible */
-        dctx->headerSize = ZSTDv05_decodeFrameHeader_Part1(dctx, src, ZSTDv05_frameHeaderSize_min);
-        if (ZSTDv05_isError(dctx->headerSize)) return dctx->headerSize;
-        memcpy(dctx->headerBuffer, src, ZSTDv05_frameHeaderSize_min);
-        if (dctx->headerSize > ZSTDv05_frameHeaderSize_min) return ERROR(GENERIC); /* should never happen */
-        dctx->expected = 0;   /* not necessary to copy more */
-        /* fallthrough */
-    case ZSTDv05ds_decodeFrameHeader:
-        /* get frame header */
-        {   size_t const result = ZSTDv05_decodeFrameHeader_Part2(dctx, dctx->headerBuffer, dctx->headerSize);
-            if (ZSTDv05_isError(result)) return result;
-            dctx->expected = ZSTDv05_blockHeaderSize;
-            dctx->stage = ZSTDv05ds_decodeBlockHeader;
-            return 0;
-        }
-    case ZSTDv05ds_decodeBlockHeader:
-        {
-            /* Decode block header */
-            blockProperties_t bp;
-            size_t blockSize = ZSTDv05_getcBlockSize(src, ZSTDv05_blockHeaderSize, &bp);
-            if (ZSTDv05_isError(blockSize)) return blockSize;
-            if (bp.blockType == bt_end) {
-                dctx->expected = 0;
-                dctx->stage = ZSTDv05ds_getFrameHeaderSize;
-            }
-            else {
-                dctx->expected = blockSize;
-                dctx->bType = bp.blockType;
-                dctx->stage = ZSTDv05ds_decompressBlock;
-            }
-            return 0;
-        }
-    case ZSTDv05ds_decompressBlock:
-        {
-            /* Decompress : block content */
-            size_t rSize;
-            switch(dctx->bType)
-            {
-            case bt_compressed:
-                rSize = ZSTDv05_decompressBlock_internal(dctx, dst, maxDstSize, src, srcSize);
-                break;
-            case bt_raw :
-                rSize = ZSTDv05_copyRawBlock(dst, maxDstSize, src, srcSize);
-                break;
-            case bt_rle :
-                return ERROR(GENERIC);   /* not yet handled */
-                break;
-            case bt_end :   /* should never happen (filtered at phase 1) */
-                rSize = 0;
-                break;
-            default:
-                return ERROR(GENERIC);   /* impossible */
-            }
-            dctx->stage = ZSTDv05ds_decodeBlockHeader;
-            dctx->expected = ZSTDv05_blockHeaderSize;
-            dctx->previousDstEnd = (char*)dst + rSize;
-            return rSize;
-        }
-    default:
-        return ERROR(GENERIC);   /* impossible */
-    }
-}
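
The state machine above is driven by feeding exactly ZSTDv05_nextSrcSizeToDecompress() bytes per call until it reports 0, which happens once the bt_end block header has been consumed. A minimal driver sketch, where readInput() is a hypothetical caller-supplied callback delivering exactly n compressed bytes, and the helper name streamDecompress is illustrative only:

```c
#include <stdlib.h>
#include <stddef.h>

/* Sketch of a streaming driver. Output is written contiguously into dst.
 * Returns the number of bytes actually regenerated; error handling is
 * abbreviated (any failure simply stops the loop). */
static size_t streamDecompress(void* dst, size_t dstCapacity,
                               size_t (*readInput)(void* buf, size_t n))
{
    char* op = (char*)dst;
    char* const oend = op + dstCapacity;
    void* inBuf = NULL;
    size_t inCap = 0;
    ZSTDv05_DCtx* const dctx = ZSTDv05_createDCtx();   /* also runs ZSTDv05_decompressBegin() */
    if (dctx == NULL) return 0;

    for (;;) {
        size_t const need = ZSTDv05_nextSrcSizeToDecompress(dctx);
        size_t produced;
        if (need == 0) break;                      /* bt_end reached : frame fully decoded */
        if (need > inCap) {                        /* a compressed block is far larger than a header */
            void* const grown = realloc(inBuf, need);
            if (grown == NULL) break;
            inBuf = grown; inCap = need;
        }
        if (readInput(inBuf, need) != need) break; /* truncated input */
        produced = ZSTDv05_decompressContinue(dctx, op, (size_t)(oend - op), inBuf, need);
        if (ZSTDv05_isError(produced)) break;      /* e.g. srcSize_wrong, corruption_detected */
        op += produced;                            /* header steps produce 0 bytes */
    }
    free(inBuf);
    ZSTDv05_freeDCtx(dctx);
    return (size_t)(op - (char*)dst);
}
```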
-
-
-static void ZSTDv05_refDictContent(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    dctx->dictEnd = dctx->previousDstEnd;
-    dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
-    dctx->base = dict;
-    dctx->previousDstEnd = (const char*)dict + dictSize;
-}
-
-static size_t ZSTDv05_loadEntropy(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, errorCode, litlengthHeaderSize;
-    short offcodeNCount[MaxOff+1];
-    unsigned offcodeMaxValue=MaxOff, offcodeLog;
-    short matchlengthNCount[MaxML+1];
-    unsigned matchlengthMaxValue = MaxML, matchlengthLog;
-    short litlengthNCount[MaxLL+1];
-    unsigned litlengthMaxValue = MaxLL, litlengthLog;
-
-    hSize = HUFv05_readDTableX4(dctx->hufTableX4, dict, dictSize);
-    if (HUFv05_isError(hSize)) return ERROR(dictionary_corrupted);
-    dict = (const char*)dict + hSize;
-    dictSize -= hSize;
-
-    offcodeHeaderSize = FSEv05_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dict, dictSize);
-    if (FSEv05_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
-    if (offcodeLog > OffFSEv05Log) return ERROR(dictionary_corrupted);
-    errorCode = FSEv05_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);
-    if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);
-    dict = (const char*)dict + offcodeHeaderSize;
-    dictSize -= offcodeHeaderSize;
-
-    matchlengthHeaderSize = FSEv05_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dict, dictSize);
-    if (FSEv05_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
-    if (matchlengthLog > MLFSEv05Log) return ERROR(dictionary_corrupted);
-    errorCode = FSEv05_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);
-    if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);
-    dict = (const char*)dict + matchlengthHeaderSize;
-    dictSize -= matchlengthHeaderSize;
-
-    litlengthHeaderSize = FSEv05_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dict, dictSize);
-    if (litlengthLog > LLFSEv05Log) return ERROR(dictionary_corrupted);
-    if (FSEv05_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
-    errorCode = FSEv05_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);
-    if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);
-
-    dctx->flagStaticTables = 1;
-    return hSize + offcodeHeaderSize + matchlengthHeaderSize + litlengthHeaderSize;
-}
-
-static size_t ZSTDv05_decompress_insertDictionary(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    size_t eSize;
-    U32 magic = MEM_readLE32(dict);
-    if (magic != ZSTDv05_DICT_MAGIC) {
-        /* pure content mode */
-        ZSTDv05_refDictContent(dctx, dict, dictSize);
-        return 0;
-    }
-    /* load entropy tables */
-    dict = (const char*)dict + 4;
-    dictSize -= 4;
-    eSize = ZSTDv05_loadEntropy(dctx, dict, dictSize);
-    if (ZSTDv05_isError(eSize)) return ERROR(dictionary_corrupted);
-
-    /* reference dictionary content */
-    dict = (const char*)dict + eSize;
-    dictSize -= eSize;
-    ZSTDv05_refDictContent(dctx, dict, dictSize);
-
-    return 0;
-}
-
-
-size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    size_t errorCode;
-    errorCode = ZSTDv05_decompressBegin(dctx);
-    if (ZSTDv05_isError(errorCode)) return errorCode;
-
-    if (dict && dictSize) {
-        errorCode = ZSTDv05_decompress_insertDictionary(dctx, dict, dictSize);
-        if (ZSTDv05_isError(errorCode)) return ERROR(dictionary_corrupted);
-    }
-
-    return 0;
-}
-
-/*
-    Buffered version of Zstd compression library
-    Copyright (C) 2015-2016, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd source repository : https://github.com/Cyan4973/zstd
-    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-/* The objects defined in this file should be considered experimental.
- * They are not labelled stable, as their prototypes may change in the future.
- * You can use them for tests, to provide feedback, or if you can endure the risk of future changes.
- */
-
-
-
-/* *************************************
-*  Constants
-***************************************/
-static size_t ZBUFFv05_blockHeaderSize = 3;
-
-
-
-/* *** Compression *** */
-
-static size_t ZBUFFv05_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
-    size_t length = MIN(maxDstSize, srcSize);
-    memcpy(dst, src, length);
-    return length;
-}
-
-
-
-
-/** ************************************************
-*  Streaming decompression
-*
-*  A ZBUFFv05_DCtx object is required to track streaming operation.
-*  Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources.
-*  Use ZBUFFv05_decompressInit() to start a new decompression operation.
-*  ZBUFFv05_DCtx objects can be reused multiple times.
-*
-*  Use ZBUFFv05_decompressContinue() repetitively to consume your input.
-*  *srcSizePtr and *maxDstSizePtr can be any size.
-*  The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
-*  Note that it may not consume the entire input, in which case it's up to the caller to call the function again with the remaining input.
-*  The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters, or change dst.
-*  return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)
-*            or 0 when a frame is completely decoded
-*            or an error code, which can be tested using ZBUFFv05_isError().
-*
-*  Hint : recommended buffer sizes (not compulsory)
-*  output : 128 KB block size is the internal unit; it ensures it's always possible to write a full block once it's decoded.
-*  input : just follow the indications from ZBUFFv05_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
-* **************************************************/
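For orientation, here is a minimal sketch of the call sequence that comment describes, assuming the ZBUFFv05 declarations in this file are in scope; the single-buffer framing and the `decompressBuffer` name are illustrative, not part of the vendored source:

```c
#include <stddef.h>   /* size_t */

/* Sketch only: decode one frame held entirely in (src, srcSize) into (dst, dstCapacity).
 * Real callers would refill src from a stream, using the size hint returned by
 * ZBUFFv05_decompressContinue(). */
static size_t decompressBuffer(void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize)
{
    ZBUFFv05_DCtx* const zbd = ZBUFFv05_createDCtx();
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    size_t hint = ZBUFFv05_decompressInit(zbd);     /* 0 on success, or an error code */

    if (!ZBUFFv05_isError(hint)) {
        hint = 1;                                   /* nonzero : frame not finished yet */
        while (hint != 0 && srcSize > 0) {
            size_t readSize  = srcSize;                                  /* in: available, out: consumed */
            size_t writeSize = dstCapacity - (size_t)(op - (char*)dst);  /* in: room,      out: written  */
            hint = ZBUFFv05_decompressContinue(zbd, op, &writeSize, ip, &readSize);
            if (ZBUFFv05_isError(hint)) break;
            ip += readSize;   srcSize -= readSize;
            op += writeSize;
        }
    }
    ZBUFFv05_freeDCtx(zbd);
    return ZBUFFv05_isError(hint) ? hint : (size_t)(op - (char*)dst);
}
```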
-
-typedef enum { ZBUFFv05ds_init, ZBUFFv05ds_readHeader, ZBUFFv05ds_loadHeader, ZBUFFv05ds_decodeHeader,
-               ZBUFFv05ds_read, ZBUFFv05ds_load, ZBUFFv05ds_flush } ZBUFFv05_dStage;
-
-/* *** Resource management *** */
-
-#define ZSTDv05_frameHeaderSize_max 5   /* too magical, should come from reference */
-struct ZBUFFv05_DCtx_s {
-    ZSTDv05_DCtx* zc;
-    ZSTDv05_parameters params;
-    char* inBuff;
-    size_t inBuffSize;
-    size_t inPos;
-    char* outBuff;
-    size_t outBuffSize;
-    size_t outStart;
-    size_t outEnd;
-    size_t hPos;
-    ZBUFFv05_dStage stage;
-    unsigned char headerBuffer[ZSTDv05_frameHeaderSize_max];
-};   /* typedef'd to ZBUFFv05_DCtx within "zstd_buffered.h" */
-
-
-ZBUFFv05_DCtx* ZBUFFv05_createDCtx(void)
-{
-    ZBUFFv05_DCtx* zbc = (ZBUFFv05_DCtx*)malloc(sizeof(ZBUFFv05_DCtx));
-    if (zbc==NULL) return NULL;
-    memset(zbc, 0, sizeof(*zbc));
-    zbc->zc = ZSTDv05_createDCtx();
-    zbc->stage = ZBUFFv05ds_init;
-    return zbc;
-}
-
-size_t ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* zbc)
-{
-    if (zbc==NULL) return 0;   /* support free on null */
-    ZSTDv05_freeDCtx(zbc->zc);
-    free(zbc->inBuff);
-    free(zbc->outBuff);
-    free(zbc);
-    return 0;
-}
-
-
-/* *** Initialization *** */
-
-size_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* zbc, const void* dict, size_t dictSize)
-{
-    zbc->stage = ZBUFFv05ds_readHeader;
-    zbc->hPos = zbc->inPos = zbc->outStart = zbc->outEnd = 0;
-    return ZSTDv05_decompressBegin_usingDict(zbc->zc, dict, dictSize);
-}
-
-size_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* zbc)
-{
-    return ZBUFFv05_decompressInitDictionary(zbc, NULL, 0);
-}
-
-
-/* *** Decompression *** */
-
-size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
-{
-    const char* const istart = (const char*)src;
-    const char* ip = istart;
-    const char* const iend = istart + *srcSizePtr;
-    char* const ostart = (char*)dst;
-    char* op = ostart;
-    char* const oend = ostart + *maxDstSizePtr;
-    U32 notDone = 1;
-
-    while (notDone) {
-        switch(zbc->stage)
-        {
-        case ZBUFFv05ds_init :
-            return ERROR(init_missing);
-
-        case ZBUFFv05ds_readHeader :
-            /* read header from src */
-            {
-                size_t headerSize = ZSTDv05_getFrameParams(&(zbc->params), src, *srcSizePtr);
-                if (ZSTDv05_isError(headerSize)) return headerSize;
-                if (headerSize) {
-                    /* not enough input to decode header : tell how many bytes would be necessary */
-                    memcpy(zbc->headerBuffer+zbc->hPos, src, *srcSizePtr);
-                    zbc->hPos += *srcSizePtr;
-                    *maxDstSizePtr = 0;
-                    zbc->stage = ZBUFFv05ds_loadHeader;
-                    return headerSize - zbc->hPos;
-                }
-                zbc->stage = ZBUFFv05ds_decodeHeader;
-                break;
-            }
-	    /* fall-through */
-        case ZBUFFv05ds_loadHeader:
-            /* complete header from src */
-            {
-                size_t headerSize = ZBUFFv05_limitCopy(
-                    zbc->headerBuffer + zbc->hPos, ZSTDv05_frameHeaderSize_max - zbc->hPos,
-                    src, *srcSizePtr);
-                zbc->hPos += headerSize;
-                ip += headerSize;
-                headerSize = ZSTDv05_getFrameParams(&(zbc->params), zbc->headerBuffer, zbc->hPos);
-                if (ZSTDv05_isError(headerSize)) return headerSize;
-                if (headerSize) {
-                    /* not enough input to decode header : tell how many bytes would be necessary */
-                    *maxDstSizePtr = 0;
-                    return headerSize - zbc->hPos;
-                }
-                // zbc->stage = ZBUFFv05ds_decodeHeader; break;   /* useless : stage follows */
-            }
-	    /* fall-through */
-        case ZBUFFv05ds_decodeHeader:
-                /* apply header to create / resize buffers */
-                {
-                    size_t neededOutSize = (size_t)1 << zbc->params.windowLog;
-                    size_t neededInSize = BLOCKSIZE;   /* a block is never > BLOCKSIZE */
-                    if (zbc->inBuffSize < neededInSize) {
-                        free(zbc->inBuff);
-                        zbc->inBuffSize = neededInSize;
-                        zbc->inBuff = (char*)malloc(neededInSize);
-                        if (zbc->inBuff == NULL) return ERROR(memory_allocation);
-                    }
-                    if (zbc->outBuffSize < neededOutSize) {
-                        free(zbc->outBuff);
-                        zbc->outBuffSize = neededOutSize;
-                        zbc->outBuff = (char*)malloc(neededOutSize);
-                        if (zbc->outBuff == NULL) return ERROR(memory_allocation);
-                }   }
-                if (zbc->hPos) {
-                    /* some data already loaded into headerBuffer : transfer into inBuff */
-                    memcpy(zbc->inBuff, zbc->headerBuffer, zbc->hPos);
-                    zbc->inPos = zbc->hPos;
-                    zbc->hPos = 0;
-                    zbc->stage = ZBUFFv05ds_load;
-                    break;
-                }
-                zbc->stage = ZBUFFv05ds_read;
-		/* fall-through */
-        case ZBUFFv05ds_read:
-            {
-                size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
-                if (neededInSize==0) {  /* end of frame */
-                    zbc->stage = ZBUFFv05ds_init;
-                    notDone = 0;
-                    break;
-                }
-                if ((size_t)(iend-ip) >= neededInSize) {
-                    /* directly decode from src */
-                    size_t decodedSize = ZSTDv05_decompressContinue(zbc->zc,
-                        zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
-                        ip, neededInSize);
-                    if (ZSTDv05_isError(decodedSize)) return decodedSize;
-                    ip += neededInSize;
-                    if (!decodedSize) break;   /* this was just a header */
-                    zbc->outEnd = zbc->outStart +  decodedSize;
-                    zbc->stage = ZBUFFv05ds_flush;
-                    break;
-                }
-                if (ip==iend) { notDone = 0; break; }   /* no more input */
-                zbc->stage = ZBUFFv05ds_load;
-            }
-	    /* fall-through */
-        case ZBUFFv05ds_load:
-            {
-                size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
-                size_t toLoad = neededInSize - zbc->inPos;   /* should always be <= remaining space within inBuff */
-                size_t loadedSize;
-                if (toLoad > zbc->inBuffSize - zbc->inPos) return ERROR(corruption_detected);   /* should never happen */
-                loadedSize = ZBUFFv05_limitCopy(zbc->inBuff + zbc->inPos, toLoad, ip, iend-ip);
-                ip += loadedSize;
-                zbc->inPos += loadedSize;
-                if (loadedSize < toLoad) { notDone = 0; break; }   /* not enough input, wait for more */
-                {
-                    size_t decodedSize = ZSTDv05_decompressContinue(zbc->zc,
-                        zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
-                        zbc->inBuff, neededInSize);
-                    if (ZSTDv05_isError(decodedSize)) return decodedSize;
-                    zbc->inPos = 0;   /* input is consumed */
-                    if (!decodedSize) { zbc->stage = ZBUFFv05ds_read; break; }   /* this was just a header */
-                    zbc->outEnd = zbc->outStart +  decodedSize;
-                    zbc->stage = ZBUFFv05ds_flush;
-                    // break; /* ZBUFFv05ds_flush follows */
-                }
-	    }
-	    /* fall-through */
-        case ZBUFFv05ds_flush:
-            {
-                size_t toFlushSize = zbc->outEnd - zbc->outStart;
-                size_t flushedSize = ZBUFFv05_limitCopy(op, oend-op, zbc->outBuff + zbc->outStart, toFlushSize);
-                op += flushedSize;
-                zbc->outStart += flushedSize;
-                if (flushedSize == toFlushSize) {
-                    zbc->stage = ZBUFFv05ds_read;
-                    if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize)
-                        zbc->outStart = zbc->outEnd = 0;
-                    break;
-                }
-                /* cannot flush everything */
-                notDone = 0;
-                break;
-            }
-        default: return ERROR(GENERIC);   /* impossible */
-    }   }
-
-    *srcSizePtr = ip-istart;
-    *maxDstSizePtr = op-ostart;
-
-    {   size_t nextSrcSizeHint = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
-        if (nextSrcSizeHint > ZBUFFv05_blockHeaderSize) nextSrcSizeHint+= ZBUFFv05_blockHeaderSize;   /* get next block header too */
-        nextSrcSizeHint -= zbc->inPos;   /* already loaded*/
-        return nextSrcSizeHint;
-    }
-}
-
-
-
-/* *************************************
-*  Tool functions
-***************************************/
-unsigned ZBUFFv05_isError(size_t errorCode) { return ERR_isError(errorCode); }
-const char* ZBUFFv05_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
-
-size_t ZBUFFv05_recommendedDInSize(void)  { return BLOCKSIZE + ZBUFFv05_blockHeaderSize /* block header size*/ ; }
-size_t ZBUFFv05_recommendedDOutSize(void) { return BLOCKSIZE; }
diff --git a/vendor/github.com/DataDog/zstd/zstd_v05.h b/vendor/github.com/DataDog/zstd/zstd_v05.h
deleted file mode 100644
index 4a97985..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_v05.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTDv05_H
-#define ZSTDv05_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/*-*************************************
-*  Dependencies
-***************************************/
-#include <stddef.h>   /* size_t */
-#include "mem.h"      /* U64, U32 */
-
-
-/* *************************************
-*  Simple functions
-***************************************/
-/*! ZSTDv05_decompress() :
-    `compressedSize` : is the _exact_ size of the compressed blob, otherwise decompression will fail.
-    `dstCapacity` must be large enough, equal or larger than originalSize.
-    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
-              or an errorCode if it fails (which can be tested using ZSTDv05_isError()) */
-size_t ZSTDv05_decompress( void* dst, size_t dstCapacity,
-                     const void* src, size_t compressedSize);
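A one-shot call following the contract above might look like this sketch; the `decodeWholeFrame` wrapper and its parameter names are illustrative, and the caller must know the exact compressed size plus an upper bound on the original size:

```c
/* Sketch: one-shot decode of a complete v0.5 frame. `originalSize` must be known
 * (or bounded) by the caller; `compressedSize` must be exact. */
static size_t decodeWholeFrame(void* decoded, size_t originalSize,
                               const void* compressed, size_t compressedSize)
{
    size_t const r = ZSTDv05_decompress(decoded, originalSize, compressed, compressedSize);
    if (ZSTDv05_isError(r))
        return 0;   /* ZSTDv05_getErrorName(r) gives a readable reason */
    return r;       /* number of bytes written into `decoded` */
}
```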
-
- /**
- ZSTDv05_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.5.x format
-     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
-     cSize (output parameter)  : the number of bytes that would be read to decompress this frame
-                                 or an error code if it fails (which can be tested using ZSTDv05_isError())
-     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
-                                 or ZSTD_CONTENTSIZE_ERROR if an error occurs
-
-    note : assumes `cSize` and `dBound` are _not_ NULL.
- */
-void ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
-                                     size_t* cSize, unsigned long long* dBound);
-
-/* *************************************
-*  Helper functions
-***************************************/
-/* Error Management */
-unsigned    ZSTDv05_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
-const char* ZSTDv05_getErrorName(size_t code);     /*!< provides readable string for an error code */
-
-
-/* *************************************
-*  Explicit memory management
-***************************************/
-/** Decompression context */
-typedef struct ZSTDv05_DCtx_s ZSTDv05_DCtx;
-ZSTDv05_DCtx* ZSTDv05_createDCtx(void);
-size_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx);      /*!< @return : errorCode */
-
-/** ZSTDv05_decompressDCtx() :
-*   Same as ZSTDv05_decompress(), but requires an already allocated ZSTDv05_DCtx (see ZSTDv05_createDCtx()) */
-size_t ZSTDv05_decompressDCtx(ZSTDv05_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-
-
-/*-***********************
-*  Simple Dictionary API
-*************************/
-/*! ZSTDv05_decompress_usingDict() :
-*   Decompression using a pre-defined Dictionary content (see dictBuilder).
-*   Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.
-*   Note : dict can be NULL, in which case, it's equivalent to ZSTDv05_decompressDCtx() */
-size_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx,
-                                            void* dst, size_t dstCapacity,
-                                      const void* src, size_t srcSize,
-                                      const void* dict,size_t dictSize);
-
-/*-************************
-*  Advanced Streaming API
-***************************/
-typedef enum { ZSTDv05_fast, ZSTDv05_greedy, ZSTDv05_lazy, ZSTDv05_lazy2, ZSTDv05_btlazy2, ZSTDv05_opt, ZSTDv05_btopt } ZSTDv05_strategy;
-typedef struct {
-    U64 srcSize;
-    U32 windowLog;     /* the only useful information to retrieve */
-    U32 contentLog; U32 hashLog; U32 searchLog; U32 searchLength; U32 targetLength; ZSTDv05_strategy strategy;
-} ZSTDv05_parameters;
-size_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize);
-
-size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize);
-void   ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx);
-size_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx);
-size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-
-
-/*-***********************
-*  ZBUFF API
-*************************/
-typedef struct ZBUFFv05_DCtx_s ZBUFFv05_DCtx;
-ZBUFFv05_DCtx* ZBUFFv05_createDCtx(void);
-size_t         ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* dctx);
-
-size_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* dctx);
-size_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* dctx, const void* dict, size_t dictSize);
-
-size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* dctx,
-                                            void* dst, size_t* dstCapacityPtr,
-                                      const void* src, size_t* srcSizePtr);
-
-/*-***************************************************************************
-*  Streaming decompression
-*
-*  A ZBUFFv05_DCtx object is required to track streaming operations.
-*  Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources.
-*  Use ZBUFFv05_decompressInit() to start a new decompression operation,
-*   or ZBUFFv05_decompressInitDictionary() if decompression requires a dictionary.
-*  Note that ZBUFFv05_DCtx objects can be reused multiple times.
-*
-*  Use ZBUFFv05_decompressContinue() repetitively to consume your input.
-*  *srcSizePtr and *dstCapacityPtr can be any size.
-*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
-*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
-*  The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters or change @dst.
-*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency)
-*            or 0 when a frame is completely decoded
-*            or an error code, which can be tested using ZBUFFv05_isError().
-*
-*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv05_recommendedDInSize() / ZBUFFv05_recommendedDOutSize()
-*  output : ZBUFFv05_recommendedDOutSize==128 KB block size is the internal unit; it ensures it's always possible to write a full block once decoded.
-*  input  : ZBUFFv05_recommendedDInSize==128 KB + 3; just follow the indications from ZBUFFv05_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
-* *******************************************************************************/
-
-
-/* *************************************
-*  Tool functions
-***************************************/
-unsigned ZBUFFv05_isError(size_t errorCode);
-const char* ZBUFFv05_getErrorName(size_t errorCode);
-
-/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
-*   These sizes are just hints, and tend to offer better latency */
-size_t ZBUFFv05_recommendedDInSize(void);
-size_t ZBUFFv05_recommendedDOutSize(void);
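For example, streaming buffers could be sized from these hints; this is a sketch, the `allocStreamingBuffers` helper is illustrative, and error handling is kept minimal:

```c
#include <stdlib.h>   /* malloc, free */

/* Allocate in/out buffers at the recommended sizes. Returns 1 on success, 0 on failure. */
static int allocStreamingBuffers(char** inBuff, char** outBuff)
{
    *inBuff  = (char*)malloc(ZBUFFv05_recommendedDInSize());    /* 128 KB + block header */
    *outBuff = (char*)malloc(ZBUFFv05_recommendedDOutSize());   /* 128 KB */
    if (*inBuff == NULL || *outBuff == NULL) { free(*inBuff); free(*outBuff); return 0; }
    return 1;
}
```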
-
-
-
-/*-*************************************
-*  Constants
-***************************************/
-#define ZSTDv05_MAGICNUMBER 0xFD2FB525   /* v0.5 */
-
-
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif  /* ZSTDv05_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v06.c b/vendor/github.com/DataDog/zstd/zstd_v06.c
deleted file mode 100644
index f907a3a..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_v06.c
+++ /dev/null
@@ -1,4150 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-/*- Dependencies -*/
-#include "zstd_v06.h"
-#include <stddef.h>    /* size_t, ptrdiff_t */
-#include <string.h>    /* memcpy */
-#include <stdlib.h>    /* malloc, free, qsort */
-#include "error_private.h"
-
-
-
-/* ******************************************************************
-   mem.h
-   low-level memory access routines
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef MEM_H_MODULE
-#define MEM_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/*-****************************************
-*  Compiler specifics
-******************************************/
-#if defined(_MSC_VER)   /* Visual Studio */
-#   include <stdlib.h>  /* _byteswap_ulong */
-#   include <intrin.h>  /* _byteswap_* */
-#endif
-#if defined(__GNUC__)
-#  define MEM_STATIC static __attribute__((unused))
-#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#  define MEM_STATIC static inline
-#elif defined(_MSC_VER)
-#  define MEM_STATIC static __inline
-#else
-#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
-#endif
-
-
-/*-**************************************************************
-*  Basic Types
-*****************************************************************/
-#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-# include <stdint.h>
-  typedef  uint8_t BYTE;
-  typedef uint16_t U16;
-  typedef  int16_t S16;
-  typedef uint32_t U32;
-  typedef  int32_t S32;
-  typedef uint64_t U64;
-  typedef  int64_t S64;
-#else
-  typedef unsigned char       BYTE;
-  typedef unsigned short      U16;
-  typedef   signed short      S16;
-  typedef unsigned int        U32;
-  typedef   signed int        S32;
-  typedef unsigned long long  U64;
-  typedef   signed long long  S64;
-#endif
-
-
-/*-**************************************************************
-*  Memory I/O
-*****************************************************************/
-/* MEM_FORCE_MEMORY_ACCESS :
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The switch below allows selecting a different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
- *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method is portable but violates the C standard.
- *            It can generate buggy code on targets that depend on alignment.
- *            In some circumstances, it's the only known way to get the best performance (i.e. GCC + ARMv6).
- * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-#    define MEM_FORCE_MEMORY_ACCESS 2
-#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
-  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
-#    define MEM_FORCE_MEMORY_ACCESS 1
-#  endif
-#endif
-
-MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
-MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
-
-MEM_STATIC unsigned MEM_isLittleEndian(void)
-{
-    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
-    return one.c[0];
-}
-
-#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
-
-/* violates C standard, by lying on structure alignment.
-Only use if no other choice to achieve best performance on target platform */
-MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
-MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
-MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
-
-#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;
-
-MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
-MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
-
-#else
-
-/* default method, safe and standard.
-   can sometimes prove slower */
-
-MEM_STATIC U16 MEM_read16(const void* memPtr)
-{
-    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC U32 MEM_read32(const void* memPtr)
-{
-    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC U64 MEM_read64(const void* memPtr)
-{
-    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value)
-{
-    memcpy(memPtr, &value, sizeof(value));
-}
-
-
-#endif /* MEM_FORCE_MEMORY_ACCESS */
-
-MEM_STATIC U32 MEM_swap32(U32 in)
-{
-#if defined(_MSC_VER)     /* Visual Studio */
-    return _byteswap_ulong(in);
-#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
-    return __builtin_bswap32(in);
-#else
-    return  ((in << 24) & 0xff000000 ) |
-            ((in <<  8) & 0x00ff0000 ) |
-            ((in >>  8) & 0x0000ff00 ) |
-            ((in >> 24) & 0x000000ff );
-#endif
-}
-
-MEM_STATIC U64 MEM_swap64(U64 in)
-{
-#if defined(_MSC_VER)     /* Visual Studio */
-    return _byteswap_uint64(in);
-#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
-    return __builtin_bswap64(in);
-#else
-    return  ((in << 56) & 0xff00000000000000ULL) |
-            ((in << 40) & 0x00ff000000000000ULL) |
-            ((in << 24) & 0x0000ff0000000000ULL) |
-            ((in << 8)  & 0x000000ff00000000ULL) |
-            ((in >> 8)  & 0x00000000ff000000ULL) |
-            ((in >> 24) & 0x0000000000ff0000ULL) |
-            ((in >> 40) & 0x000000000000ff00ULL) |
-            ((in >> 56) & 0x00000000000000ffULL);
-#endif
-}
-
-
-/*=== Little endian r/w ===*/
-
-MEM_STATIC U16 MEM_readLE16(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read16(memPtr);
-    else {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U16)(p[0] + (p[1]<<8));
-    }
-}
-
-MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
-{
-    if (MEM_isLittleEndian()) {
-        MEM_write16(memPtr, val);
-    } else {
-        BYTE* p = (BYTE*)memPtr;
-        p[0] = (BYTE)val;
-        p[1] = (BYTE)(val>>8);
-    }
-}
-
-MEM_STATIC U32 MEM_readLE32(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read32(memPtr);
-    else
-        return MEM_swap32(MEM_read32(memPtr));
-}
-
-
-MEM_STATIC U64 MEM_readLE64(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read64(memPtr);
-    else
-        return MEM_swap64(MEM_read64(memPtr));
-}
-
-
-MEM_STATIC size_t MEM_readLEST(const void* memPtr)
-{
-    if (MEM_32bits())
-        return (size_t)MEM_readLE32(memPtr);
-    else
-        return (size_t)MEM_readLE64(memPtr);
-}
-
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* MEM_H_MODULE */
-
-/*
-    zstd - standard compression library
-    Header File for static linking only
-    Copyright (C) 2014-2016, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd homepage : http://www.zstd.net
-*/
-#ifndef ZSTDv06_STATIC_H
-#define ZSTDv06_STATIC_H
-
-/* The prototypes defined within this file are considered experimental.
- * They should not be used in the context of a DLL, as they may change in the future.
- * Prefer static linking if you need them, to control issues caused by breaking version changes.
- */
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-
-/*- Advanced Decompression functions -*/
-
-/*! ZSTDv06_decompress_usingPreparedDCtx() :
-*   Same as ZSTDv06_decompress_usingDict, but using a reference context `preparedDCtx`, where dictionary has been loaded.
-*   It avoids reloading the dictionary each time.
-*   `preparedDCtx` must have been properly initialized using ZSTDv06_decompressBegin_usingDict().
-*   Requires 2 contexts : 1 for reference (preparedDCtx), which will not be modified, and 1 to run the decompression operation (dctx) */
-ZSTDLIBv06_API size_t ZSTDv06_decompress_usingPreparedDCtx(
-                                           ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx,
-                                           void* dst, size_t dstCapacity,
-                                     const void* src, size_t srcSize);
-
-
-
-#define ZSTDv06_FRAMEHEADERSIZE_MAX 13    /* for static allocation */
-static const size_t ZSTDv06_frameHeaderSize_min = 5;
-static const size_t ZSTDv06_frameHeaderSize_max = ZSTDv06_FRAMEHEADERSIZE_MAX;
-
-ZSTDLIBv06_API size_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx);
-
-/*
-  Streaming decompression, direct mode (bufferless)
-
-  A ZSTDv06_DCtx object is required to track streaming operations.
-  Use ZSTDv06_createDCtx() / ZSTDv06_freeDCtx() to manage it.
-  A ZSTDv06_DCtx object can be re-used multiple times.
-
-  First optional operation is to retrieve frame parameters, using ZSTDv06_getFrameParams(), which doesn't consume the input.
-  It can provide the minimum size of rolling buffer required to properly decompress data,
-  and optionally the final size of uncompressed content.
-  (Note : content size is an optional info that may not be present. 0 means : content size unknown)
-  Frame parameters are extracted from the beginning of compressed frame.
-  The amount of data to read is variable, from ZSTDv06_frameHeaderSize_min to ZSTDv06_frameHeaderSize_max (so if `srcSize` >= ZSTDv06_frameHeaderSize_max, it will always work)
-  If `srcSize` is too small for the operation to succeed, the function will return the minimum size it requires to produce a result.
-  Result : 0 when successful; it means the ZSTDv06_frameParams structure has been filled.
-          >0 : means there is not enough data in `src`. Provides the expected size needed to successfully decode the header.
-           or an errorCode, which can be tested using ZSTDv06_isError()
-
-  Start decompression, with ZSTDv06_decompressBegin() or ZSTDv06_decompressBegin_usingDict().
-  Alternatively, you can copy a prepared context, using ZSTDv06_copyDCtx().
-
-  Then use ZSTDv06_nextSrcSizeToDecompress() and ZSTDv06_decompressContinue() alternatively.
-  ZSTDv06_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv06_decompressContinue().
-  ZSTDv06_decompressContinue() requires this exact number of bytes, or it will fail.
-  ZSTDv06_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog).
-  They should preferably be located contiguously, prior to the current block. Alternatively, a round buffer is also possible.
-
-  @result of ZSTDv06_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity)
-  It can be zero, which is not an error; it just means ZSTDv06_decompressContinue() has decoded some header.
-
-  A frame is fully decoded when ZSTDv06_nextSrcSizeToDecompress() returns zero.
-  Context can then be reset to start a new decompression.
-*/
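A sketch of that sequence follows, assuming the ZSTDv06 streaming entry points mirror the ZSTDv05 prototypes declared earlier in this diff; the `decodeFrameBufferless` wrapper and its parameter names are illustrative:

```c
#include <stddef.h>   /* size_t */

/* Sketch: bufferless decode of one frame from a contiguous input buffer into dst.
 * Returns the number of bytes regenerated, an error code, or a required-size hint. */
static size_t decodeFrameBufferless(ZSTDv06_DCtx* dctx,
                                    void* dst, size_t dstCapacity,
                                    const void* input, size_t inputSize)
{
    const char* ip = (const char*)input;
    char* op = (char*)dst;
    size_t const initErr = ZSTDv06_decompressBegin(dctx);
    if (ZSTDv06_isError(initErr)) return initErr;

    for (;;) {
        size_t const toRead = ZSTDv06_nextSrcSizeToDecompress(dctx);
        if (toRead == 0) break;                 /* frame fully decoded */
        if (toRead > inputSize) return toRead;  /* caller must supply exactly this many bytes */
        {   size_t const produced = ZSTDv06_decompressContinue(dctx,
                    op, dstCapacity - (size_t)(op - (char*)dst), ip, toRead);
            if (ZSTDv06_isError(produced)) return produced;
            ip += toRead;   inputSize -= toRead;
            op += produced;                     /* may be 0 when only a header was decoded */
        }
    }
    return (size_t)(op - (char*)dst);
}
```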
-
-
-/* **************************************
-*  Block functions
-****************************************/
-/*! Block functions produce and decode raw zstd blocks, without frame metadata.
-    The user is responsible for keeping the information required to regenerate the data, such as compressed and content sizes.
-
-    A few rules to respect :
-    - Uncompressed block size must be <= ZSTDv06_BLOCKSIZE_MAX (128 KB)
-    - Compressing or decompressing requires a context structure
-      + Use ZSTDv06_createCCtx() and ZSTDv06_createDCtx()
-    - It is necessary to init context before starting
-      + compression : ZSTDv06_compressBegin()
-      + decompression : ZSTDv06_decompressBegin()
-      + variants _usingDict() are also allowed
-      + copyCCtx() and copyDCtx() work too
-    - When a block is considered not compressible enough, ZSTDv06_compressBlock() result will be zero.
-      In which case, nothing is produced into `dst`.
-      + User must test for such outcome and deal directly with uncompressed data
-      + ZSTDv06_decompressBlock() doesn't accept uncompressed data as input !!
-*/
-
-#define ZSTDv06_BLOCKSIZE_MAX (128 * 1024)   /* define, for static allocation */
-ZSTDLIBv06_API size_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
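A sketch of the rules in the list above; `blockWasCompressed` stands in for whatever out-of-band metadata the caller keeps (block functions carry none of their own), and the context is assumed to have already been initialized with ZSTDv06_decompressBegin():

```c
#include <string.h>   /* memcpy */

/* Sketch: decode one raw zstd block per the rules above. The dctx must already have
 * been initialized with ZSTDv06_decompressBegin() (or a _usingDict variant). */
static size_t decodeOneBlock(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity,
                             const void* block, size_t blockSize, int blockWasCompressed)
{
    if (!blockWasCompressed) {
        /* ZSTDv06_decompressBlock() does not accept uncompressed data: copy it through */
        if (blockSize > dstCapacity) return 0;
        memcpy(dst, block, blockSize);
        return blockSize;
    }
    return ZSTDv06_decompressBlock(dctx, dst, dstCapacity, block, blockSize);
}
```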
-
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif  /* ZSTDv06_STATIC_H */
-/*
-    zstd_internal - common functions to include
-    Header File for include
-    Copyright (C) 2014-2016, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd homepage : https://www.zstd.net
-*/
-#ifndef ZSTDv06_CCOMMON_H_MODULE
-#define ZSTDv06_CCOMMON_H_MODULE
-
-
-/*-*************************************
-*  Common macros
-***************************************/
-#define MIN(a,b) ((a)<(b) ? (a) : (b))
-#define MAX(a,b) ((a)>(b) ? (a) : (b))
-
-
-/*-*************************************
-*  Common constants
-***************************************/
-#define ZSTDv06_DICT_MAGIC  0xEC30A436
-
-#define ZSTDv06_REP_NUM    3
-#define ZSTDv06_REP_INIT   ZSTDv06_REP_NUM
-#define ZSTDv06_REP_MOVE   (ZSTDv06_REP_NUM-1)
-
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
-
-#define BIT7 128
-#define BIT6  64
-#define BIT5  32
-#define BIT4  16
-#define BIT1   2
-#define BIT0   1
-
-#define ZSTDv06_WINDOWLOG_ABSOLUTEMIN 12
-static const size_t ZSTDv06_fcs_fieldSize[4] = { 0, 1, 2, 8 };
-
-#define ZSTDv06_BLOCKHEADERSIZE 3   /* because C standard does not allow a static const value to be defined using another static const value .... :( */
-static const size_t ZSTDv06_blockHeaderSize = ZSTDv06_BLOCKHEADERSIZE;
-typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
-
-#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
-#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */
-
-#define HufLog 12
-
-#define IS_HUF 0
-#define IS_PCH 1
-#define IS_RAW 2
-#define IS_RLE 3
-
-#define LONGNBSEQ 0x7F00
-
-#define MINMATCH 3
-#define EQUAL_READ32 4
-#define REPCODE_STARTVALUE 1
-
-#define Litbits  8
-#define MaxLit ((1<<Litbits) - 1)
-#define MaxML  52
-#define MaxLL  35
-#define MaxOff 28
-#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
-#define MLFSELog    9
-#define LLFSELog    9
-#define OffFSELog   8
-
-#define FSEv06_ENCODING_RAW     0
-#define FSEv06_ENCODING_RLE     1
-#define FSEv06_ENCODING_STATIC  2
-#define FSEv06_ENCODING_DYNAMIC 3
-
-#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
-
-static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,
-                                     13,14,15,16 };
-static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
-                                             2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
-                                            -1,-1,-1,-1 };
-static const U32 LL_defaultNormLog = 6;
-
-static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11,
-                                     12,13,14,15,16 };
-static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
-                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
-                                            -1,-1,-1,-1,-1 };
-static const U32 ML_defaultNormLog = 6;
-
-static const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
-                                              1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
-static const U32 OF_defaultNormLog = 5;
-
-
-/*-*******************************************
-*  Shared functions to include for inlining
-*********************************************/
-static void ZSTDv06_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
-#define COPY8(d,s) { ZSTDv06_copy8(d,s); d+=8; s+=8; }
-
-/*! ZSTDv06_wildcopy() :
-*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
-#define WILDCOPY_OVERLENGTH 8
-MEM_STATIC void ZSTDv06_wildcopy(void* dst, const void* src, ptrdiff_t length)
-{
-    const BYTE* ip = (const BYTE*)src;
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + length;
-    do
-        COPY8(op, ip)
-    while (op < oend);
-}
-
-
-
-/*-*******************************************
-*  Private interfaces
-*********************************************/
-typedef struct {
-    U32 off;
-    U32 len;
-} ZSTDv06_match_t;
-
-typedef struct {
-    U32 price;
-    U32 off;
-    U32 mlen;
-    U32 litlen;
-    U32 rep[ZSTDv06_REP_INIT];
-} ZSTDv06_optimal_t;
-
-typedef struct { U32  unused; } ZSTDv06_stats_t;
-
-typedef struct {
-    void* buffer;
-    U32*  offsetStart;
-    U32*  offset;
-    BYTE* offCodeStart;
-    BYTE* litStart;
-    BYTE* lit;
-    U16*  litLengthStart;
-    U16*  litLength;
-    BYTE* llCodeStart;
-    U16*  matchLengthStart;
-    U16*  matchLength;
-    BYTE* mlCodeStart;
-    U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
-    U32   longLengthPos;
-    /* opt */
-    ZSTDv06_optimal_t* priceTable;
-    ZSTDv06_match_t* matchTable;
-    U32* matchLengthFreq;
-    U32* litLengthFreq;
-    U32* litFreq;
-    U32* offCodeFreq;
-    U32  matchLengthSum;
-    U32  matchSum;
-    U32  litLengthSum;
-    U32  litSum;
-    U32  offCodeSum;
-    U32  log2matchLengthSum;
-    U32  log2matchSum;
-    U32  log2litLengthSum;
-    U32  log2litSum;
-    U32  log2offCodeSum;
-    U32  factor;
-    U32  cachedPrice;
-    U32  cachedLitLength;
-    const BYTE* cachedLiterals;
-    ZSTDv06_stats_t stats;
-} seqStore_t;
-
-void ZSTDv06_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq);
-
-
-#endif   /* ZSTDv06_CCOMMON_H_MODULE */
-/* ******************************************************************
-   FSE : Finite State Entropy codec
-   Public Prototypes declaration
-   Copyright (C) 2013-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-#ifndef FSEv06_H
-#define FSEv06_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-
-/*-****************************************
-*  FSE simple functions
-******************************************/
-/*! FSEv06_decompress():
-    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
-    into already allocated destination buffer 'dst', of size 'dstCapacity'.
-    @return : size of regenerated data (<= maxDstSize),
-              or an error code, which can be tested using FSEv06_isError() .
-
-    ** Important ** : FSEv06_decompress() does not decompress non-compressible nor RLE data !!!
-    Why ? : making this distinction requires a header.
-    Header management is intentionally delegated to the user layer, which can better manage special cases.
-*/
-size_t FSEv06_decompress(void* dst,  size_t dstCapacity,
-                const void* cSrc, size_t cSrcSize);
-
-
-/*-*****************************************
-*  Tool functions
-******************************************/
-size_t FSEv06_compressBound(size_t size);       /* maximum compressed size */
-
-/* Error Management */
-unsigned    FSEv06_isError(size_t code);        /* tells if a return value is an error code */
-const char* FSEv06_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
-
-
-
-/*-*****************************************
-*  FSE detailed API
-******************************************/
-/*!
-
-FSEv06_decompress() does the following:
-1. read normalized counters with readNCount()
-2. build decoding table 'DTable' from normalized counters
-3. decode the data stream using decoding table 'DTable'
-
-The following API allows targeting specific sub-functions for advanced tasks.
-For example, it's possible to compress several blocks using the same 'CTable',
-or to save and provide normalized distribution using external method.
-*/
-
-
-/* *** DECOMPRESSION *** */
-
-/*! FSEv06_readNCount():
-    Read compactly saved 'normalizedCounter' from 'rBuffer'.
-    @return : size read from 'rBuffer',
-              or an errorCode, which can be tested using FSEv06_isError().
-              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
-size_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
-
-/*! Constructor and Destructor of FSEv06_DTable.
-    Note that its size depends on 'tableLog' */
-typedef unsigned FSEv06_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
-FSEv06_DTable* FSEv06_createDTable(unsigned tableLog);
-void        FSEv06_freeDTable(FSEv06_DTable* dt);
-
-/*! FSEv06_buildDTable():
-    Builds 'dt', which must be already allocated, using FSEv06_createDTable().
-    return : 0, or an errorCode, which can be tested using FSEv06_isError() */
-size_t FSEv06_buildDTable (FSEv06_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
-
-/*! FSEv06_decompress_usingDTable():
-    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
-    into `dst` which must be already allocated.
-    @return : size of regenerated data (necessarily <= `dstCapacity`),
-              or an errorCode, which can be tested using FSEv06_isError() */
-size_t FSEv06_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv06_DTable* dt);
-
-/*!
-Tutorial :
-----------
-(Note : these functions only decompress FSE-compressed blocks.
- If block is uncompressed, use memcpy() instead
- If block is a single repeated byte, use memset() instead )
-
-The first step is to obtain the normalized frequencies of symbols.
-This can be performed by FSEv06_readNCount() if it was saved using FSEv06_writeNCount().
-'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
-In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
-or size the table to handle worst case situations (typically 256).
-FSEv06_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
-The result of FSEv06_readNCount() is the number of bytes read from 'rBuffer'.
-Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
-If there is an error, the function will return an error code, which can be tested using FSEv06_isError().
-
-The next step is to build the decompression tables 'FSEv06_DTable' from 'normalizedCounter'.
-This is performed by the function FSEv06_buildDTable().
-The space required by 'FSEv06_DTable' must be already allocated using FSEv06_createDTable().
-If there is an error, the function will return an error code, which can be tested using FSEv06_isError().
-
-`FSEv06_DTable` can then be used to decompress `cSrc`, with FSEv06_decompress_usingDTable().
-`cSrcSize` must be strictly correct, otherwise decompression will fail.
-FSEv06_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
-If there is an error, the function will return an error code, which can be tested using FSEv06_isError(). (ex: dst buffer too small)
-*/
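The three steps of that tutorial, in order, might look like the following sketch; the separate `nCountHeader` buffer and the 256-symbol worst case are illustrative choices, and `cSrcSize` must be exact:

```c
/* Sketch: decompress one FSE-compressed block whose normalized counters were saved
 * with FSEv06_writeNCount() in front of the payload. Worst case of 256 symbols assumed. */
static size_t fseDecodeBlock(void* dst, size_t dstCapacity,
                             const void* nCountHeader, size_t nCountSize,
                             const void* cSrc, size_t cSrcSize)
{
    short normalizedCounter[256];
    unsigned maxSymbolValue = 255;
    unsigned tableLog = 0;
    size_t result;

    /* 1. read the normalized frequencies */
    size_t const hSize = FSEv06_readNCount(normalizedCounter, &maxSymbolValue, &tableLog,
                                           nCountHeader, nCountSize);
    if (FSEv06_isError(hSize)) return hSize;

    /* 2. build the decoding table */
    {   FSEv06_DTable* const dt = FSEv06_createDTable(tableLog);
        if (dt == NULL) return (size_t)(-1);   /* allocation failure; sketch-level handling */
        result = FSEv06_buildDTable(dt, normalizedCounter, maxSymbolValue, tableLog);

        /* 3. decode the payload; cSrcSize must be strictly correct */
        if (!FSEv06_isError(result))
            result = FSEv06_decompress_usingDTable(dst, dstCapacity, cSrc, cSrcSize, dt);
        FSEv06_freeDTable(dt);
    }
    return result;   /* bytes regenerated, or an FSEv06 error code */
}
```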
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif  /* FSEv06_H */
-/* ******************************************************************
-   bitstream
-   Part of FSE library
-   header file (to include)
-   Copyright (C) 2013-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-#ifndef BITSTREAM_H_MODULE
-#define BITSTREAM_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/*
-*  This API consists of small unitary functions, which must be inlined for best performance.
-*  Since link-time-optimization is not available for all compilers,
-*  these functions are defined into a .h to be included.
-*/
-
-
-/*=========================================
-*  Target specific
-=========================================*/
-#if defined(__BMI__) && defined(__GNUC__)
-#  include <immintrin.h>   /* support for bextr (experimental) */
-#endif
-
-
-
-/*-********************************************
-*  bitStream decoding API (read backward)
-**********************************************/
-typedef struct
-{
-    size_t   bitContainer;
-    unsigned bitsConsumed;
-    const char* ptr;
-    const char* start;
-} BITv06_DStream_t;
-
-typedef enum { BITv06_DStream_unfinished = 0,
-               BITv06_DStream_endOfBuffer = 1,
-               BITv06_DStream_completed = 2,
-               BITv06_DStream_overflow = 3 } BITv06_DStream_status;  /* result of BITv06_reloadDStream() */
-               /* 1,2,4,8 would be better for bitmap combinations, but slow down performance a bit ... :( */
-
-MEM_STATIC size_t   BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
-MEM_STATIC size_t   BITv06_readBits(BITv06_DStream_t* bitD, unsigned nbBits);
-MEM_STATIC BITv06_DStream_status BITv06_reloadDStream(BITv06_DStream_t* bitD);
-MEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* bitD);
-
-
-
-/*-****************************************
-*  unsafe API
-******************************************/
-MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, unsigned nbBits);
-/* faster, but works only if nbBits >= 1 */
-
-
-
-/*-**************************************************************
-*  Internal functions
-****************************************************************/
-MEM_STATIC unsigned BITv06_highbit32 ( U32 val)
-{
-#   if defined(_MSC_VER)   /* Visual */
-    unsigned long r=0;
-    _BitScanReverse ( &r, val );
-    return (unsigned) r;
-#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
-    return 31 - __builtin_clz (val);
-#   else   /* Software version */
-    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
-    U32 v = val;
-    unsigned r;
-    v |= v >> 1;
-    v |= v >> 2;
-    v |= v >> 4;
-    v |= v >> 8;
-    v |= v >> 16;
-    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
-    return r;
-#   endif
-}
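
As a quick, purely illustrative reference for the helper above: BITv06_highbit32() returns the position of the highest set bit, i.e. floor(log2(val)) for any non-zero input, whichever of the three code paths is compiled. The function below is not part of the library.

```c
#include <assert.h>

/* Illustrative only : a few spot checks of the highbit contract. */
static void BITv06_highbit32_examples(void)
{
    assert(BITv06_highbit32(1U)          ==  0);
    assert(BITv06_highbit32(2U)          ==  1);
    assert(BITv06_highbit32(255U)        ==  7);
    assert(BITv06_highbit32(256U)        ==  8);
    assert(BITv06_highbit32(0x80000000U) == 31);
}
```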
-
-
-
-/*-********************************************************
-* bitStream decoding
-**********************************************************/
-/*! BITv06_initDStream() :
-*   Initialize a BITv06_DStream_t.
-*   `bitD` : a pointer to an already allocated BITv06_DStream_t structure.
-*   `srcSize` must be the *exact* size of the bitStream, in bytes.
-*   @return : size of stream (== srcSize) or an errorCode if a problem is detected
-*/
-MEM_STATIC size_t BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
-{
-    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
-
-    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
-        bitD->start = (const char*)srcBuffer;
-        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);
-        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
-          if (lastByte == 0) return ERROR(GENERIC);   /* endMark not present */
-          bitD->bitsConsumed = 8 - BITv06_highbit32(lastByte); }
-    } else {
-        bitD->start = (const char*)srcBuffer;
-        bitD->ptr   = bitD->start;
-        bitD->bitContainer = *(const BYTE*)(bitD->start);
-        switch(srcSize)
-        {
-            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */
-            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */
-            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */
-            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */
-            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */
-            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8; /* fall-through */
-            default: break;
-        }
-        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
-          if (lastByte == 0) return ERROR(GENERIC);   /* endMark not present */
-          bitD->bitsConsumed = 8 - BITv06_highbit32(lastByte); }
-        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
-    }
-
-    return srcSize;
-}
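
The decoding API above is always driven in the same pattern: BITv06_initDStream() once, then alternating BITv06_readBits() / BITv06_reloadDStream() until the stream is exhausted. The sketch below is illustrative only; it assumes the payload is a whole number of `nbBits`-wide fields followed by the end mark, and the function name is not part of the library.

```c
/* Illustrative sketch only : drain a bitstream made of fixed-width fields, reading backward. */
static size_t BITv06_countFields_sketch(const void* src, size_t srcSize, unsigned nbBits)
{
    BITv06_DStream_t bitD;
    size_t nbFields = 0;

    {   size_t const initResult = BITv06_initDStream(&bitD, src, srcSize);
        if (ERR_isError(initResult)) return initResult;
    }

    while (!BITv06_endOfDStream(&bitD)) {
        (void)BITv06_readBits(&bitD, nbBits);            /* fields come out in reverse of write order */
        nbFields++;
        if (BITv06_reloadDStream(&bitD) == BITv06_DStream_overflow)
            return ERROR(corruption_detected);           /* ran past the beginning of the stream */
    }

    return nbFields;   /* or an error code, testable with ERR_isError() */
}
```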
-
-
- MEM_STATIC size_t BITv06_lookBits(const BITv06_DStream_t* bitD, U32 nbBits)
-{
-    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
-    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
-}
-
-/*! BITv06_lookBitsFast() :
-*   unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BITv06_lookBitsFast(const BITv06_DStream_t* bitD, U32 nbBits)
-{
-    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
-    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
-}
-
-MEM_STATIC void BITv06_skipBits(BITv06_DStream_t* bitD, U32 nbBits)
-{
-    bitD->bitsConsumed += nbBits;
-}
-
-MEM_STATIC size_t BITv06_readBits(BITv06_DStream_t* bitD, U32 nbBits)
-{
-    size_t const value = BITv06_lookBits(bitD, nbBits);
-    BITv06_skipBits(bitD, nbBits);
-    return value;
-}
-
-/*! BITv06_readBitsFast() :
-*   unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, U32 nbBits)
-{
-    size_t const value = BITv06_lookBitsFast(bitD, nbBits);
-    BITv06_skipBits(bitD, nbBits);
-    return value;
-}
-
-MEM_STATIC BITv06_DStream_status BITv06_reloadDStream(BITv06_DStream_t* bitD)
-{
-    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
-        return BITv06_DStream_overflow;
-
-    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
-        bitD->ptr -= bitD->bitsConsumed >> 3;
-        bitD->bitsConsumed &= 7;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);
-        return BITv06_DStream_unfinished;
-    }
-    if (bitD->ptr == bitD->start) {
-        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv06_DStream_endOfBuffer;
-        return BITv06_DStream_completed;
-    }
-    {   U32 nbBytes = bitD->bitsConsumed >> 3;
-        BITv06_DStream_status result = BITv06_DStream_unfinished;
-        if (bitD->ptr - nbBytes < bitD->start) {
-            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
-            result = BITv06_DStream_endOfBuffer;
-        }
-        bitD->ptr -= nbBytes;
-        bitD->bitsConsumed -= nbBytes*8;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
-        return result;
-    }
-}
-
-/*! BITv06_endOfDStream() :
-*   @return Tells if DStream has exactly reached its end (all bits consumed).
-*/
-MEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* DStream)
-{
-    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
-}
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* BITSTREAM_H_MODULE */
-/* ******************************************************************
-   FSE : Finite State Entropy coder
-   header file for static linking (only)
-   Copyright (C) 2013-2015, Yann Collet
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-   - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef FSEv06_STATIC_H
-#define FSEv06_STATIC_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/* *****************************************
-*  Static allocation
-*******************************************/
-/* FSE buffer bounds */
-#define FSEv06_NCOUNTBOUND 512
-#define FSEv06_BLOCKBOUND(size) (size + (size>>7))
-#define FSEv06_COMPRESSBOUND(size) (FSEv06_NCOUNTBOUND + FSEv06_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
-
-/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */
-#define FSEv06_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
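
For example (illustrative only, and assuming FSEv06_DTable is laid out as a table of unsigned as the comment above states), a DTable for any tableLog up to 10 can be reserved statically and later initialized with FSEv06_buildDTable():

```c
/* Illustrative only : static DTable storage (a table of unsigned) for tableLog <= 10. */
static unsigned staticDTable_example[FSEv06_DTABLE_SIZE_U32(10)];
/* ... later : FSEv06_buildDTable((FSEv06_DTable*)staticDTable_example, normalizedCounter, maxSymbolValue, tableLog); */
```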
-
-
-/* *****************************************
-*  FSE advanced API
-*******************************************/
-size_t FSEv06_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
-/* same as FSEv06_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr  */
-
-size_t FSEv06_buildDTable_raw (FSEv06_DTable* dt, unsigned nbBits);
-/* build a fake FSEv06_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
-
-size_t FSEv06_buildDTable_rle (FSEv06_DTable* dt, unsigned char symbolValue);
-/* build a fake FSEv06_DTable, designed to always generate the same symbolValue */
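
To make the 'fake table' builders concrete, here is a minimal illustrative sketch (the name and parameters are not part of the library): a raw table turns the regular decoder into a reader of fixed-width `nbBits` symbols. `dt` is assumed to provide FSEv06_DTABLE_SIZE_U32(nbBits) cells.

```c
/* Illustrative sketch only : decode fixed-width symbols through the normal FSE decoder. */
static size_t FSE_readRawSymbols_sketch(void* dst, size_t dstCapacity,
                                        const void* cSrc, size_t cSrcSize,
                                        FSEv06_DTable* dt, unsigned nbBits)
{
    size_t const errorCode = FSEv06_buildDTable_raw(dt, nbBits);
    if (FSEv06_isError(errorCode)) return errorCode;
    return FSEv06_decompress_usingDTable(dst, dstCapacity, cSrc, cSrcSize, dt);
}
```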
-
-
-/* *****************************************
-*  FSE symbol decompression API
-*******************************************/
-typedef struct
-{
-    size_t      state;
-    const void* table;   /* precise table may vary, depending on U16 */
-} FSEv06_DState_t;
-
-
-static void     FSEv06_initDState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD, const FSEv06_DTable* dt);
-
-static unsigned char FSEv06_decodeSymbol(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD);
-
-
-/* *****************************************
-*  FSE unsafe API
-*******************************************/
-static unsigned char FSEv06_decodeSymbolFast(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD);
-/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
-
-
-/* *****************************************
-*  Implementation of inlined functions
-*******************************************/
-
-
-/* ======    Decompression    ====== */
-
-typedef struct {
-    U16 tableLog;
-    U16 fastMode;
-} FSEv06_DTableHeader;   /* sizeof U32 */
-
-typedef struct
-{
-    unsigned short newState;
-    unsigned char  symbol;
-    unsigned char  nbBits;
-} FSEv06_decode_t;   /* size == U32 */
-
-MEM_STATIC void FSEv06_initDState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD, const FSEv06_DTable* dt)
-{
-    const void* ptr = dt;
-    const FSEv06_DTableHeader* const DTableH = (const FSEv06_DTableHeader*)ptr;
-    DStatePtr->state = BITv06_readBits(bitD, DTableH->tableLog);
-    BITv06_reloadDStream(bitD);
-    DStatePtr->table = dt + 1;
-}
-
-MEM_STATIC BYTE FSEv06_peekSymbol(const FSEv06_DState_t* DStatePtr)
-{
-    FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    return DInfo.symbol;
-}
-
-MEM_STATIC void FSEv06_updateState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD)
-{
-    FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    U32 const nbBits = DInfo.nbBits;
-    size_t const lowBits = BITv06_readBits(bitD, nbBits);
-    DStatePtr->state = DInfo.newState + lowBits;
-}
-
-MEM_STATIC BYTE FSEv06_decodeSymbol(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD)
-{
-    FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    U32 const nbBits = DInfo.nbBits;
-    BYTE const symbol = DInfo.symbol;
-    size_t const lowBits = BITv06_readBits(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-/*! FSEv06_decodeSymbolFast() :
-    unsafe, only works if no symbol has a probability > 50% */
-MEM_STATIC BYTE FSEv06_decodeSymbolFast(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD)
-{
-    FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    U32 const nbBits = DInfo.nbBits;
-    BYTE const symbol = DInfo.symbol;
-    size_t const lowBits = BITv06_readBitsFast(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-
-
-#ifndef FSEv06_COMMONDEFS_ONLY
-
-/* **************************************************************
-*  Tuning parameters
-****************************************************************/
-/*!MEMORY_USAGE :
-*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
-*  Increasing memory usage improves compression ratio
-*  Reduced memory usage can improve speed, due to cache effect
-*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
-#define FSEv06_MAX_MEMORY_USAGE 14
-#define FSEv06_DEFAULT_MEMORY_USAGE 13
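
Worked out with the defaults above (illustrative arithmetic only): FSEv06_MAX_MEMORY_USAGE == 14 allows 2^14 = 16 KB, and since FSEv06_MAX_TABLELOG == 14-2 == 12 (see the Constants section further down), the largest DTable occupies FSEv06_DTABLE_SIZE_U32(12) cells, i.e. one header cell plus 4096 four-byte entries, which lands right at the 16 KB budget.

```c
#include <assert.h>

/* Illustrative arithmetic only : the 2^N-bytes budget, applied to the maximum setting above. */
static void FSEv06_memoryBudget_example(void)
{
    /* FSEv06_MAX_TABLELOG == FSEv06_MAX_MEMORY_USAGE - 2 == 12 (defined in the Constants section below) */
    assert(FSEv06_DTABLE_SIZE_U32(12) * sizeof(U32) == (1u << FSEv06_MAX_MEMORY_USAGE) + sizeof(U32));
}
```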
-
-/*!FSEv06_MAX_SYMBOL_VALUE :
-*  Maximum symbol value authorized.
-*  Required for proper stack allocation */
-#define FSEv06_MAX_SYMBOL_VALUE 255
-
-
-/* **************************************************************
-*  template functions type & suffix
-****************************************************************/
-#define FSEv06_FUNCTION_TYPE BYTE
-#define FSEv06_FUNCTION_EXTENSION
-#define FSEv06_DECODE_TYPE FSEv06_decode_t
-
-
-#endif   /* !FSEv06_COMMONDEFS_ONLY */
-
-
-/* ***************************************************************
-*  Constants
-*****************************************************************/
-#define FSEv06_MAX_TABLELOG  (FSEv06_MAX_MEMORY_USAGE-2)
-#define FSEv06_MAX_TABLESIZE (1U<<FSEv06_MAX_TABLELOG)
-#define FSEv06_MAXTABLESIZE_MASK (FSEv06_MAX_TABLESIZE-1)
-#define FSEv06_DEFAULT_TABLELOG (FSEv06_DEFAULT_MEMORY_USAGE-2)
-#define FSEv06_MIN_TABLELOG 5
-
-#define FSEv06_TABLELOG_ABSOLUTE_MAX 15
-#if FSEv06_MAX_TABLELOG > FSEv06_TABLELOG_ABSOLUTE_MAX
-#error "FSEv06_MAX_TABLELOG > FSEv06_TABLELOG_ABSOLUTE_MAX is not supported"
-#endif
-
-#define FSEv06_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif  /* FSEv06_STATIC_H */
-/*
-   Common functions of New Generation Entropy library
-   Copyright (C) 2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-*************************************************************************** */
-
-
-/*-****************************************
-*  FSE Error Management
-******************************************/
-unsigned FSEv06_isError(size_t code) { return ERR_isError(code); }
-
-const char* FSEv06_getErrorName(size_t code) { return ERR_getErrorName(code); }
-
-
-/* **************************************************************
-*  HUF Error Management
-****************************************************************/
-static unsigned HUFv06_isError(size_t code) { return ERR_isError(code); }
-
-
-/*-**************************************************************
-*  FSE NCount encoding-decoding
-****************************************************************/
-static short FSEv06_abs(short a) { return a<0 ? -a : a; }
-
-size_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
-                 const void* headerBuffer, size_t hbSize)
-{
-    const BYTE* const istart = (const BYTE*) headerBuffer;
-    const BYTE* const iend = istart + hbSize;
-    const BYTE* ip = istart;
-    int nbBits;
-    int remaining;
-    int threshold;
-    U32 bitStream;
-    int bitCount;
-    unsigned charnum = 0;
-    int previous0 = 0;
-
-    if (hbSize < 4) return ERROR(srcSize_wrong);
-    bitStream = MEM_readLE32(ip);
-    nbBits = (bitStream & 0xF) + FSEv06_MIN_TABLELOG;   /* extract tableLog */
-    if (nbBits > FSEv06_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
-    bitStream >>= 4;
-    bitCount = 4;
-    *tableLogPtr = nbBits;
-    remaining = (1<<nbBits)+1;
-    threshold = 1<<nbBits;
-    nbBits++;
-
-    while ((remaining>1) && (charnum<=*maxSVPtr)) {
-        if (previous0) {
-            unsigned n0 = charnum;
-            while ((bitStream & 0xFFFF) == 0xFFFF) {
-                n0+=24;
-                if (ip < iend-5) {
-                    ip+=2;
-                    bitStream = MEM_readLE32(ip) >> bitCount;
-                } else {
-                    bitStream >>= 16;
-                    bitCount+=16;
-            }   }
-            while ((bitStream & 3) == 3) {
-                n0+=3;
-                bitStream>>=2;
-                bitCount+=2;
-            }
-            n0 += bitStream & 3;
-            bitCount += 2;
-            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
-            while (charnum < n0) normalizedCounter[charnum++] = 0;
-            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
-                ip += bitCount>>3;
-                bitCount &= 7;
-                bitStream = MEM_readLE32(ip) >> bitCount;
-            }
-            else
-                bitStream >>= 2;
-        }
-        {   short const max = (short)((2*threshold-1)-remaining);
-            short count;
-
-            if ((bitStream & (threshold-1)) < (U32)max) {
-                count = (short)(bitStream & (threshold-1));
-                bitCount   += nbBits-1;
-            } else {
-                count = (short)(bitStream & (2*threshold-1));
-                if (count >= threshold) count -= max;
-                bitCount   += nbBits;
-            }
-
-            count--;   /* extra accuracy */
-            remaining -= FSEv06_abs(count);
-            normalizedCounter[charnum++] = count;
-            previous0 = !count;
-            while (remaining < threshold) {
-                nbBits--;
-                threshold >>= 1;
-            }
-
-            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
-                ip += bitCount>>3;
-                bitCount &= 7;
-            } else {
-                bitCount -= (int)(8 * (iend - 4 - ip));
-                ip = iend - 4;
-            }
-            bitStream = MEM_readLE32(ip) >> (bitCount & 31);
-    }   }   /* while ((remaining>1) && (charnum<=*maxSVPtr)) */
-    if (remaining != 1) return ERROR(GENERIC);
-    *maxSVPtr = charnum-1;
-
-    ip += (bitCount+7)>>3;
-    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
-    return ip-istart;
-}
-/* ******************************************************************
-   FSE : Finite State Entropy decoder
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-
-/* **************************************************************
-*  Compiler specifics
-****************************************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  define FORCE_INLINE static __forceinline
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
-#else
-#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#    ifdef __GNUC__
-#      define FORCE_INLINE static inline __attribute__((always_inline))
-#    else
-#      define FORCE_INLINE static inline
-#    endif
-#  else
-#    define FORCE_INLINE static
-#  endif /* __STDC_VERSION__ */
-#endif
-
-
-/* **************************************************************
-*  Error Management
-****************************************************************/
-#define FSEv06_isError ERR_isError
-#define FSEv06_STATIC_ASSERT(c) { enum { FSEv06_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
-
-
-/* **************************************************************
-*  Complex types
-****************************************************************/
-typedef U32 DTable_max_t[FSEv06_DTABLE_SIZE_U32(FSEv06_MAX_TABLELOG)];
-
-
-/* **************************************************************
-*  Templates
-****************************************************************/
-/*
-  designed to be included
-  for type-specific functions (template emulation in C)
-  Objective is to write these functions only once, for improved maintenance
-*/
-
-/* safety checks */
-#ifndef FSEv06_FUNCTION_EXTENSION
-#  error "FSEv06_FUNCTION_EXTENSION must be defined"
-#endif
-#ifndef FSEv06_FUNCTION_TYPE
-#  error "FSEv06_FUNCTION_TYPE must be defined"
-#endif
-
-/* Function names */
-#define FSEv06_CAT(X,Y) X##Y
-#define FSEv06_FUNCTION_NAME(X,Y) FSEv06_CAT(X,Y)
-#define FSEv06_TYPE_NAME(X,Y) FSEv06_CAT(X,Y)
-
-
-/* Function templates */
-FSEv06_DTable* FSEv06_createDTable (unsigned tableLog)
-{
-    if (tableLog > FSEv06_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv06_TABLELOG_ABSOLUTE_MAX;
-    return (FSEv06_DTable*)malloc( FSEv06_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
-}
-
-void FSEv06_freeDTable (FSEv06_DTable* dt)
-{
-    free(dt);
-}
-
-size_t FSEv06_buildDTable(FSEv06_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
-    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */
-    FSEv06_DECODE_TYPE* const tableDecode = (FSEv06_DECODE_TYPE*) (tdPtr);
-    U16 symbolNext[FSEv06_MAX_SYMBOL_VALUE+1];
-
-    U32 const maxSV1 = maxSymbolValue + 1;
-    U32 const tableSize = 1 << tableLog;
-    U32 highThreshold = tableSize-1;
-
-    /* Sanity Checks */
-    if (maxSymbolValue > FSEv06_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
-    if (tableLog > FSEv06_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
-
-    /* Init, lay down lowprob symbols */
-    {   FSEv06_DTableHeader DTableH;
-        DTableH.tableLog = (U16)tableLog;
-        DTableH.fastMode = 1;
-        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
-            U32 s;
-            for (s=0; s<maxSV1; s++) {
-                if (normalizedCounter[s]==-1) {
-                    tableDecode[highThreshold--].symbol = (FSEv06_FUNCTION_TYPE)s;
-                    symbolNext[s] = 1;
-                } else {
-                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
-                    symbolNext[s] = normalizedCounter[s];
-        }   }   }
-        memcpy(dt, &DTableH, sizeof(DTableH));
-    }
-
-    /* Spread symbols */
-    {   U32 const tableMask = tableSize-1;
-        U32 const step = FSEv06_TABLESTEP(tableSize);
-        U32 s, position = 0;
-        for (s=0; s<maxSV1; s++) {
-            int i;
-            for (i=0; i<normalizedCounter[s]; i++) {
-                tableDecode[position].symbol = (FSEv06_FUNCTION_TYPE)s;
-                position = (position + step) & tableMask;
-                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
-        }   }
-
-        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
-    }
-
-    /* Build Decoding table */
-    {   U32 u;
-        for (u=0; u<tableSize; u++) {
-            FSEv06_FUNCTION_TYPE const symbol = (FSEv06_FUNCTION_TYPE)(tableDecode[u].symbol);
-            U16 nextState = symbolNext[symbol]++;
-            tableDecode[u].nbBits = (BYTE) (tableLog - BITv06_highbit32 ((U32)nextState) );
-            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
-    }   }
-
-    return 0;
-}
-
-
-
-#ifndef FSEv06_COMMONDEFS_ONLY
-
-/*-*******************************************************
-*  Decompression (Byte symbols)
-*********************************************************/
-size_t FSEv06_buildDTable_rle (FSEv06_DTable* dt, BYTE symbolValue)
-{
-    void* ptr = dt;
-    FSEv06_DTableHeader* const DTableH = (FSEv06_DTableHeader*)ptr;
-    void* dPtr = dt + 1;
-    FSEv06_decode_t* const cell = (FSEv06_decode_t*)dPtr;
-
-    DTableH->tableLog = 0;
-    DTableH->fastMode = 0;
-
-    cell->newState = 0;
-    cell->symbol = symbolValue;
-    cell->nbBits = 0;
-
-    return 0;
-}
-
-
-size_t FSEv06_buildDTable_raw (FSEv06_DTable* dt, unsigned nbBits)
-{
-    void* ptr = dt;
-    FSEv06_DTableHeader* const DTableH = (FSEv06_DTableHeader*)ptr;
-    void* dPtr = dt + 1;
-    FSEv06_decode_t* const dinfo = (FSEv06_decode_t*)dPtr;
-    const unsigned tableSize = 1 << nbBits;
-    const unsigned tableMask = tableSize - 1;
-    const unsigned maxSV1 = tableMask+1;
-    unsigned s;
-
-    /* Sanity checks */
-    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
-
-    /* Build Decoding Table */
-    DTableH->tableLog = (U16)nbBits;
-    DTableH->fastMode = 1;
-    for (s=0; s<maxSV1; s++) {
-        dinfo[s].newState = 0;
-        dinfo[s].symbol = (BYTE)s;
-        dinfo[s].nbBits = (BYTE)nbBits;
-    }
-
-    return 0;
-}
-
-FORCE_INLINE size_t FSEv06_decompress_usingDTable_generic(
-          void* dst, size_t maxDstSize,
-    const void* cSrc, size_t cSrcSize,
-    const FSEv06_DTable* dt, const unsigned fast)
-{
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* op = ostart;
-    BYTE* const omax = op + maxDstSize;
-    BYTE* const olimit = omax-3;
-
-    BITv06_DStream_t bitD;
-    FSEv06_DState_t state1;
-    FSEv06_DState_t state2;
-
-    /* Init */
-    { size_t const errorCode = BITv06_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressedSize */
-      if (FSEv06_isError(errorCode)) return errorCode; }
-
-    FSEv06_initDState(&state1, &bitD, dt);
-    FSEv06_initDState(&state2, &bitD, dt);
-
-#define FSEv06_GETSYMBOL(statePtr) fast ? FSEv06_decodeSymbolFast(statePtr, &bitD) : FSEv06_decodeSymbol(statePtr, &bitD)
-
-    /* 4 symbols per loop */
-    for ( ; (BITv06_reloadDStream(&bitD)==BITv06_DStream_unfinished) && (op<olimit) ; op+=4) {
-        op[0] = FSEv06_GETSYMBOL(&state1);
-
-        if (FSEv06_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            BITv06_reloadDStream(&bitD);
-
-        op[1] = FSEv06_GETSYMBOL(&state2);
-
-        if (FSEv06_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            { if (BITv06_reloadDStream(&bitD) > BITv06_DStream_unfinished) { op+=2; break; } }
-
-        op[2] = FSEv06_GETSYMBOL(&state1);
-
-        if (FSEv06_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            BITv06_reloadDStream(&bitD);
-
-        op[3] = FSEv06_GETSYMBOL(&state2);
-    }
-
-    /* tail */
-    /* note : BITv06_reloadDStream(&bitD) >= BITv06_DStream_endOfBuffer; ends at exactly BITv06_DStream_completed */
-    while (1) {
-        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
-
-        *op++ = FSEv06_GETSYMBOL(&state1);
-
-        if (BITv06_reloadDStream(&bitD)==BITv06_DStream_overflow) {
-            *op++ = FSEv06_GETSYMBOL(&state2);
-            break;
-        }
-
-        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
-
-        *op++ = FSEv06_GETSYMBOL(&state2);
-
-        if (BITv06_reloadDStream(&bitD)==BITv06_DStream_overflow) {
-            *op++ = FSEv06_GETSYMBOL(&state1);
-            break;
-    }   }
-
-    return op-ostart;
-}
-
-
-size_t FSEv06_decompress_usingDTable(void* dst, size_t originalSize,
-                            const void* cSrc, size_t cSrcSize,
-                            const FSEv06_DTable* dt)
-{
-    const void* ptr = dt;
-    const FSEv06_DTableHeader* DTableH = (const FSEv06_DTableHeader*)ptr;
-    const U32 fastMode = DTableH->fastMode;
-
-    /* select fast mode (static) */
-    if (fastMode) return FSEv06_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
-    return FSEv06_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
-}
-
-
-size_t FSEv06_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
-{
-    const BYTE* const istart = (const BYTE*)cSrc;
-    const BYTE* ip = istart;
-    short counting[FSEv06_MAX_SYMBOL_VALUE+1];
-    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
-    unsigned tableLog;
-    unsigned maxSymbolValue = FSEv06_MAX_SYMBOL_VALUE;
-
-    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */
-
-    /* normal FSE decoding mode */
-    {   size_t const NCountLength = FSEv06_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
-        if (FSEv06_isError(NCountLength)) return NCountLength;
-        if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */
-        ip += NCountLength;
-        cSrcSize -= NCountLength;
-    }
-
-    { size_t const errorCode = FSEv06_buildDTable (dt, counting, maxSymbolValue, tableLog);
-      if (FSEv06_isError(errorCode)) return errorCode; }
-
-    return FSEv06_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);   /* always return, even if it is an error code */
-}
-
-
-
-#endif   /* FSEv06_COMMONDEFS_ONLY */
-/* ******************************************************************
-   Huffman coder, part of New Generation Entropy library
-   header file
-   Copyright (C) 2013-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-#ifndef HUFv06_H
-#define HUFv06_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/* ****************************************
-*  HUF simple functions
-******************************************/
-size_t HUFv06_decompress(void* dst,  size_t dstSize,
-                const void* cSrc, size_t cSrcSize);
-/*
-HUFv06_decompress() :
-    Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
-    into already allocated destination buffer 'dst', of size 'dstSize'.
-    `dstSize` : must be the **exact** size of original (uncompressed) data.
-    Note : in contrast with FSE, HUFv06_decompress can regenerate
-           RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
-           because it knows the size to regenerate.
-    @return : size of regenerated data (== dstSize)
-              or an error code, which can be tested using HUFv06_isError()
-*/
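
A minimal usage sketch of the simple API (illustrative only; the wrapper name is not part of the library). `originalSize` is assumed to be known exactly by the caller, for instance from an enclosing block header.

```c
/* Illustrative sketch only : one-shot Huffman decoding with a known regenerated size. */
static size_t HUF_regenerate_sketch(void* dst, size_t originalSize,
                                    const void* cSrc, size_t cSrcSize)
{
    size_t const result = HUFv06_decompress(dst, originalSize, cSrc, cSrcSize);
    if (HUFv06_isError(result)) return result;   /* corruption, wrong sizes, ... */
    return result;                               /* on success, result == originalSize */
}
```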
-
-
-/* ****************************************
-*  Tool functions
-******************************************/
-size_t HUFv06_compressBound(size_t size);       /**< maximum compressed size */
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif   /* HUFv06_H */
-/* ******************************************************************
-   Huffman codec, part of New Generation Entropy library
-   header file, for static linking only
-   Copyright (C) 2013-2016, Yann Collet
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-#ifndef HUFv06_STATIC_H
-#define HUFv06_STATIC_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/* ****************************************
-*  Static allocation
-******************************************/
-/* HUF buffer bounds */
-#define HUFv06_CTABLEBOUND 129
-#define HUFv06_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true if incompressible pre-filtered with fast heuristic */
-#define HUFv06_COMPRESSBOUND(size) (HUFv06_CTABLEBOUND + HUFv06_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
-
-/* static allocation of HUF's DTable */
-#define HUFv06_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))
-#define HUFv06_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
-        unsigned short DTable[HUFv06_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
-#define HUFv06_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
-        unsigned int DTable[HUFv06_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
-#define HUFv06_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
-        unsigned int DTable[HUFv06_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
-
-
-/* ****************************************
-*  Advanced decompression functions
-******************************************/
-size_t HUFv06_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
-size_t HUFv06_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */
-
-
-
-/*!
-HUFv06_decompress() does the following:
-1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics
-2. build the Huffman table from the saved description, using HUFv06_readDTableXn()
-3. decode 1 or 4 segments in parallel using HUFv06_decompressSXn_usingDTable
-*/
-size_t HUFv06_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);
-size_t HUFv06_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);
-
-size_t HUFv06_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
-size_t HUFv06_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
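
To make steps 2 and 3 above concrete, here is a minimal sketch of the double-symbols path (illustrative only; the wrapper name is not part of the library), mirroring the single-symbol wrappers defined further down in this file.

```c
/* Illustrative sketch only : read the X4 table description, then decode the 4 interleaved streams. */
static size_t HUF_decompress4X4_sketch(void* dst, size_t dstSize,
                                       const void* cSrc, size_t cSrcSize)
{
    HUFv06_CREATE_STATIC_DTABLEX4(DTable, HUFv06_MAX_TABLELOG);
    const BYTE* ip = (const BYTE*)cSrc;

    size_t const hSize = HUFv06_readDTableX4(DTable, cSrc, cSrcSize);   /* step 2 : build the DTable */
    if (HUFv06_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize;
    cSrcSize -= hSize;

    return HUFv06_decompress4X4_usingDTable(dst, dstSize, ip, cSrcSize, DTable);   /* step 3 : decode segments */
}
```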
-
-
-/* single stream variants */
-size_t HUFv06_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
-size_t HUFv06_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
-
-size_t HUFv06_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
-size_t HUFv06_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
-
-
-
-/* **************************************************************
-*  Constants
-****************************************************************/
-#define HUFv06_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUFv06_MAX_TABLELOG. Beyond that value, code does not work */
-#define HUFv06_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUFv06_ABSOLUTEMAX_TABLELOG */
-#define HUFv06_DEFAULT_TABLELOG  HUFv06_MAX_TABLELOG   /* tableLog by default, when not specified */
-#define HUFv06_MAX_SYMBOL_VALUE 255
-#if (HUFv06_MAX_TABLELOG > HUFv06_ABSOLUTEMAX_TABLELOG)
-#  error "HUFv06_MAX_TABLELOG is too large !"
-#endif
-
-
-
-/*! HUFv06_readStats() :
-    Read compact Huffman tree, saved by HUFv06_writeCTable().
-    `huffWeight` is destination buffer.
-    @return : size read from `src`
-*/
-MEM_STATIC size_t HUFv06_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
-                            U32* nbSymbolsPtr, U32* tableLogPtr,
-                            const void* src, size_t srcSize)
-{
-    U32 weightTotal;
-    const BYTE* ip = (const BYTE*) src;
-    size_t iSize;
-    size_t oSize;
-
-    if (!srcSize) return ERROR(srcSize_wrong);
-    iSize = ip[0];
-    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzers complain ... */
-
-    if (iSize >= 128)  { /* special header */
-        if (iSize >= (242)) {  /* RLE */
-            static U32 l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
-            oSize = l[iSize-242];
-            memset(huffWeight, 1, hwSize);
-            iSize = 0;
-        }
-        else {   /* Incompressible */
-            oSize = iSize - 127;
-            iSize = ((oSize+1)/2);
-            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-            if (oSize >= hwSize) return ERROR(corruption_detected);
-            ip += 1;
-            {   U32 n;
-                for (n=0; n<oSize; n+=2) {
-                    huffWeight[n]   = ip[n/2] >> 4;
-                    huffWeight[n+1] = ip[n/2] & 15;
-    }   }   }   }
-    else  {   /* header compressed with FSE (normal case) */
-        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-        oSize = FSEv06_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */
-        if (FSEv06_isError(oSize)) return oSize;
-    }
-
-    /* collect weight stats */
-    memset(rankStats, 0, (HUFv06_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
-    weightTotal = 0;
-    {   U32 n; for (n=0; n<oSize; n++) {
-            if (huffWeight[n] >= HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
-            rankStats[huffWeight[n]]++;
-            weightTotal += (1 << huffWeight[n]) >> 1;
-    }   }
-    if (weightTotal == 0) return ERROR(corruption_detected);
-
-    /* get last non-null symbol weight (implied, total must be 2^n) */
-    {   U32 const tableLog = BITv06_highbit32(weightTotal) + 1;
-        if (tableLog > HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
-        *tableLogPtr = tableLog;
-        /* determine last weight */
-        {   U32 const total = 1 << tableLog;
-            U32 const rest = total - weightTotal;
-            U32 const verif = 1 << BITv06_highbit32(rest);
-            U32 const lastWeight = BITv06_highbit32(rest) + 1;
-            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
-            huffWeight[oSize] = (BYTE)lastWeight;
-            rankStats[lastWeight]++;
-    }   }
-
-    /* check tree construction validity */
-    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
-
-    /* results */
-    *nbSymbolsPtr = (U32)(oSize+1);
-    return iSize+1;
-}
-
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* HUFv06_STATIC_H */
-/* ******************************************************************
-   Huffman decoder, part of New Generation Entropy library
-   Copyright (C) 2013-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-/* **************************************************************
-*  Compiler specifics
-****************************************************************/
-#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-/* inline is defined */
-#elif defined(_MSC_VER)
-#  define inline __inline
-#else
-#  define inline /* disable inline */
-#endif
-
-
-#ifdef _MSC_VER    /* Visual Studio */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#endif
-
-
-
-/* **************************************************************
-*  Error Management
-****************************************************************/
-#define HUFv06_STATIC_ASSERT(c) { enum { HUFv06_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
-
-
-
-/* *******************************************************
-*  HUF : Huffman block decompression
-*********************************************************/
-typedef struct { BYTE byte; BYTE nbBits; } HUFv06_DEltX2;   /* single-symbol decoding */
-
-typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv06_DEltX4;  /* double-symbols decoding */
-
-typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
-
-
-
-/*-***************************/
-/*  single-symbol decoding   */
-/*-***************************/
-
-size_t HUFv06_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
-{
-    BYTE huffWeight[HUFv06_MAX_SYMBOL_VALUE + 1];
-    U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */
-    U32 tableLog = 0;
-    size_t iSize;
-    U32 nbSymbols = 0;
-    U32 n;
-    U32 nextRankStart;
-    void* const dtPtr = DTable + 1;
-    HUFv06_DEltX2* const dt = (HUFv06_DEltX2*)dtPtr;
-
-    HUFv06_STATIC_ASSERT(sizeof(HUFv06_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */
-    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzers complain ... */
-
-    iSize = HUFv06_readStats(huffWeight, HUFv06_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
-    if (HUFv06_isError(iSize)) return iSize;
-
-    /* check result */
-    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */
-    DTable[0] = (U16)tableLog;   /* maybe should separate sizeof allocated DTable, from used size of DTable, in case of re-use */
-
-    /* Prepare ranks */
-    nextRankStart = 0;
-    for (n=1; n<tableLog+1; n++) {
-        U32 current = nextRankStart;
-        nextRankStart += (rankVal[n] << (n-1));
-        rankVal[n] = current;
-    }
-
-    /* fill DTable */
-    for (n=0; n<nbSymbols; n++) {
-        const U32 w = huffWeight[n];
-        const U32 length = (1 << w) >> 1;
-        U32 i;
-        HUFv06_DEltX2 D;
-        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
-        for (i = rankVal[w]; i < rankVal[w] + length; i++)
-            dt[i] = D;
-        rankVal[w] += length;
-    }
-
-    return iSize;
-}
-
-
-static BYTE HUFv06_decodeSymbolX2(BITv06_DStream_t* Dstream, const HUFv06_DEltX2* dt, const U32 dtLog)
-{
-    const size_t val = BITv06_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
-    const BYTE c = dt[val].byte;
-    BITv06_skipBits(Dstream, dt[val].nbBits);
-    return c;
-}
-
-#define HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
-    *ptr++ = HUFv06_decodeSymbolX2(DStreamPtr, dt, dtLog)
-
-#define HUFv06_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUFv06_MAX_TABLELOG<=12)) \
-        HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-#define HUFv06_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-static inline size_t HUFv06_decodeStreamX2(BYTE* p, BITv06_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv06_DEltX2* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 4 symbols at a time */
-    while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p <= pEnd-4)) {
-        HUFv06_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUFv06_DECODE_SYMBOLX2_1(p, bitDPtr);
-        HUFv06_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);
-    }
-
-    /* closer to the end */
-    while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p < pEnd))
-        HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);
-
-    /* no more data to retrieve from bitstream, hence no need to reload */
-    while (p < pEnd)
-        HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);
-
-    return pEnd-pStart;
-}
-
-size_t HUFv06_decompress1X2_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const U16* DTable)
-{
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + dstSize;
-    const U32 dtLog = DTable[0];
-    const void* dtPtr = DTable;
-    const HUFv06_DEltX2* const dt = ((const HUFv06_DEltX2*)dtPtr)+1;
-    BITv06_DStream_t bitD;
-
-    { size_t const errorCode = BITv06_initDStream(&bitD, cSrc, cSrcSize);
-      if (HUFv06_isError(errorCode)) return errorCode; }
-
-    HUFv06_decodeStreamX2(op, &bitD, oend, dt, dtLog);
-
-    /* check */
-    if (!BITv06_endOfDStream(&bitD)) return ERROR(corruption_detected);
-
-    return dstSize;
-}
-
-size_t HUFv06_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t const errorCode = HUFv06_readDTableX2 (DTable, cSrc, cSrcSize);
-    if (HUFv06_isError(errorCode)) return errorCode;
-    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += errorCode;
-    cSrcSize -= errorCode;
-
-    return HUFv06_decompress1X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-
-size_t HUFv06_decompress4X2_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const U16* DTable)
-{
-    /* Check */
-    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */
-
-    {   const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable;
-        const HUFv06_DEltX2* const dt = ((const HUFv06_DEltX2*)dtPtr) +1;
-        const U32 dtLog = DTable[0];
-        size_t errorCode;
-
-        /* Init */
-        BITv06_DStream_t bitD1;
-        BITv06_DStream_t bitD2;
-        BITv06_DStream_t bitD3;
-        BITv06_DStream_t bitD4;
-        const size_t length1 = MEM_readLE16(istart);
-        const size_t length2 = MEM_readLE16(istart+2);
-        const size_t length3 = MEM_readLE16(istart+4);
-        size_t length4;
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        const size_t segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-
-        length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        errorCode = BITv06_initDStream(&bitD1, istart1, length1);
-        if (HUFv06_isError(errorCode)) return errorCode;
-        errorCode = BITv06_initDStream(&bitD2, istart2, length2);
-        if (HUFv06_isError(errorCode)) return errorCode;
-        errorCode = BITv06_initDStream(&bitD3, istart3, length3);
-        if (HUFv06_isError(errorCode)) return errorCode;
-        errorCode = BITv06_initDStream(&bitD4, istart4, length4);
-        if (HUFv06_isError(errorCode)) return errorCode;
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
-        for ( ; (endSignal==BITv06_DStream_unfinished) && (op4<(oend-7)) ; ) {
-            HUFv06_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUFv06_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUFv06_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUFv06_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUFv06_DECODE_SYMBOLX2_1(op1, &bitD1);
-            HUFv06_DECODE_SYMBOLX2_1(op2, &bitD2);
-            HUFv06_DECODE_SYMBOLX2_1(op3, &bitD3);
-            HUFv06_DECODE_SYMBOLX2_1(op4, &bitD4);
-            HUFv06_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUFv06_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUFv06_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUFv06_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUFv06_DECODE_SYMBOLX2_0(op1, &bitD1);
-            HUFv06_DECODE_SYMBOLX2_0(op2, &bitD2);
-            HUFv06_DECODE_SYMBOLX2_0(op3, &bitD3);
-            HUFv06_DECODE_SYMBOLX2_0(op4, &bitD4);
-            endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 is assumed to be already verified within the main loop */
-
-        /* finish bitStreams one by one */
-        HUFv06_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
-        HUFv06_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
-        HUFv06_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
-        HUFv06_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        endSignal = BITv06_endOfDStream(&bitD1) & BITv06_endOfDStream(&bitD2) & BITv06_endOfDStream(&bitD3) & BITv06_endOfDStream(&bitD4);
-        if (!endSignal) return ERROR(corruption_detected);
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-size_t HUFv06_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t const errorCode = HUFv06_readDTableX2 (DTable, cSrc, cSrcSize);
-    if (HUFv06_isError(errorCode)) return errorCode;
-    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += errorCode;
-    cSrcSize -= errorCode;
-
-    return HUFv06_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-
-/* *************************/
-/* double-symbols decoding */
-/* *************************/
-
-static void HUFv06_fillDTableX4Level2(HUFv06_DEltX4* DTable, U32 sizeLog, const U32 consumed,
-                           const U32* rankValOrigin, const int minWeight,
-                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
-                           U32 nbBitsBaseline, U16 baseSeq)
-{
-    HUFv06_DEltX4 DElt;
-    U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1];
-
-    /* get pre-calculated rankVal */
-    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
-    /* fill skipped values */
-    if (minWeight>1) {
-        U32 i, skipSize = rankVal[minWeight];
-        MEM_writeLE16(&(DElt.sequence), baseSeq);
-        DElt.nbBits   = (BYTE)(consumed);
-        DElt.length   = 1;
-        for (i = 0; i < skipSize; i++)
-            DTable[i] = DElt;
-    }
-
-    /* fill DTable */
-    { U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
-        const U32 symbol = sortedSymbols[s].symbol;
-        const U32 weight = sortedSymbols[s].weight;
-        const U32 nbBits = nbBitsBaseline - weight;
-        const U32 length = 1 << (sizeLog-nbBits);
-        const U32 start = rankVal[weight];
-        U32 i = start;
-        const U32 end = start + length;
-
-        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
-        DElt.nbBits = (BYTE)(nbBits + consumed);
-        DElt.length = 2;
-        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
-
-        rankVal[weight] += length;
-    }}
-}
-
-typedef U32 rankVal_t[HUFv06_ABSOLUTEMAX_TABLELOG][HUFv06_ABSOLUTEMAX_TABLELOG + 1];
-
-static void HUFv06_fillDTableX4(HUFv06_DEltX4* DTable, const U32 targetLog,
-                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
-                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
-                           const U32 nbBitsBaseline)
-{
-    U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1];
-    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
-    const U32 minBits  = nbBitsBaseline - maxWeight;
-    U32 s;
-
-    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
-    /* fill DTable */
-    for (s=0; s<sortedListSize; s++) {
-        const U16 symbol = sortedList[s].symbol;
-        const U32 weight = sortedList[s].weight;
-        const U32 nbBits = nbBitsBaseline - weight;
-        const U32 start = rankVal[weight];
-        const U32 length = 1 << (targetLog-nbBits);
-
-        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
-            U32 sortedRank;
-            int minWeight = nbBits + scaleLog;
-            if (minWeight < 1) minWeight = 1;
-            sortedRank = rankStart[minWeight];
-            HUFv06_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
-                           rankValOrigin[nbBits], minWeight,
-                           sortedList+sortedRank, sortedListSize-sortedRank,
-                           nbBitsBaseline, symbol);
-        } else {
-            HUFv06_DEltX4 DElt;
-            MEM_writeLE16(&(DElt.sequence), symbol);
-            DElt.nbBits = (BYTE)(nbBits);
-            DElt.length = 1;
-            {   U32 u;
-                const U32 end = start + length;
-                for (u = start; u < end; u++) DTable[u] = DElt;
-        }   }
-        rankVal[weight] += length;
-    }
-}
-
-size_t HUFv06_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
-{
-    BYTE weightList[HUFv06_MAX_SYMBOL_VALUE + 1];
-    sortedSymbol_t sortedSymbol[HUFv06_MAX_SYMBOL_VALUE + 1];
-    U32 rankStats[HUFv06_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
-    U32 rankStart0[HUFv06_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
-    U32* const rankStart = rankStart0+1;
-    rankVal_t rankVal;
-    U32 tableLog, maxW, sizeOfSort, nbSymbols;
-    const U32 memLog = DTable[0];
-    size_t iSize;
-    void* dtPtr = DTable;
-    HUFv06_DEltX4* const dt = ((HUFv06_DEltX4*)dtPtr) + 1;
-
-    HUFv06_STATIC_ASSERT(sizeof(HUFv06_DEltX4) == sizeof(U32));   /* if compilation fails here, assertion is false */
-    if (memLog > HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
-    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
-
-    iSize = HUFv06_readStats(weightList, HUFv06_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
-    if (HUFv06_isError(iSize)) return iSize;
-
-    /* check result */
-    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
-
-    /* find maxWeight */
-    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */
-
-    /* Get start index of each weight */
-    {   U32 w, nextRankStart = 0;
-        for (w=1; w<maxW+1; w++) {
-            U32 current = nextRankStart;
-            nextRankStart += rankStats[w];
-            rankStart[w] = current;
-        }
-        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/
-        sizeOfSort = nextRankStart;
-    }
-
-    /* sort symbols by weight */
-    {   U32 s;
-        for (s=0; s<nbSymbols; s++) {
-            U32 const w = weightList[s];
-            U32 const r = rankStart[w]++;
-            sortedSymbol[r].symbol = (BYTE)s;
-            sortedSymbol[r].weight = (BYTE)w;
-        }
-        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
-    }
-
-    /* Build rankVal */
-    {   U32* const rankVal0 = rankVal[0];
-        {   int const rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */
-            U32 nextRankVal = 0;
-            U32 w;
-            for (w=1; w<maxW+1; w++) {
-                U32 current = nextRankVal;
-                nextRankVal += rankStats[w] << (w+rescale);
-                rankVal0[w] = current;
-        }   }
-        {   U32 const minBits = tableLog+1 - maxW;
-            U32 consumed;
-            for (consumed = minBits; consumed < memLog - minBits + 1; consumed++) {
-                U32* const rankValPtr = rankVal[consumed];
-                U32 w;
-                for (w = 1; w < maxW+1; w++) {
-                    rankValPtr[w] = rankVal0[w] >> consumed;
-    }   }   }   }
-
-    HUFv06_fillDTableX4(dt, memLog,
-                   sortedSymbol, sizeOfSort,
-                   rankStart0, rankVal, maxW,
-                   tableLog+1);
-
-    return iSize;
-}
-
-
-static U32 HUFv06_decodeSymbolX4(void* op, BITv06_DStream_t* DStream, const HUFv06_DEltX4* dt, const U32 dtLog)
-{
-    const size_t val = BITv06_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 2);
-    BITv06_skipBits(DStream, dt[val].nbBits);
-    return dt[val].length;
-}
-
-static U32 HUFv06_decodeLastSymbolX4(void* op, BITv06_DStream_t* DStream, const HUFv06_DEltX4* dt, const U32 dtLog)
-{
-    const size_t val = BITv06_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 1);
-    if (dt[val].length==1) BITv06_skipBits(DStream, dt[val].nbBits);
-    else {
-        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
-            BITv06_skipBits(DStream, dt[val].nbBits);
-            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
-                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
-    }   }
-    return 1;
-}
-
-
-#define HUFv06_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
-    ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUFv06_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUFv06_MAX_TABLELOG<=12)) \
-        ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUFv06_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-static inline size_t HUFv06_decodeStreamX4(BYTE* p, BITv06_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv06_DEltX4* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 8 symbols at a time */
-    while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p < pEnd-7)) {
-        HUFv06_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUFv06_DECODE_SYMBOLX4_1(p, bitDPtr);
-        HUFv06_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr);
-    }
-
-    /* closer to the end */
-    while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p <= pEnd-2))
-        HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr);
-
-    while (p <= pEnd-2)
-        HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
-
-    if (p < pEnd)
-        p += HUFv06_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
-
-    return p-pStart;
-}
-
-
-size_t HUFv06_decompress1X4_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const U32* DTable)
-{
-    const BYTE* const istart = (const BYTE*) cSrc;
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* const oend = ostart + dstSize;
-
-    const U32 dtLog = DTable[0];
-    const void* const dtPtr = DTable;
-    const HUFv06_DEltX4* const dt = ((const HUFv06_DEltX4*)dtPtr) +1;
-
-    /* Init */
-    BITv06_DStream_t bitD;
-    { size_t const errorCode = BITv06_initDStream(&bitD, istart, cSrcSize);
-      if (HUFv06_isError(errorCode)) return errorCode; }
-
-    /* decode */
-    HUFv06_decodeStreamX4(ostart, &bitD, oend, dt, dtLog);
-
-    /* check */
-    if (!BITv06_endOfDStream(&bitD)) return ERROR(corruption_detected);
-
-    /* decoded size */
-    return dstSize;
-}
-
-size_t HUFv06_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUFv06_CREATE_STATIC_DTABLEX4(DTable, HUFv06_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t const hSize = HUFv06_readDTableX4 (DTable, cSrc, cSrcSize);
-    if (HUFv06_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize;
-    cSrcSize -= hSize;
-
-    return HUFv06_decompress1X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-size_t HUFv06_decompress4X4_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const U32* DTable)
-{
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-
-    {   const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable;
-        const HUFv06_DEltX4* const dt = ((const HUFv06_DEltX4*)dtPtr) +1;
-        const U32 dtLog = DTable[0];
-        size_t errorCode;
-
-        /* Init */
-        BITv06_DStream_t bitD1;
-        BITv06_DStream_t bitD2;
-        BITv06_DStream_t bitD3;
-        BITv06_DStream_t bitD4;
-        const size_t length1 = MEM_readLE16(istart);
-        const size_t length2 = MEM_readLE16(istart+2);
-        const size_t length3 = MEM_readLE16(istart+4);
-        size_t length4;
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        const size_t segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-
-        length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        errorCode = BITv06_initDStream(&bitD1, istart1, length1);
-        if (HUFv06_isError(errorCode)) return errorCode;
-        errorCode = BITv06_initDStream(&bitD2, istart2, length2);
-        if (HUFv06_isError(errorCode)) return errorCode;
-        errorCode = BITv06_initDStream(&bitD3, istart3, length3);
-        if (HUFv06_isError(errorCode)) return errorCode;
-        errorCode = BITv06_initDStream(&bitD4, istart4, length4);
-        if (HUFv06_isError(errorCode)) return errorCode;
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
-        for ( ; (endSignal==BITv06_DStream_unfinished) && (op4<(oend-7)) ; ) {
-            HUFv06_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUFv06_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUFv06_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUFv06_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUFv06_DECODE_SYMBOLX4_1(op1, &bitD1);
-            HUFv06_DECODE_SYMBOLX4_1(op2, &bitD2);
-            HUFv06_DECODE_SYMBOLX4_1(op3, &bitD3);
-            HUFv06_DECODE_SYMBOLX4_1(op4, &bitD4);
-            HUFv06_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUFv06_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUFv06_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUFv06_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUFv06_DECODE_SYMBOLX4_0(op1, &bitD1);
-            HUFv06_DECODE_SYMBOLX4_0(op2, &bitD2);
-            HUFv06_DECODE_SYMBOLX4_0(op3, &bitD3);
-            HUFv06_DECODE_SYMBOLX4_0(op4, &bitD4);
-
-            endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 is supposed to be already verified within the main loop */
-
-        /* finish bitStreams one by one */
-        HUFv06_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
-        HUFv06_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
-        HUFv06_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
-        HUFv06_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        endSignal = BITv06_endOfDStream(&bitD1) & BITv06_endOfDStream(&bitD2) & BITv06_endOfDStream(&bitD3) & BITv06_endOfDStream(&bitD4);
-        if (!endSignal) return ERROR(corruption_detected);
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-size_t HUFv06_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUFv06_CREATE_STATIC_DTABLEX4(DTable, HUFv06_MAX_TABLELOG);
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t hSize = HUFv06_readDTableX4 (DTable, cSrc, cSrcSize);
-    if (HUFv06_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize;
-    cSrcSize -= hSize;
-
-    return HUFv06_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
-}
-
-
-
-
-/* ********************************/
-/* Generic decompression selector */
-/* ********************************/
-
-typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
-static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
-{
-    /* single, double, quad */
-    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
-    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
-    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
-    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
-    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
-    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
-    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
-    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
-    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
-    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
-    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
-    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
-    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
-    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
-    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
-    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
-};
-
-typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-
-size_t HUFv06_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    static const decompressionAlgo decompress[3] = { HUFv06_decompress4X2, HUFv06_decompress4X4, NULL };
-    U32 Dtime[3];   /* decompression time estimation */
-
-    /* validation checks */
-    if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
-    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
-    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
-
-    /* decoder timing evaluation */
-    {   U32 const Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */
-        U32 const D256 = (U32)(dstSize >> 8);
-        U32 n; for (n=0; n<3; n++)
-            Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
-    }
-
-    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
-
-    {   U32 algoNb = 0;
-        if (Dtime[1] < Dtime[0]) algoNb = 1;
-        // if (Dtime[2] < Dtime[algoNb]) algoNb = 2;   /* current speed of HUFv06_decompress4X6 is not good */
-        return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
-    }
-
-    //return HUFv06_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */
-    //return HUFv06_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */
-    //return HUFv06_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */
-}
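
For reference, the decoder-selection heuristic above can be summarised in a standalone sketch. Everything here (the function name `pickHufDecoder`, the two hard-coded rows) is illustrative only and not part of the vendored file; the rows are copied from the Q==8 and Q==12 entries of `algoTime[][]` above.

```c
#include <stddef.h>

typedef struct { unsigned tableTime; unsigned decode256Time; } algo_time_sketch_t;

/* return 1 to pick the double-symbol decoder (4X4), 0 for single-symbol (4X2) */
static int pickHufDecoder(size_t cSrcSize, size_t dstSize)
{
    if (dstSize == 0 || cSrcSize >= dstSize) return 0;      /* degenerate inputs */

    {   unsigned const Q    = (unsigned)(cSrcSize * 16 / dstSize);   /* 0..15 */
        unsigned const D256 = (unsigned)(dstSize >> 8);

        /* two representative rows of the 16-row algoTime table above */
        algo_time_sketch_t const row8[2]  = { {  926, 128 }, { 1613, 75 } };
        algo_time_sketch_t const row12[2] = { { 1242, 128 }, { 2415, 93 } };
        algo_time_sketch_t const* row = (Q >= 12) ? row12 : row8;

        unsigned t2 = row[0].tableTime + row[0].decode256Time * D256;  /* 4X2 estimate */
        unsigned t4 = row[1].tableTime + row[1].decode256Time * D256;  /* 4X4 estimate */
        t4 += t4 >> 4;   /* same penalty as above : 4X4 uses a larger DTable */

        return (t4 < t2) ? 1 : 0;
    }
}
```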
-/*
-    Common functions of Zstd compression library
-    Copyright (C) 2015-2016, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd homepage : http://www.zstd.net/
-*/
-
-
-/*-****************************************
-*  Version
-******************************************/
-
-/*-****************************************
-*  ZSTD Error Management
-******************************************/
-/*! ZSTDv06_isError() :
-*   tells if a return value is an error code */
-unsigned ZSTDv06_isError(size_t code) { return ERR_isError(code); }
-
-/*! ZSTDv06_getErrorName() :
-*   provides error code string from function result (useful for debugging) */
-const char* ZSTDv06_getErrorName(size_t code) { return ERR_getErrorName(code); }
-
-
-/* **************************************************************
-*  ZBUFF Error Management
-****************************************************************/
-unsigned ZBUFFv06_isError(size_t errorCode) { return ERR_isError(errorCode); }
-
-const char* ZBUFFv06_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
-/*
-    zstd - standard compression library
-    Copyright (C) 2014-2016, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd homepage : http://www.zstd.net
-*/
-
-/* ***************************************************************
-*  Tuning parameters
-*****************************************************************/
-/*!
- * HEAPMODE :
- * Select how default decompression function ZSTDv06_decompress() will allocate memory,
- * in memory stack (0), or in memory heap (1, requires malloc())
- */
-#ifndef ZSTDv06_HEAPMODE
-#  define ZSTDv06_HEAPMODE 1
-#endif
-
-
-
-/*-*******************************************************
-*  Compiler specifics
-*********************************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
-#endif
-
-
-/*-*************************************
-*  Macros
-***************************************/
-#define ZSTDv06_isError ERR_isError   /* for inlining */
-#define FSEv06_isError  ERR_isError
-#define HUFv06_isError  ERR_isError
-
-
-/*_*******************************************************
-*  Memory operations
-**********************************************************/
-static void ZSTDv06_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
-
-
-/*-*************************************************************
-*   Context management
-***************************************************************/
-typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
-               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock } ZSTDv06_dStage;
-
-struct ZSTDv06_DCtx_s
-{
-    FSEv06_DTable LLTable[FSEv06_DTABLE_SIZE_U32(LLFSELog)];
-    FSEv06_DTable OffTable[FSEv06_DTABLE_SIZE_U32(OffFSELog)];
-    FSEv06_DTable MLTable[FSEv06_DTABLE_SIZE_U32(MLFSELog)];
-    unsigned   hufTableX4[HUFv06_DTABLE_SIZE(HufLog)];
-    const void* previousDstEnd;
-    const void* base;
-    const void* vBase;
-    const void* dictEnd;
-    size_t expected;
-    size_t headerSize;
-    ZSTDv06_frameParams fParams;
-    blockType_t bType;   /* used in ZSTDv06_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
-    ZSTDv06_dStage stage;
-    U32 flagRepeatTable;
-    const BYTE* litPtr;
-    size_t litSize;
-    BYTE litBuffer[ZSTDv06_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
-    BYTE headerBuffer[ZSTDv06_FRAMEHEADERSIZE_MAX];
-};  /* typedef'd to ZSTDv06_DCtx within "zstd_static.h" */
-
-size_t ZSTDv06_sizeofDCtx (void); /* Hidden declaration */
-size_t ZSTDv06_sizeofDCtx (void) { return sizeof(ZSTDv06_DCtx); }
-
-size_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx)
-{
-    dctx->expected = ZSTDv06_frameHeaderSize_min;
-    dctx->stage = ZSTDds_getFrameHeaderSize;
-    dctx->previousDstEnd = NULL;
-    dctx->base = NULL;
-    dctx->vBase = NULL;
-    dctx->dictEnd = NULL;
-    dctx->hufTableX4[0] = HufLog;
-    dctx->flagRepeatTable = 0;
-    return 0;
-}
-
-ZSTDv06_DCtx* ZSTDv06_createDCtx(void)
-{
-    ZSTDv06_DCtx* dctx = (ZSTDv06_DCtx*)malloc(sizeof(ZSTDv06_DCtx));
-    if (dctx==NULL) return NULL;
-    ZSTDv06_decompressBegin(dctx);
-    return dctx;
-}
-
-size_t ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx)
-{
-    free(dctx);
-    return 0;   /* reserved as a potential error code in the future */
-}
-
-void ZSTDv06_copyDCtx(ZSTDv06_DCtx* dstDCtx, const ZSTDv06_DCtx* srcDCtx)
-{
-    memcpy(dstDCtx, srcDCtx,
-           sizeof(ZSTDv06_DCtx) - (ZSTDv06_BLOCKSIZE_MAX+WILDCOPY_OVERLENGTH + ZSTDv06_frameHeaderSize_max));  /* no need to copy workspace */
-}
-
-
-/*-*************************************************************
-*   Decompression section
-***************************************************************/
-
-/* Frame format description
-   Frame Header -  [ Block Header - Block ] - Frame End
-   1) Frame Header
-      - 4 bytes - Magic Number : ZSTDv06_MAGICNUMBER (defined within zstd_static.h)
-      - 1 byte  - Frame Descriptor
-   2) Block Header
-      - 3 bytes, starting with a 2-bits descriptor
-                 Uncompressed, Compressed, Frame End, unused
-   3) Block
-      See Block Format Description
-   4) Frame End
-      - 3 bytes, compatible with Block Header
-*/
-
-
-/* Frame descriptor
-
-   1 byte, using :
-   bit 0-3 : windowLog - ZSTDv06_WINDOWLOG_ABSOLUTEMIN   (see zstd_internal.h)
-   bit 4   : minmatch 4(0) or 3(1)
-   bit 5   : reserved (must be zero)
-   bit 6-7 : Frame content size : unknown, 1 byte, 2 bytes, 8 bytes
-
-   Optional : content size (0, 1, 2 or 8 bytes)
-   0 : unknown
-   1 : 0-255 bytes
-   2 : 256 - 65535+256
-   8 : up to 16 exa
-*/
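
A minimal sketch of how that one descriptor byte unpacks, matching ZSTDv06_getFrameParams() further down; `parseFrameDescriptor` and `frameDescSketch_t` are hypothetical names, and `windowLogMin` stands in for ZSTDv06_WINDOWLOG_ABSOLUTEMIN.

```c
#include <stdint.h>

typedef struct {
    unsigned windowLog;      /* bits 0-3, biased by the format's minimum */
    unsigned minMatch;       /* bit 4 : 0 -> 4, 1 -> 3 */
    unsigned fcsFieldSize;   /* bits 6-7 -> 0, 1, 2 or 8 extra content-size bytes */
} frameDescSketch_t;

/* returns 0 on success, -1 if the reserved bit is set */
static int parseFrameDescriptor(uint8_t desc, unsigned windowLogMin,
                                frameDescSketch_t* out)
{
    static const unsigned fcsSize[4] = { 0, 1, 2, 8 };
    if (desc & 0x20) return -1;               /* reserved bit must be zero */
    out->windowLog    = (desc & 0x0F) + windowLogMin;
    out->minMatch     = (desc & 0x10) ? 3 : 4;
    out->fcsFieldSize = fcsSize[desc >> 6];
    return 0;
}
```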
-
-
-/* Compressed Block, format description
-
-   Block = Literal Section - Sequences Section
-   Prerequisite : size of (compressed) block, maximum size of regenerated data
-
-   1) Literal Section
-
-   1.1) Header : 1-5 bytes
-        flags: 2 bits
-            00 compressed by Huff0
-            01 unused
-            10 is Raw (uncompressed)
-            11 is Rle
-            Note : using 01 => Huff0 with precomputed table ?
-            Note : delta map ? => compressed ?
-
-   1.1.1) Huff0-compressed literal block : 3-5 bytes
-            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
-            srcSize < 1 KB => 3 bytes (2-2-10-10)
-            srcSize < 16KB => 4 bytes (2-2-14-14)
-            else           => 5 bytes (2-2-18-18)
-            big endian convention
-
-   1.1.2) Raw (uncompressed) literal block header : 1-3 bytes
-        size :  5 bits: (IS_RAW<<6) + (0<<4) + size
-               12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)
-                        size&255
-               20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)
-                        size>>8&255
-                        size&255
-
-   1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes
-        size :  5 bits: (IS_RLE<<6) + (0<<4) + size
-               12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)
-                        size&255
-               20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)
-                        size>>8&255
-                        size&255
-
-   1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes
-            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
-            srcSize < 1 KB => 3 bytes (2-2-10-10)
-            srcSize < 16KB => 4 bytes (2-2-14-14)
-            else           => 5 bytes (2-2-18-18)
-            big endian convention
-
-        1- CTable available (stored into workspace ?)
-        2- Small input (fast heuristic ? Full comparison ? depend on clevel ?)
-
-
-   1.2) Literal block content
-
-   1.2.1) Huff0 block, using sizes from header
-        See Huff0 format
-
-   1.2.2) Huff0 block, using prepared table
-
-   1.2.3) Raw content
-
-   1.2.4) single byte
-
-
-   2) Sequences section
-      TO DO
-*/
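
As an illustration of the 3-byte "2-2-10-10" literal header described in 1.1.1, here is a hedged sketch that mirrors the lhSize==3 path of ZSTDv06_decodeLiteralsBlock() below; the names are hypothetical.

```c
#include <stdint.h>
#include <stddef.h>

typedef struct {
    unsigned type;          /* top 2 bits of byte 0 : IS_HUF / IS_PCH / IS_RAW / IS_RLE */
    int      singleStream;  /* bit 4, only meaningful for the Huff0 case */
    size_t   litSize;       /* regenerated size, 10 bits */
    size_t   litCSize;      /* compressed size, 10 bits */
} litHeaderSketch_t;

static void parseHufLiteralHeader3(const uint8_t h[3], litHeaderSketch_t* out)
{
    out->type         = h[0] >> 6;
    out->singleStream = h[0] & 16;
    out->litSize      = ((size_t)(h[0] & 15) << 6) + (h[1] >> 2);
    out->litCSize     = ((size_t)(h[1] &  3) << 8) + h[2];
}
```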
-
-/** ZSTDv06_frameHeaderSize() :
-*   srcSize must be >= ZSTDv06_frameHeaderSize_min.
-*   @return : size of the Frame Header */
-static size_t ZSTDv06_frameHeaderSize(const void* src, size_t srcSize)
-{
-    if (srcSize < ZSTDv06_frameHeaderSize_min) return ERROR(srcSize_wrong);
-    { U32 const fcsId = (((const BYTE*)src)[4]) >> 6;
-      return ZSTDv06_frameHeaderSize_min + ZSTDv06_fcs_fieldSize[fcsId]; }
-}
-
-
-/** ZSTDv06_getFrameParams() :
-*   decode Frame Header, or provide expected `srcSize`.
-*   @return : 0, `fparamsPtr` is correctly filled,
-*            >0, `srcSize` is too small, result is expected `srcSize`,
-*             or an error code, which can be tested using ZSTDv06_isError() */
-size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-
-    if (srcSize < ZSTDv06_frameHeaderSize_min) return ZSTDv06_frameHeaderSize_min;
-    if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) return ERROR(prefix_unknown);
-
-    /* ensure there is enough `srcSize` to fully read/decode frame header */
-    { size_t const fhsize = ZSTDv06_frameHeaderSize(src, srcSize);
-      if (srcSize < fhsize) return fhsize; }
-
-    memset(fparamsPtr, 0, sizeof(*fparamsPtr));
-    {   BYTE const frameDesc = ip[4];
-        fparamsPtr->windowLog = (frameDesc & 0xF) + ZSTDv06_WINDOWLOG_ABSOLUTEMIN;
-        if ((frameDesc & 0x20) != 0) return ERROR(frameParameter_unsupported);   /* reserved 1 bit */
-        switch(frameDesc >> 6)  /* fcsId */
-        {
-            default:   /* impossible */
-            case 0 : fparamsPtr->frameContentSize = 0; break;
-            case 1 : fparamsPtr->frameContentSize = ip[5]; break;
-            case 2 : fparamsPtr->frameContentSize = MEM_readLE16(ip+5)+256; break;
-            case 3 : fparamsPtr->frameContentSize = MEM_readLE64(ip+5); break;
-    }   }
-    return 0;
-}
-
-
-/** ZSTDv06_decodeFrameHeader() :
-*   `srcSize` must be the size provided by ZSTDv06_frameHeaderSize().
-*   @return : 0 if success, or an error code, which can be tested using ZSTDv06_isError() */
-static size_t ZSTDv06_decodeFrameHeader(ZSTDv06_DCtx* zc, const void* src, size_t srcSize)
-{
-    size_t const result = ZSTDv06_getFrameParams(&(zc->fParams), src, srcSize);
-    if ((MEM_32bits()) && (zc->fParams.windowLog > 25)) return ERROR(frameParameter_unsupported);
-    return result;
-}
-
-
-typedef struct
-{
-    blockType_t blockType;
-    U32 origSize;
-} blockProperties_t;
-
-/*! ZSTDv06_getcBlockSize() :
-*   Provides the size of compressed block from block header `src` */
-static size_t ZSTDv06_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
-{
-    const BYTE* const in = (const BYTE* const)src;
-    U32 cSize;
-
-    if (srcSize < ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
-
-    bpPtr->blockType = (blockType_t)((*in) >> 6);
-    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
-    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
-
-    if (bpPtr->blockType == bt_end) return 0;
-    if (bpPtr->blockType == bt_rle) return 1;
-    return cSize;
-}
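
For clarity, the 3-byte block header that ZSTDv06_getcBlockSize() parses can be pictured as below; `splitBlockHeader` is a hypothetical helper that only restates the same bit layout.

```c
#include <stdint.h>

/* mirrors the bit layout read by ZSTDv06_getcBlockSize() above */
static void splitBlockHeader(const uint8_t in[3],
                             unsigned* blockType, uint32_t* cSize)
{
    *blockType = in[0] >> 6;                          /* 2-bit block descriptor */
    *cSize     = (uint32_t)in[2]
               + ((uint32_t)in[1] << 8)
               + (((uint32_t)in[0] & 7) << 16);       /* size from low 3 bits of byte 0 + bytes 1-2 */
}
```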
-
-
-static size_t ZSTDv06_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    if (dst==NULL) return ERROR(dstSize_tooSmall);
-    if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
-    memcpy(dst, src, srcSize);
-    return srcSize;
-}
-
-
-/*! ZSTDv06_decodeLiteralsBlock() :
-    @return : nb of bytes read from src (< srcSize ) */
-static size_t ZSTDv06_decodeLiteralsBlock(ZSTDv06_DCtx* dctx,
-                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
-{
-    const BYTE* const istart = (const BYTE*) src;
-
-    /* any compressed block with literals segment must be at least this size */
-    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
-
-    switch(istart[0]>> 6)
-    {
-    case IS_HUF:
-        {   size_t litSize, litCSize, singleStream=0;
-            U32 lhSize = ((istart[0]) >> 4) & 3;
-            if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for lhSize, + cSize (+nbSeq) */
-            switch(lhSize)
-            {
-            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */
-                /* 2 - 2 - 10 - 10 */
-                lhSize=3;
-                singleStream = istart[0] & 16;
-                litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);
-                litCSize = ((istart[1] &  3) << 8) + istart[2];
-                break;
-            case 2:
-                /* 2 - 2 - 14 - 14 */
-                lhSize=4;
-                litSize  = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);
-                litCSize = ((istart[2] & 63) <<  8) + istart[3];
-                break;
-            case 3:
-                /* 2 - 2 - 18 - 18 */
-                lhSize=5;
-                litSize  = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);
-                litCSize = ((istart[2] &  3) << 16) + (istart[3] << 8) + istart[4];
-                break;
-            }
-            if (litSize > ZSTDv06_BLOCKSIZE_MAX) return ERROR(corruption_detected);
-            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
-
-            if (HUFv06_isError(singleStream ?
-                            HUFv06_decompress1X2(dctx->litBuffer, litSize, istart+lhSize, litCSize) :
-                            HUFv06_decompress   (dctx->litBuffer, litSize, istart+lhSize, litCSize) ))
-                return ERROR(corruption_detected);
-
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
-            return litCSize + lhSize;
-        }
-    case IS_PCH:
-        {   size_t litSize, litCSize;
-            U32 lhSize = ((istart[0]) >> 4) & 3;
-            if (lhSize != 1)  /* only case supported for now : small litSize, single stream */
-                return ERROR(corruption_detected);
-            if (!dctx->flagRepeatTable)
-                return ERROR(dictionary_corrupted);
-
-            /* 2 - 2 - 10 - 10 */
-            lhSize=3;
-            litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);
-            litCSize = ((istart[1] &  3) << 8) + istart[2];
-            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
-
-            {   size_t const errorCode = HUFv06_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTableX4);
-                if (HUFv06_isError(errorCode)) return ERROR(corruption_detected);
-            }
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
-            return litCSize + lhSize;
-        }
-    case IS_RAW:
-        {   size_t litSize;
-            U32 lhSize = ((istart[0]) >> 4) & 3;
-            switch(lhSize)
-            {
-            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */
-                lhSize=1;
-                litSize = istart[0] & 31;
-                break;
-            case 2:
-                litSize = ((istart[0] & 15) << 8) + istart[1];
-                break;
-            case 3:
-                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
-                break;
-            }
-
-            if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
-                if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
-                memcpy(dctx->litBuffer, istart+lhSize, litSize);
-                dctx->litPtr = dctx->litBuffer;
-                dctx->litSize = litSize;
-                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
-                return lhSize+litSize;
-            }
-            /* direct reference into compressed stream */
-            dctx->litPtr = istart+lhSize;
-            dctx->litSize = litSize;
-            return lhSize+litSize;
-        }
-    case IS_RLE:
-        {   size_t litSize;
-            U32 lhSize = ((istart[0]) >> 4) & 3;
-            switch(lhSize)
-            {
-            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */
-                lhSize = 1;
-                litSize = istart[0] & 31;
-                break;
-            case 2:
-                litSize = ((istart[0] & 15) << 8) + istart[1];
-                break;
-            case 3:
-                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
-                if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
-                break;
-            }
-            if (litSize > ZSTDv06_BLOCKSIZE_MAX) return ERROR(corruption_detected);
-            memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            return lhSize+1;
-        }
-    default:
-        return ERROR(corruption_detected);   /* impossible */
-    }
-}
-
-
-/*! ZSTDv06_buildSeqTable() :
-    @return : nb bytes read from src,
-              or an error code if it fails, testable with ZSTDv06_isError()
-*/
-static size_t ZSTDv06_buildSeqTable(FSEv06_DTable* DTable, U32 type, U32 max, U32 maxLog,
-                                 const void* src, size_t srcSize,
-                                 const S16* defaultNorm, U32 defaultLog, U32 flagRepeatTable)
-{
-    switch(type)
-    {
-    case FSEv06_ENCODING_RLE :
-        if (!srcSize) return ERROR(srcSize_wrong);
-        if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
-        FSEv06_buildDTable_rle(DTable, *(const BYTE*)src);   /* if *src > max, data is corrupted */
-        return 1;
-    case FSEv06_ENCODING_RAW :
-        FSEv06_buildDTable(DTable, defaultNorm, max, defaultLog);
-        return 0;
-    case FSEv06_ENCODING_STATIC:
-        if (!flagRepeatTable) return ERROR(corruption_detected);
-        return 0;
-    default :   /* impossible */
-    case FSEv06_ENCODING_DYNAMIC :
-        {   U32 tableLog;
-            S16 norm[MaxSeq+1];
-            size_t const headerSize = FSEv06_readNCount(norm, &max, &tableLog, src, srcSize);
-            if (FSEv06_isError(headerSize)) return ERROR(corruption_detected);
-            if (tableLog > maxLog) return ERROR(corruption_detected);
-            FSEv06_buildDTable(DTable, norm, max, tableLog);
-            return headerSize;
-    }   }
-}
-
-
-static size_t ZSTDv06_decodeSeqHeaders(int* nbSeqPtr,
-                             FSEv06_DTable* DTableLL, FSEv06_DTable* DTableML, FSEv06_DTable* DTableOffb, U32 flagRepeatTable,
-                             const void* src, size_t srcSize)
-{
-    const BYTE* const istart = (const BYTE* const)src;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* ip = istart;
-
-    /* check */
-    if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
-
-    /* SeqHead */
-    {   int nbSeq = *ip++;
-        if (!nbSeq) { *nbSeqPtr=0; return 1; }
-        if (nbSeq > 0x7F) {
-            if (nbSeq == 0xFF) {
-                if (ip+2 > iend) return ERROR(srcSize_wrong);
-                nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
-            } else {
-                if (ip >= iend) return ERROR(srcSize_wrong);
-                nbSeq = ((nbSeq-0x80)<<8) + *ip++;
-            }
-        }
-        *nbSeqPtr = nbSeq;
-    }
-
-    /* FSE table descriptors */
-    if (ip + 4 > iend) return ERROR(srcSize_wrong); /* min : header byte + all 3 are "raw", hence no header, but at least xxLog bits per type */
-    {   U32 const LLtype  = *ip >> 6;
-        U32 const Offtype = (*ip >> 4) & 3;
-        U32 const MLtype  = (*ip >> 2) & 3;
-        ip++;
-
-        /* Build DTables */
-        {   size_t const bhSize = ZSTDv06_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable);
-            if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);
-            ip += bhSize;
-        }
-        {   size_t const bhSize = ZSTDv06_buildSeqTable(DTableOffb, Offtype, MaxOff, OffFSELog, ip, iend-ip, OF_defaultNorm, OF_defaultNormLog, flagRepeatTable);
-            if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);
-            ip += bhSize;
-        }
-        {   size_t const bhSize = ZSTDv06_buildSeqTable(DTableML, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_defaultNorm, ML_defaultNormLog, flagRepeatTable);
-            if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);
-            ip += bhSize;
-    }   }
-
-    return ip-istart;
-}
-
-
-typedef struct {
-    size_t litLength;
-    size_t matchLength;
-    size_t offset;
-} seq_t;
-
-typedef struct {
-    BITv06_DStream_t DStream;
-    FSEv06_DState_t stateLL;
-    FSEv06_DState_t stateOffb;
-    FSEv06_DState_t stateML;
-    size_t prevOffset[ZSTDv06_REP_INIT];
-} seqState_t;
-
-
-
-static void ZSTDv06_decodeSequence(seq_t* seq, seqState_t* seqState)
-{
-    /* Literal length */
-    U32 const llCode = FSEv06_peekSymbol(&(seqState->stateLL));
-    U32 const mlCode = FSEv06_peekSymbol(&(seqState->stateML));
-    U32 const ofCode = FSEv06_peekSymbol(&(seqState->stateOffb));   /* <= maxOff, by table construction */
-
-    U32 const llBits = LL_bits[llCode];
-    U32 const mlBits = ML_bits[mlCode];
-    U32 const ofBits = ofCode;
-    U32 const totalBits = llBits+mlBits+ofBits;
-
-    static const U32 LL_base[MaxLL+1] = {
-                             0,  1,  2,  3,  4,  5,  6,  7,  8,  9,   10,    11,    12,    13,    14,     15,
-                            16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
-                            0x2000, 0x4000, 0x8000, 0x10000 };
-
-    static const U32 ML_base[MaxML+1] = {
-                             0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10,   11,    12,    13,    14,    15,
-                            16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,   27,    28,    29,    30,    31,
-                            32, 34, 36, 38, 40, 44, 48, 56, 64, 80, 96, 0x80, 0x100, 0x200, 0x400, 0x800,
-                            0x1000, 0x2000, 0x4000, 0x8000, 0x10000 };
-
-    static const U32 OF_base[MaxOff+1] = {
-                 0,        1,       3,       7,     0xF,     0x1F,     0x3F,     0x7F,
-                 0xFF,   0x1FF,   0x3FF,   0x7FF,   0xFFF,   0x1FFF,   0x3FFF,   0x7FFF,
-                 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
-                 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, /*fake*/ 1, 1 };
-
-    /* sequence */
-    {   size_t offset;
-        if (!ofCode)
-            offset = 0;
-        else {
-            offset = OF_base[ofCode] + BITv06_readBits(&(seqState->DStream), ofBits);   /* <=  26 bits */
-            if (MEM_32bits()) BITv06_reloadDStream(&(seqState->DStream));
-        }
-
-        if (offset < ZSTDv06_REP_NUM) {
-            if (llCode == 0 && offset <= 1) offset = 1-offset;
-
-            if (offset != 0) {
-                size_t temp = seqState->prevOffset[offset];
-                if (offset != 1) {
-                    seqState->prevOffset[2] = seqState->prevOffset[1];
-                }
-                seqState->prevOffset[1] = seqState->prevOffset[0];
-                seqState->prevOffset[0] = offset = temp;
-
-            } else {
-                offset = seqState->prevOffset[0];
-            }
-        } else {
-            offset -= ZSTDv06_REP_MOVE;
-            seqState->prevOffset[2] = seqState->prevOffset[1];
-            seqState->prevOffset[1] = seqState->prevOffset[0];
-            seqState->prevOffset[0] = offset;
-        }
-        seq->offset = offset;
-    }
-
-    seq->matchLength = ML_base[mlCode] + MINMATCH + ((mlCode>31) ? BITv06_readBits(&(seqState->DStream), mlBits) : 0);   /* <=  16 bits */
-    if (MEM_32bits() && (mlBits+llBits>24)) BITv06_reloadDStream(&(seqState->DStream));
-
-    seq->litLength = LL_base[llCode] + ((llCode>15) ? BITv06_readBits(&(seqState->DStream), llBits) : 0);   /* <=  16 bits */
-    if (MEM_32bits() ||
-       (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BITv06_reloadDStream(&(seqState->DStream));
-
-    /* ANS state update */
-    FSEv06_updateState(&(seqState->stateLL), &(seqState->DStream));   /* <=  9 bits */
-    FSEv06_updateState(&(seqState->stateML), &(seqState->DStream));   /* <=  9 bits */
-    if (MEM_32bits()) BITv06_reloadDStream(&(seqState->DStream));     /* <= 18 bits */
-    FSEv06_updateState(&(seqState->stateOffb), &(seqState->DStream)); /* <=  8 bits */
-}
-
-
-static size_t ZSTDv06_execSequence(BYTE* op,
-                                BYTE* const oend, seq_t sequence,
-                                const BYTE** litPtr, const BYTE* const litLimit,
-                                const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
-{
-    BYTE* const oLitEnd = op + sequence.litLength;
-    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
-    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
-    BYTE* const oend_8 = oend-8;
-    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
-    const BYTE* match = oLitEnd - sequence.offset;
-
-    /* check */
-    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */
-    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
-    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
-
-    /* copy Literals */
-    ZSTDv06_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
-    op = oLitEnd;
-    *litPtr = iLitEnd;   /* update for next sequence */
-
-    /* copy Match */
-    if (sequence.offset > (size_t)(oLitEnd - base)) {
-        /* offset beyond prefix */
-        if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
-        match = dictEnd - (base-match);
-        if (match + sequence.matchLength <= dictEnd) {
-            memmove(oLitEnd, match, sequence.matchLength);
-            return sequenceLength;
-        }
-        /* span extDict & currentPrefixSegment */
-        {   size_t const length1 = dictEnd - match;
-            memmove(oLitEnd, match, length1);
-            op = oLitEnd + length1;
-            sequence.matchLength -= length1;
-            match = base;
-            if (op > oend_8 || sequence.matchLength < MINMATCH) {
-              while (op < oMatchEnd) *op++ = *match++;
-              return sequenceLength;
-            }
-    }   }
-    /* Requirement: op <= oend_8 */
-
-    /* match within prefix */
-    if (sequence.offset < 8) {
-        /* close range match, overlap */
-        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
-        int const sub2 = dec64table[sequence.offset];
-        op[0] = match[0];
-        op[1] = match[1];
-        op[2] = match[2];
-        op[3] = match[3];
-        match += dec32table[sequence.offset];
-        ZSTDv06_copy4(op+4, match);
-        match -= sub2;
-    } else {
-        ZSTDv06_copy8(op, match);
-    }
-    op += 8; match += 8;
-
-    if (oMatchEnd > oend-(16-MINMATCH)) {
-        if (op < oend_8) {
-            ZSTDv06_wildcopy(op, match, oend_8 - op);
-            match += oend_8 - op;
-            op = oend_8;
-        }
-        while (op < oMatchEnd) *op++ = *match++;
-    } else {
-        ZSTDv06_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
-    }
-    return sequenceLength;
-}
-
-
-static size_t ZSTDv06_decompressSequences(
-                               ZSTDv06_DCtx* dctx,
-                               void* dst, size_t maxDstSize,
-                         const void* seqStart, size_t seqSize)
-{
-    const BYTE* ip = (const BYTE*)seqStart;
-    const BYTE* const iend = ip + seqSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* const oend = ostart + maxDstSize;
-    BYTE* op = ostart;
-    const BYTE* litPtr = dctx->litPtr;
-    const BYTE* const litEnd = litPtr + dctx->litSize;
-    FSEv06_DTable* DTableLL = dctx->LLTable;
-    FSEv06_DTable* DTableML = dctx->MLTable;
-    FSEv06_DTable* DTableOffb = dctx->OffTable;
-    const BYTE* const base = (const BYTE*) (dctx->base);
-    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
-    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
-    int nbSeq;
-
-    /* Build Decoding Tables */
-    {   size_t const seqHSize = ZSTDv06_decodeSeqHeaders(&nbSeq, DTableLL, DTableML, DTableOffb, dctx->flagRepeatTable, ip, seqSize);
-        if (ZSTDv06_isError(seqHSize)) return seqHSize;
-        ip += seqHSize;
-        dctx->flagRepeatTable = 0;
-    }
-
-    /* Regen sequences */
-    if (nbSeq) {
-        seq_t sequence;
-        seqState_t seqState;
-
-        memset(&sequence, 0, sizeof(sequence));
-        sequence.offset = REPCODE_STARTVALUE;
-        { U32 i; for (i=0; i<ZSTDv06_REP_INIT; i++) seqState.prevOffset[i] = REPCODE_STARTVALUE; }
-        { size_t const errorCode = BITv06_initDStream(&(seqState.DStream), ip, iend-ip);
-          if (ERR_isError(errorCode)) return ERROR(corruption_detected); }
-        FSEv06_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
-        FSEv06_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
-        FSEv06_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
-
-        for ( ; (BITv06_reloadDStream(&(seqState.DStream)) <= BITv06_DStream_completed) && nbSeq ; ) {
-            nbSeq--;
-            ZSTDv06_decodeSequence(&sequence, &seqState);
-
-#if 0  /* debug */
-            static BYTE* start = NULL;
-            if (start==NULL) start = op;
-            size_t pos = (size_t)(op-start);
-            if ((pos >= 5810037) && (pos < 5810400))
-                printf("Dpos %6u :%5u literals & match %3u bytes at distance %6u \n",
-                       pos, (U32)sequence.litLength, (U32)sequence.matchLength, (U32)sequence.offset);
-#endif
-
-            {   size_t const oneSeqSize = ZSTDv06_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
-                if (ZSTDv06_isError(oneSeqSize)) return oneSeqSize;
-                op += oneSeqSize;
-        }   }
-
-        /* check if reached exact end */
-        if (nbSeq) return ERROR(corruption_detected);
-    }
-
-    /* last literal segment */
-    {   size_t const lastLLSize = litEnd - litPtr;
-        if (litPtr > litEnd) return ERROR(corruption_detected);   /* too many literals already used */
-        if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
-        memcpy(op, litPtr, lastLLSize);
-        op += lastLLSize;
-    }
-
-    return op-ostart;
-}
-
-
-static void ZSTDv06_checkContinuity(ZSTDv06_DCtx* dctx, const void* dst)
-{
-    if (dst != dctx->previousDstEnd) {   /* not contiguous */
-        dctx->dictEnd = dctx->previousDstEnd;
-        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
-        dctx->base = dst;
-        dctx->previousDstEnd = dst;
-    }
-}
-
-
-static size_t ZSTDv06_decompressBlock_internal(ZSTDv06_DCtx* dctx,
-                            void* dst, size_t dstCapacity,
-                      const void* src, size_t srcSize)
-{   /* blockType == blockCompressed */
-    const BYTE* ip = (const BYTE*)src;
-
-    if (srcSize >= ZSTDv06_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
-
-    /* Decode literals sub-block */
-    {   size_t const litCSize = ZSTDv06_decodeLiteralsBlock(dctx, src, srcSize);
-        if (ZSTDv06_isError(litCSize)) return litCSize;
-        ip += litCSize;
-        srcSize -= litCSize;
-    }
-    return ZSTDv06_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
-}
-
-
-size_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx,
-                            void* dst, size_t dstCapacity,
-                      const void* src, size_t srcSize)
-{
-    ZSTDv06_checkContinuity(dctx, dst);
-    return ZSTDv06_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
-}
-
-
-/*! ZSTDv06_decompressFrame() :
-*   `dctx` must be properly initialized */
-static size_t ZSTDv06_decompressFrame(ZSTDv06_DCtx* dctx,
-                                 void* dst, size_t dstCapacity,
-                                 const void* src, size_t srcSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-    const BYTE* const iend = ip + srcSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* op = ostart;
-    BYTE* const oend = ostart + dstCapacity;
-    size_t remainingSize = srcSize;
-    blockProperties_t blockProperties = { bt_compressed, 0 };
-
-    /* check */
-    if (srcSize < ZSTDv06_frameHeaderSize_min+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
-
-    /* Frame Header */
-    {   size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min);
-        if (ZSTDv06_isError(frameHeaderSize)) return frameHeaderSize;
-        if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
-        if (ZSTDv06_decodeFrameHeader(dctx, src, frameHeaderSize)) return ERROR(corruption_detected);
-        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
-    }
-
-    /* Loop on each block */
-    while (1) {
-        size_t decodedSize=0;
-        size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, iend-ip, &blockProperties);
-        if (ZSTDv06_isError(cBlockSize)) return cBlockSize;
-
-        ip += ZSTDv06_blockHeaderSize;
-        remainingSize -= ZSTDv06_blockHeaderSize;
-        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
-
-        switch(blockProperties.blockType)
-        {
-        case bt_compressed:
-            decodedSize = ZSTDv06_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);
-            break;
-        case bt_raw :
-            decodedSize = ZSTDv06_copyRawBlock(op, oend-op, ip, cBlockSize);
-            break;
-        case bt_rle :
-            return ERROR(GENERIC);   /* not yet supported */
-            break;
-        case bt_end :
-            /* end of frame */
-            if (remainingSize) return ERROR(srcSize_wrong);
-            break;
-        default:
-            return ERROR(GENERIC);   /* impossible */
-        }
-        if (cBlockSize == 0) break;   /* bt_end */
-
-        if (ZSTDv06_isError(decodedSize)) return decodedSize;
-        op += decodedSize;
-        ip += cBlockSize;
-        remainingSize -= cBlockSize;
-    }
-
-    return op-ostart;
-}
-
-
-size_t ZSTDv06_decompress_usingPreparedDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* refDCtx,
-                                         void* dst, size_t dstCapacity,
-                                   const void* src, size_t srcSize)
-{
-    ZSTDv06_copyDCtx(dctx, refDCtx);
-    ZSTDv06_checkContinuity(dctx, dst);
-    return ZSTDv06_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
-}
-
-
-size_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx,
-                                 void* dst, size_t dstCapacity,
-                                 const void* src, size_t srcSize,
-                                 const void* dict, size_t dictSize)
-{
-    ZSTDv06_decompressBegin_usingDict(dctx, dict, dictSize);
-    ZSTDv06_checkContinuity(dctx, dst);
-    return ZSTDv06_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
-}
-
-
-size_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    return ZSTDv06_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
-}
-
-
-size_t ZSTDv06_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-#if defined(ZSTDv06_HEAPMODE) && (ZSTDv06_HEAPMODE==1)
-    size_t regenSize;
-    ZSTDv06_DCtx* dctx = ZSTDv06_createDCtx();
-    if (dctx==NULL) return ERROR(memory_allocation);
-    regenSize = ZSTDv06_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
-    ZSTDv06_freeDCtx(dctx);
-    return regenSize;
-#else   /* stack mode */
-    ZSTDv06_DCtx dctx;
-    return ZSTDv06_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
-#endif
-}
-
-/* ZSTD_errorFrameSizeInfoLegacy() :
-   assumes `cSize` and `dBound` are _not_ NULL */
-static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
-{
-    *cSize = ret;
-    *dBound = ZSTD_CONTENTSIZE_ERROR;
-}
-
-void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
-{
-    const BYTE* ip = (const BYTE*)src;
-    size_t remainingSize = srcSize;
-    size_t nbBlocks = 0;
-    blockProperties_t blockProperties = { bt_compressed, 0 };
-
-    /* Frame Header */
-    {   size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, srcSize);
-        if (ZSTDv06_isError(frameHeaderSize)) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize);
-            return;
-        }
-        if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
-            return;
-        }
-        if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-            return;
-        }
-        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
-    }
-
-    /* Loop on each block */
-    while (1) {
-        size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, remainingSize, &blockProperties);
-        if (ZSTDv06_isError(cBlockSize)) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
-            return;
-        }
-
-        ip += ZSTDv06_blockHeaderSize;
-        remainingSize -= ZSTDv06_blockHeaderSize;
-        if (cBlockSize > remainingSize) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-            return;
-        }
-
-        if (cBlockSize == 0) break;   /* bt_end */
-
-        ip += cBlockSize;
-        remainingSize -= cBlockSize;
-        nbBlocks++;
-    }
-
-    *cSize = ip - (const BYTE*)src;
-    *dBound = nbBlocks * ZSTDv06_BLOCKSIZE_MAX;
-}
-
-/*_******************************
-*  Streaming Decompression API
-********************************/
-size_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx)
-{
-    return dctx->expected;
-}
-
-size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    /* Sanity check */
-    if (srcSize != dctx->expected) return ERROR(srcSize_wrong);
-    if (dstCapacity) ZSTDv06_checkContinuity(dctx, dst);
-
-    /* Decompress : frame header; part 1 */
-    switch (dctx->stage)
-    {
-    case ZSTDds_getFrameHeaderSize :
-        if (srcSize != ZSTDv06_frameHeaderSize_min) return ERROR(srcSize_wrong);   /* impossible */
-        dctx->headerSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min);
-        if (ZSTDv06_isError(dctx->headerSize)) return dctx->headerSize;
-        memcpy(dctx->headerBuffer, src, ZSTDv06_frameHeaderSize_min);
-        if (dctx->headerSize > ZSTDv06_frameHeaderSize_min) {
-            dctx->expected = dctx->headerSize - ZSTDv06_frameHeaderSize_min;
-            dctx->stage = ZSTDds_decodeFrameHeader;
-            return 0;
-        }
-        dctx->expected = 0;   /* not necessary to copy more */
-	/* fall-through */
-    case ZSTDds_decodeFrameHeader:
-        {   size_t result;
-            memcpy(dctx->headerBuffer + ZSTDv06_frameHeaderSize_min, src, dctx->expected);
-            result = ZSTDv06_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize);
-            if (ZSTDv06_isError(result)) return result;
-            dctx->expected = ZSTDv06_blockHeaderSize;
-            dctx->stage = ZSTDds_decodeBlockHeader;
-            return 0;
-        }
-    case ZSTDds_decodeBlockHeader:
-        {   blockProperties_t bp;
-            size_t const cBlockSize = ZSTDv06_getcBlockSize(src, ZSTDv06_blockHeaderSize, &bp);
-            if (ZSTDv06_isError(cBlockSize)) return cBlockSize;
-            if (bp.blockType == bt_end) {
-                dctx->expected = 0;
-                dctx->stage = ZSTDds_getFrameHeaderSize;
-            } else {
-                dctx->expected = cBlockSize;
-                dctx->bType = bp.blockType;
-                dctx->stage = ZSTDds_decompressBlock;
-            }
-            return 0;
-        }
-    case ZSTDds_decompressBlock:
-        {   size_t rSize;
-            switch(dctx->bType)
-            {
-            case bt_compressed:
-                rSize = ZSTDv06_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
-                break;
-            case bt_raw :
-                rSize = ZSTDv06_copyRawBlock(dst, dstCapacity, src, srcSize);
-                break;
-            case bt_rle :
-                return ERROR(GENERIC);   /* not yet handled */
-                break;
-            case bt_end :   /* should never happen (filtered at phase 1) */
-                rSize = 0;
-                break;
-            default:
-                return ERROR(GENERIC);   /* impossible */
-            }
-            dctx->stage = ZSTDds_decodeBlockHeader;
-            dctx->expected = ZSTDv06_blockHeaderSize;
-            dctx->previousDstEnd = (char*)dst + rSize;
-            return rSize;
-        }
-    default:
-        return ERROR(GENERIC);   /* impossible */
-    }
-}
-
-
-static void ZSTDv06_refDictContent(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    dctx->dictEnd = dctx->previousDstEnd;
-    dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
-    dctx->base = dict;
-    dctx->previousDstEnd = (const char*)dict + dictSize;
-}
-
-static size_t ZSTDv06_loadEntropy(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, litlengthHeaderSize;
-
-    hSize = HUFv06_readDTableX4(dctx->hufTableX4, dict, dictSize);
-    if (HUFv06_isError(hSize)) return ERROR(dictionary_corrupted);
-    dict = (const char*)dict + hSize;
-    dictSize -= hSize;
-
-    {   short offcodeNCount[MaxOff+1];
-        U32 offcodeMaxValue=MaxOff, offcodeLog;
-        offcodeHeaderSize = FSEv06_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dict, dictSize);
-        if (FSEv06_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
-        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
-        { size_t const errorCode = FSEv06_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);
-          if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }
-        dict = (const char*)dict + offcodeHeaderSize;
-        dictSize -= offcodeHeaderSize;
-    }
-
-    {   short matchlengthNCount[MaxML+1];
-        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
-        matchlengthHeaderSize = FSEv06_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dict, dictSize);
-        if (FSEv06_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
-        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
-        { size_t const errorCode = FSEv06_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);
-          if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }
-        dict = (const char*)dict + matchlengthHeaderSize;
-        dictSize -= matchlengthHeaderSize;
-    }
-
-    {   short litlengthNCount[MaxLL+1];
-        unsigned litlengthMaxValue = MaxLL, litlengthLog;
-        litlengthHeaderSize = FSEv06_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dict, dictSize);
-        if (FSEv06_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
-        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
-        { size_t const errorCode = FSEv06_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);
-          if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }
-    }
-
-    dctx->flagRepeatTable = 1;
-    return hSize + offcodeHeaderSize + matchlengthHeaderSize + litlengthHeaderSize;
-}
-
-static size_t ZSTDv06_decompress_insertDictionary(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    size_t eSize;
-    U32 const magic = MEM_readLE32(dict);
-    if (magic != ZSTDv06_DICT_MAGIC) {
-        /* pure content mode */
-        ZSTDv06_refDictContent(dctx, dict, dictSize);
-        return 0;
-    }
-    /* load entropy tables */
-    dict = (const char*)dict + 4;
-    dictSize -= 4;
-    eSize = ZSTDv06_loadEntropy(dctx, dict, dictSize);
-    if (ZSTDv06_isError(eSize)) return ERROR(dictionary_corrupted);
-
-    /* reference dictionary content */
-    dict = (const char*)dict + eSize;
-    dictSize -= eSize;
-    ZSTDv06_refDictContent(dctx, dict, dictSize);
-
-    return 0;
-}
-
-
-size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    { size_t const errorCode = ZSTDv06_decompressBegin(dctx);
-      if (ZSTDv06_isError(errorCode)) return errorCode; }
-
-    if (dict && dictSize) {
-        size_t const errorCode = ZSTDv06_decompress_insertDictionary(dctx, dict, dictSize);
-        if (ZSTDv06_isError(errorCode)) return ERROR(dictionary_corrupted);
-    }
-
-    return 0;
-}
-
-/*
-    Buffered version of Zstd compression library
-    Copyright (C) 2015-2016, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd homepage : http://www.zstd.net/
-*/
-
-
-/*-***************************************************************************
-*  Streaming decompression howto
-*
-*  A ZBUFFv06_DCtx object is required to track streaming operations.
-*  Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources.
-*  Use ZBUFFv06_decompressInit() to start a new decompression operation,
-*   or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary.
-*  Note that ZBUFFv06_DCtx objects can be re-init multiple times.
-*
-*  Use ZBUFFv06_decompressContinue() repetitively to consume your input.
-*  *srcSizePtr and *dstCapacityPtr can be any size.
-*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
-*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
-*  The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change @dst.
-*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
-*            or 0 when a frame is completely decoded,
-*            or an error code, which can be tested using ZBUFFv06_isError().
-*
-*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize()
-*  output : ZBUFFv06_recommendedDOutSize == 128 KB; the block size is the internal unit, which ensures it is always possible to write a full block once decoded.
-*  input  : ZBUFFv06_recommendedDInSize == 128KB + 3;
-*           just follow the indications from ZBUFFv06_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
-* *******************************************************************************/
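To make the howto above concrete, here is a minimal editorial sketch (not part of the vendored file) of the ZBUFFv06 streaming loop. It uses only functions declared in this file and the recommended buffer sizes, and simply discards the regenerated data; a real caller would flush it after each call.

```c
#include <stdlib.h>
#include "zstd_v06.h"   /* ZBUFFv06_* declarations */

/* Sketch: feed a v0.6 frame held in `src` to the streaming decoder in chunks,
 * discarding the regenerated data.  Returns 0 on success, 1 on error. */
static int streamDecompress_v06(const void* src, size_t srcSize)
{
    ZBUFFv06_DCtx* const zbd = ZBUFFv06_createDCtx();
    size_t const inChunk  = ZBUFFv06_recommendedDInSize();
    size_t const outChunk = ZBUFFv06_recommendedDOutSize();
    char* const outBuff = (char*)malloc(outChunk);
    const char* ip = (const char*)src;
    size_t remaining = srcSize;
    int result = 1;

    if (zbd == NULL || outBuff == NULL) goto _cleanup;
    {   size_t const initResult = ZBUFFv06_decompressInit(zbd);
        if (ZBUFFv06_isError(initResult)) goto _cleanup;
    }

    while (remaining > 0) {
        size_t srcUsed = (remaining < inChunk) ? remaining : inChunk;   /* in: available, out: consumed */
        size_t dstUsed = outChunk;                                      /* in: capacity,  out: written  */
        size_t const hint = ZBUFFv06_decompressContinue(zbd, outBuff, &dstUsed, ip, &srcUsed);
        if (ZBUFFv06_isError(hint)) goto _cleanup;
        ip += srcUsed; remaining -= srcUsed;
        /* a real caller would flush the dstUsed bytes sitting in outBuff here */
        if (hint == 0) { result = 0; break; }   /* frame completely decoded */
    }

_cleanup:
    free(outBuff);
    ZBUFFv06_freeDCtx(zbd);   /* accepts NULL */
    return result;
}
```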
-
-typedef enum { ZBUFFds_init, ZBUFFds_loadHeader,
-               ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFFv06_dStage;
-
-/* *** Resource management *** */
-struct ZBUFFv06_DCtx_s {
-    ZSTDv06_DCtx* zd;
-    ZSTDv06_frameParams fParams;
-    ZBUFFv06_dStage stage;
-    char*  inBuff;
-    size_t inBuffSize;
-    size_t inPos;
-    char*  outBuff;
-    size_t outBuffSize;
-    size_t outStart;
-    size_t outEnd;
-    size_t blockSize;
-    BYTE headerBuffer[ZSTDv06_FRAMEHEADERSIZE_MAX];
-    size_t lhSize;
-};   /* typedef'd to ZBUFFv06_DCtx within "zstd_buffered.h" */
-
-
-ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void)
-{
-    ZBUFFv06_DCtx* zbd = (ZBUFFv06_DCtx*)malloc(sizeof(ZBUFFv06_DCtx));
-    if (zbd==NULL) return NULL;
-    memset(zbd, 0, sizeof(*zbd));
-    zbd->zd = ZSTDv06_createDCtx();
-    zbd->stage = ZBUFFds_init;
-    return zbd;
-}
-
-size_t ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* zbd)
-{
-    if (zbd==NULL) return 0;   /* support free on null */
-    ZSTDv06_freeDCtx(zbd->zd);
-    free(zbd->inBuff);
-    free(zbd->outBuff);
-    free(zbd);
-    return 0;
-}
-
-
-/* *** Initialization *** */
-
-size_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* zbd, const void* dict, size_t dictSize)
-{
-    zbd->stage = ZBUFFds_loadHeader;
-    zbd->lhSize = zbd->inPos = zbd->outStart = zbd->outEnd = 0;
-    return ZSTDv06_decompressBegin_usingDict(zbd->zd, dict, dictSize);
-}
-
-size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* zbd)
-{
-    return ZBUFFv06_decompressInitDictionary(zbd, NULL, 0);
-}
-
-
-
-MEM_STATIC size_t ZBUFFv06_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    size_t length = MIN(dstCapacity, srcSize);
-    memcpy(dst, src, length);
-    return length;
-}
-
-
-/* *** Decompression *** */
-
-size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd,
-                                void* dst, size_t* dstCapacityPtr,
-                          const void* src, size_t* srcSizePtr)
-{
-    const char* const istart = (const char*)src;
-    const char* const iend = istart + *srcSizePtr;
-    const char* ip = istart;
-    char* const ostart = (char*)dst;
-    char* const oend = ostart + *dstCapacityPtr;
-    char* op = ostart;
-    U32 notDone = 1;
-
-    while (notDone) {
-        switch(zbd->stage)
-        {
-        case ZBUFFds_init :
-            return ERROR(init_missing);
-
-        case ZBUFFds_loadHeader :
-            {   size_t const hSize = ZSTDv06_getFrameParams(&(zbd->fParams), zbd->headerBuffer, zbd->lhSize);
-                if (hSize != 0) {
-                    size_t const toLoad = hSize - zbd->lhSize;   /* if hSize!=0, hSize > zbd->lhSize */
-                    if (ZSTDv06_isError(hSize)) return hSize;
-                    if (toLoad > (size_t)(iend-ip)) {   /* not enough input to load full header */
-                        memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip);
-                        zbd->lhSize += iend-ip;
-                        *dstCapacityPtr = 0;
-                        return (hSize - zbd->lhSize) + ZSTDv06_blockHeaderSize;   /* remaining header bytes + next block header */
-                    }
-                    memcpy(zbd->headerBuffer + zbd->lhSize, ip, toLoad); zbd->lhSize = hSize; ip += toLoad;
-                    break;
-            }   }
-
-            /* Consume header */
-            {   size_t const h1Size = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);  /* == ZSTDv06_frameHeaderSize_min */
-                size_t const h1Result = ZSTDv06_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer, h1Size);
-                if (ZSTDv06_isError(h1Result)) return h1Result;
-                if (h1Size < zbd->lhSize) {   /* long header */
-                    size_t const h2Size = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
-                    size_t const h2Result = ZSTDv06_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer+h1Size, h2Size);
-                    if (ZSTDv06_isError(h2Result)) return h2Result;
-            }   }
-
-            /* Frame header instructs buffer sizes */
-            {   size_t const blockSize = MIN(1 << zbd->fParams.windowLog, ZSTDv06_BLOCKSIZE_MAX);
-                zbd->blockSize = blockSize;
-                if (zbd->inBuffSize < blockSize) {
-                    free(zbd->inBuff);
-                    zbd->inBuffSize = blockSize;
-                    zbd->inBuff = (char*)malloc(blockSize);
-                    if (zbd->inBuff == NULL) return ERROR(memory_allocation);
-                }
-                {   size_t const neededOutSize = ((size_t)1 << zbd->fParams.windowLog) + blockSize + WILDCOPY_OVERLENGTH * 2;
-                    if (zbd->outBuffSize < neededOutSize) {
-                        free(zbd->outBuff);
-                        zbd->outBuffSize = neededOutSize;
-                        zbd->outBuff = (char*)malloc(neededOutSize);
-                        if (zbd->outBuff == NULL) return ERROR(memory_allocation);
-            }   }   }
-            zbd->stage = ZBUFFds_read;
-	    /* fall-through */
-        case ZBUFFds_read:
-            {   size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
-                if (neededInSize==0) {  /* end of frame */
-                    zbd->stage = ZBUFFds_init;
-                    notDone = 0;
-                    break;
-                }
-                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */
-                    size_t const decodedSize = ZSTDv06_decompressContinue(zbd->zd,
-                        zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,
-                        ip, neededInSize);
-                    if (ZSTDv06_isError(decodedSize)) return decodedSize;
-                    ip += neededInSize;
-                    if (!decodedSize) break;   /* this was just a header */
-                    zbd->outEnd = zbd->outStart +  decodedSize;
-                    zbd->stage = ZBUFFds_flush;
-                    break;
-                }
-                if (ip==iend) { notDone = 0; break; }   /* no more input */
-                zbd->stage = ZBUFFds_load;
-            }
-	    /* fall-through */
-        case ZBUFFds_load:
-            {   size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
-                size_t const toLoad = neededInSize - zbd->inPos;   /* should always be <= remaining space within inBuff */
-                size_t loadedSize;
-                if (toLoad > zbd->inBuffSize - zbd->inPos) return ERROR(corruption_detected);   /* should never happen */
-                loadedSize = ZBUFFv06_limitCopy(zbd->inBuff + zbd->inPos, toLoad, ip, iend-ip);
-                ip += loadedSize;
-                zbd->inPos += loadedSize;
-                if (loadedSize < toLoad) { notDone = 0; break; }   /* not enough input, wait for more */
-
-                /* decode loaded input */
-                {   size_t const decodedSize = ZSTDv06_decompressContinue(zbd->zd,
-                        zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,
-                        zbd->inBuff, neededInSize);
-                    if (ZSTDv06_isError(decodedSize)) return decodedSize;
-                    zbd->inPos = 0;   /* input is consumed */
-                    if (!decodedSize) { zbd->stage = ZBUFFds_read; break; }   /* this was just a header */
-                    zbd->outEnd = zbd->outStart +  decodedSize;
-                    zbd->stage = ZBUFFds_flush;
-                    // break; /* ZBUFFds_flush follows */
-                }
-	    }
-	    /* fall-through */
-        case ZBUFFds_flush:
-            {   size_t const toFlushSize = zbd->outEnd - zbd->outStart;
-                size_t const flushedSize = ZBUFFv06_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize);
-                op += flushedSize;
-                zbd->outStart += flushedSize;
-                if (flushedSize == toFlushSize) {
-                    zbd->stage = ZBUFFds_read;
-                    if (zbd->outStart + zbd->blockSize > zbd->outBuffSize)
-                        zbd->outStart = zbd->outEnd = 0;
-                    break;
-                }
-                /* cannot flush everything */
-                notDone = 0;
-                break;
-            }
-        default: return ERROR(GENERIC);   /* impossible */
-    }   }
-
-    /* result */
-    *srcSizePtr = ip-istart;
-    *dstCapacityPtr = op-ostart;
-    {   size_t nextSrcSizeHint = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
-        if (nextSrcSizeHint > ZSTDv06_blockHeaderSize) nextSrcSizeHint+= ZSTDv06_blockHeaderSize;   /* get following block header too */
-        nextSrcSizeHint -= zbd->inPos;   /* already loaded*/
-        return nextSrcSizeHint;
-    }
-}
-
-
-
-/* *************************************
-*  Tool functions
-***************************************/
-size_t ZBUFFv06_recommendedDInSize(void)  { return ZSTDv06_BLOCKSIZE_MAX + ZSTDv06_blockHeaderSize /* block header size*/ ; }
-size_t ZBUFFv06_recommendedDOutSize(void) { return ZSTDv06_BLOCKSIZE_MAX; }
diff --git a/vendor/github.com/DataDog/zstd/zstd_v06.h b/vendor/github.com/DataDog/zstd/zstd_v06.h
deleted file mode 100644
index 0781857..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_v06.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTDv06_H
-#define ZSTDv06_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/*======  Dependency  ======*/
-#include <stddef.h>   /* size_t */
-
-
-/*======  Export for Windows  ======*/
-/*!
-*  ZSTDv06_DLL_EXPORT :
-*  Enable exporting of functions when building a Windows DLL
-*/
-#if defined(_WIN32) && defined(ZSTDv06_DLL_EXPORT) && (ZSTDv06_DLL_EXPORT==1)
-#  define ZSTDLIBv06_API __declspec(dllexport)
-#else
-#  define ZSTDLIBv06_API
-#endif
-
-
-/* *************************************
-*  Simple functions
-***************************************/
-/*! ZSTDv06_decompress() :
-    `compressedSize` : must be the _exact_ size of the compressed blob; otherwise decompression will fail.
-    `dstCapacity` must be large enough, equal or larger than originalSize.
-    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
-              or an errorCode if it fails (which can be tested using ZSTDv06_isError()) */
-ZSTDLIBv06_API size_t ZSTDv06_decompress( void* dst, size_t dstCapacity,
-                                    const void* src, size_t compressedSize);
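A minimal usage sketch for the one-shot API declared above; the caller is assumed to already know a sufficient `dstCapacity`, as the comment requires.

```c
#include <stdio.h>
#include "zstd_v06.h"

/* Sketch: one-shot decompression of a legacy v0.6 frame. */
static size_t decompress_v06(void* dst, size_t dstCapacity,
                             const void* src, size_t compressedSize)
{
    size_t const r = ZSTDv06_decompress(dst, dstCapacity, src, compressedSize);
    if (ZSTDv06_isError(r)) {
        fprintf(stderr, "ZSTDv06_decompress: %s\n", ZSTDv06_getErrorName(r));
        return 0;
    }
    return r;   /* number of bytes written into dst */
}
```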
-
-/**
-ZSTDv06_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.6.x format
-    srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
-    cSize (output parameter)  : the number of bytes that would be read to decompress this frame
-                                or an error code if it fails (which can be tested using ZSTDv06_isError())
-    dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
-                                or ZSTD_CONTENTSIZE_ERROR if an error occurs
-
-    note : assumes `cSize` and `dBound` are _not_ NULL.
-*/
-void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
-                                     size_t* cSize, unsigned long long* dBound);
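A short sketch of how the two output parameters are typically consumed: on failure the error code travels through `cSize`, as the note above states, so it can be tested with ZSTDv06_isError().

```c
#include "zstd_v06.h"

/* Sketch: size a destination buffer before decompressing a legacy v0.6 frame.
 * Returns 0 when the frame header is invalid; *cSize then holds the error code. */
static unsigned long long decompressedBound_v06(const void* src, size_t srcSize, size_t* cSize)
{
    unsigned long long dBound;
    ZSTDv06_findFrameSizeInfoLegacy(src, srcSize, cSize, &dBound);
    if (ZSTDv06_isError(*cSize)) return 0;
    return dBound;   /* worst case: nbBlocks * ZSTDv06_BLOCKSIZE_MAX */
}
```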
-
-/* *************************************
-*  Helper functions
-***************************************/
-ZSTDLIBv06_API size_t      ZSTDv06_compressBound(size_t srcSize); /*!< maximum compressed size (worst case scenario) */
-
-/* Error Management */
-ZSTDLIBv06_API unsigned    ZSTDv06_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
-ZSTDLIBv06_API const char* ZSTDv06_getErrorName(size_t code);     /*!< provides readable string for an error code */
-
-
-/* *************************************
-*  Explicit memory management
-***************************************/
-/** Decompression context */
-typedef struct ZSTDv06_DCtx_s ZSTDv06_DCtx;
-ZSTDLIBv06_API ZSTDv06_DCtx* ZSTDv06_createDCtx(void);
-ZSTDLIBv06_API size_t     ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx);      /*!< @return : errorCode */
-
-/** ZSTDv06_decompressDCtx() :
-*   Same as ZSTDv06_decompress(), but requires an already allocated ZSTDv06_DCtx (see ZSTDv06_createDCtx()) */
-ZSTDLIBv06_API size_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-
-
-/*-***********************
-*  Dictionary API
-*************************/
-/*! ZSTDv06_decompress_usingDict() :
-*   Decompression using a pre-defined Dictionary content (see dictBuilder).
-*   Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.
-*   Note : dict can be NULL, in which case, it's equivalent to ZSTDv06_decompressDCtx() */
-ZSTDLIBv06_API size_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx,
-                                                   void* dst, size_t dstCapacity,
-                                             const void* src, size_t srcSize,
-                                             const void* dict,size_t dictSize);
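A minimal sketch of dictionary decompression with an explicitly managed context, per the note above (a NULL dict degenerates to ZSTDv06_decompressDCtx()).

```c
#include "zstd_v06.h"

/* Sketch: decompress `src` using a dictionary identical to the one used at
 * compression time.  Returns 0 on any error. */
static size_t dictDecompress_v06(void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                 const void* dict, size_t dictSize)
{
    size_t r;
    ZSTDv06_DCtx* const dctx = ZSTDv06_createDCtx();
    if (dctx == NULL) return 0;
    r = ZSTDv06_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, dict, dictSize);
    ZSTDv06_freeDCtx(dctx);
    return ZSTDv06_isError(r) ? 0 : r;
}
```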
-
-
-/*-************************
-*  Advanced Streaming API
-***************************/
-struct ZSTDv06_frameParams_s { unsigned long long frameContentSize; unsigned windowLog; };
-typedef struct ZSTDv06_frameParams_s ZSTDv06_frameParams;
-
-ZSTDLIBv06_API size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
-ZSTDLIBv06_API size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize);
-ZSTDLIBv06_API void   ZSTDv06_copyDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx);
-
-ZSTDLIBv06_API size_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx);
-ZSTDLIBv06_API size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-
-
-
-/* *************************************
-*  ZBUFF API
-***************************************/
-
-typedef struct ZBUFFv06_DCtx_s ZBUFFv06_DCtx;
-ZSTDLIBv06_API ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void);
-ZSTDLIBv06_API size_t         ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* dctx);
-
-ZSTDLIBv06_API size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* dctx);
-ZSTDLIBv06_API size_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* dctx, const void* dict, size_t dictSize);
-
-ZSTDLIBv06_API size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* dctx,
-                                                  void* dst, size_t* dstCapacityPtr,
-                                            const void* src, size_t* srcSizePtr);
-
-/*-***************************************************************************
-*  Streaming decompression howto
-*
-*  A ZBUFFv06_DCtx object is required to track streaming operations.
-*  Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources.
-*  Use ZBUFFv06_decompressInit() to start a new decompression operation,
-*   or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary.
-*  Note that ZBUFFv06_DCtx objects can be re-init multiple times.
-*
-*  Use ZBUFFv06_decompressContinue() repetitively to consume your input.
-*  *srcSizePtr and *dstCapacityPtr can be any size.
-*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
-*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
-*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
-*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
-*            or 0 when a frame is completely decoded,
-*            or an error code, which can be tested using ZBUFFv06_isError().
-*
-*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize()
-*  output : ZBUFFv06_recommendedDOutSize == 128 KB; the block size is the internal unit, which ensures it is always possible to write a full block once decoded.
-*  input  : ZBUFFv06_recommendedDInSize == 128KB + 3;
-*           just follow the indications from ZBUFFv06_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
-* *******************************************************************************/
-
-
-/* *************************************
-*  Tool functions
-***************************************/
-ZSTDLIBv06_API unsigned ZBUFFv06_isError(size_t errorCode);
-ZSTDLIBv06_API const char* ZBUFFv06_getErrorName(size_t errorCode);
-
-/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
-*   These sizes are just hints; they tend to offer better latency */
-ZSTDLIBv06_API size_t ZBUFFv06_recommendedDInSize(void);
-ZSTDLIBv06_API size_t ZBUFFv06_recommendedDOutSize(void);
-
-
-/*-*************************************
-*  Constants
-***************************************/
-#define ZSTDv06_MAGICNUMBER 0xFD2FB526   /* v0.6 */
-
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif  /* ZSTDv06_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v07.c b/vendor/github.com/DataDog/zstd/zstd_v07.c
deleted file mode 100644
index a83ddc9..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_v07.c
+++ /dev/null
@@ -1,4533 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-/*- Dependencies -*/
-#include <stddef.h>     /* size_t, ptrdiff_t */
-#include <string.h>     /* memcpy */
-#include <stdlib.h>     /* malloc, free, qsort */
-
-#ifndef XXH_STATIC_LINKING_ONLY
-#  define XXH_STATIC_LINKING_ONLY    /* XXH64_state_t */
-#endif
-#include "xxhash.h"                  /* XXH64_* */
-#include "zstd_v07.h"
-
-#define FSEv07_STATIC_LINKING_ONLY   /* FSEv07_MIN_TABLELOG */
-#define HUFv07_STATIC_LINKING_ONLY   /* HUFv07_TABLELOG_ABSOLUTEMAX */
-#define ZSTDv07_STATIC_LINKING_ONLY
-
-#include "error_private.h"
-
-
-#ifdef ZSTDv07_STATIC_LINKING_ONLY
-
-/* ====================================================================================
- * The definitions in this section are considered experimental.
- * They should never be used with a dynamic library, as they may change in the future.
- * They are provided for advanced usages.
- * Use them only in association with static linking.
- * ==================================================================================== */
-
-/*--- Constants ---*/
-#define ZSTDv07_MAGIC_SKIPPABLE_START  0x184D2A50U
-
-#define ZSTDv07_WINDOWLOG_MAX_32  25
-#define ZSTDv07_WINDOWLOG_MAX_64  27
-#define ZSTDv07_WINDOWLOG_MAX    ((U32)(MEM_32bits() ? ZSTDv07_WINDOWLOG_MAX_32 : ZSTDv07_WINDOWLOG_MAX_64))
-#define ZSTDv07_WINDOWLOG_MIN     18
-#define ZSTDv07_CHAINLOG_MAX     (ZSTDv07_WINDOWLOG_MAX+1)
-#define ZSTDv07_CHAINLOG_MIN       4
-#define ZSTDv07_HASHLOG_MAX       ZSTDv07_WINDOWLOG_MAX
-#define ZSTDv07_HASHLOG_MIN       12
-#define ZSTDv07_HASHLOG3_MAX      17
-#define ZSTDv07_SEARCHLOG_MAX    (ZSTDv07_WINDOWLOG_MAX-1)
-#define ZSTDv07_SEARCHLOG_MIN      1
-#define ZSTDv07_SEARCHLENGTH_MAX   7
-#define ZSTDv07_SEARCHLENGTH_MIN   3
-#define ZSTDv07_TARGETLENGTH_MIN   4
-#define ZSTDv07_TARGETLENGTH_MAX 999
-
-#define ZSTDv07_FRAMEHEADERSIZE_MAX 18    /* for static allocation */
-static const size_t ZSTDv07_frameHeaderSize_min = 5;
-static const size_t ZSTDv07_frameHeaderSize_max = ZSTDv07_FRAMEHEADERSIZE_MAX;
-static const size_t ZSTDv07_skippableHeaderSize = 8;  /* magic number + skippable frame length */
-
-
-/* custom memory allocation functions */
-typedef void* (*ZSTDv07_allocFunction) (void* opaque, size_t size);
-typedef void  (*ZSTDv07_freeFunction) (void* opaque, void* address);
-typedef struct { ZSTDv07_allocFunction customAlloc; ZSTDv07_freeFunction customFree; void* opaque; } ZSTDv07_customMem;
-
-
-/*--- Advanced Decompression functions ---*/
-
-/*! ZSTDv07_estimateDCtxSize() :
- *  Gives the potential amount of memory allocated to create a ZSTDv07_DCtx */
-ZSTDLIBv07_API size_t ZSTDv07_estimateDCtxSize(void);
-
-/*! ZSTDv07_createDCtx_advanced() :
- *  Create a ZSTD decompression context using external alloc and free functions */
-ZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx_advanced(ZSTDv07_customMem customMem);
-
-/*! ZSTDv07_sizeofDCtx() :
- *  Gives the amount of memory used by a given ZSTDv07_DCtx */
-ZSTDLIBv07_API size_t ZSTDv07_sizeofDCtx(const ZSTDv07_DCtx* dctx);
-
-
-/* ******************************************************************
-*  Buffer-less streaming functions (synchronous mode)
-********************************************************************/
-
-ZSTDLIBv07_API size_t ZSTDv07_decompressBegin(ZSTDv07_DCtx* dctx);
-ZSTDLIBv07_API size_t ZSTDv07_decompressBegin_usingDict(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize);
-ZSTDLIBv07_API void   ZSTDv07_copyDCtx(ZSTDv07_DCtx* dctx, const ZSTDv07_DCtx* preparedDCtx);
-
-ZSTDLIBv07_API size_t ZSTDv07_nextSrcSizeToDecompress(ZSTDv07_DCtx* dctx);
-ZSTDLIBv07_API size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-
-/*
-  Buffer-less streaming decompression (synchronous mode)
-
-  A ZSTDv07_DCtx object is required to track streaming operations.
-  Use ZSTDv07_createDCtx() / ZSTDv07_freeDCtx() to manage it.
-  A ZSTDv07_DCtx object can be re-used multiple times.
-
-  First optional operation is to retrieve frame parameters, using ZSTDv07_getFrameParams(), which doesn't consume the input.
-  It can provide the minimum size of rolling buffer required to properly decompress data (`windowSize`),
-  and optionally the final size of uncompressed content.
-  (Note : content size is an optional info that may not be present. 0 means : content size unknown)
-  Frame parameters are extracted from the beginning of compressed frame.
-  The amount of data to read is variable, from ZSTDv07_frameHeaderSize_min to ZSTDv07_frameHeaderSize_max (so if `srcSize` >= ZSTDv07_frameHeaderSize_max, it will always work)
-  If `srcSize` is too small for operation to succeed, function will return the minimum size it requires to produce a result.
-  Result : 0 when successful; it means the ZSTDv07_frameParams structure has been filled.
-          >0 : means there is not enough data in `src`; the value is the expected size needed to successfully decode the header.
-           or an errorCode, which can be tested using ZSTDv07_isError()
-
-  Start decompression, with ZSTDv07_decompressBegin() or ZSTDv07_decompressBegin_usingDict().
-  Alternatively, you can copy a prepared context, using ZSTDv07_copyDCtx().
-
-  Then use ZSTDv07_nextSrcSizeToDecompress() and ZSTDv07_decompressContinue() alternatively.
-  ZSTDv07_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv07_decompressContinue().
-  ZSTDv07_decompressContinue() requires this exact amount of bytes, or it will fail.
-
-  @result of ZSTDv07_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
-  It can be zero, which is not an error; it just means ZSTDv07_decompressContinue() has decoded some header.
-
-  ZSTDv07_decompressContinue() needs previous data blocks during decompression, up to `windowSize`.
-  They should preferably be located contiguously, prior to current block.
-  Alternatively, a round buffer of sufficient size is also possible. Sufficient size is determined by frame parameters.
-  ZSTDv07_decompressContinue() is very sensitive to contiguity;
-  if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
-    or that the previous contiguous segment is large enough to properly handle the maximum back-reference.
-
-  A frame is fully decoded when ZSTDv07_nextSrcSizeToDecompress() returns zero.
-  Context can then be reset to start a new decompression.
-
-
-  == Special case : skippable frames ==
-
-  Skippable frames allow the integration of user-defined data into a flow of concatenated frames.
-  Skippable frames will be ignored (skipped) by a decompressor. The format of a skippable frame is as follows:
-  a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
-  b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
-  c) Frame Content - any content (User Data) of length equal to Frame Size
-  For skippable frames ZSTDv07_decompressContinue() always returns 0.
-  For skippable frames, ZSTDv07_getFrameParams() returns fparamsPtr->windowLog==0, which means that the frame is skippable.
-  It also returns Frame Size as fparamsPtr->frameContentSize.
-*/
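A condensed editorial sketch of the buffer-less loop described above, decoding a whole in-memory frame into one contiguous destination buffer so the contiguity requirement is satisfied. ZSTDv07_createDCtx(), ZSTDv07_freeDCtx() and ZSTDv07_isError() are referenced by the comment but declared elsewhere in zstd_v07.h; dictionaries and skippable frames are left out.

```c
#include "zstd_v07.h"

/* Sketch: buffer-less decompression of one in-memory frame.
 * Returns the regenerated size, or 0 on error/truncated input. */
static size_t bufferlessDecompress_v07(void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize)
{
    ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx();
    const char* ip = (const char*)src;
    const char* const iend = ip + srcSize;
    char* op = (char*)dst;
    size_t regenerated = 0;

    if (dctx == NULL) return 0;
    if (ZSTDv07_isError(ZSTDv07_decompressBegin(dctx))) { ZSTDv07_freeDCtx(dctx); return 0; }

    for (;;) {
        size_t const toRead = ZSTDv07_nextSrcSizeToDecompress(dctx);
        size_t decoded;
        if (toRead == 0) break;                                       /* frame fully decoded */
        if (toRead > (size_t)(iend - ip)) { regenerated = 0; break; } /* truncated input */
        decoded = ZSTDv07_decompressContinue(dctx, op, dstCapacity - regenerated, ip, toRead);
        if (ZSTDv07_isError(decoded)) { regenerated = 0; break; }
        ip += toRead; op += decoded; regenerated += decoded;          /* decoded may be 0 (header) */
    }

    ZSTDv07_freeDCtx(dctx);
    return regenerated;
}
```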
-
-
-/* **************************************
-*  Block functions
-****************************************/
-/*! Block functions produce and decode raw zstd blocks, without frame metadata.
-    Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
-    The user will have to keep track of the information required to regenerate the data, such as the compressed and content sizes.
-
-    A few rules to respect :
-    - Compressing and decompressing require a context structure
-      + Use ZSTDv07_createCCtx() and ZSTDv07_createDCtx()
-    - It is necessary to init context before starting
-      + compression : ZSTDv07_compressBegin()
-      + decompression : ZSTDv07_decompressBegin()
-      + variants _usingDict() are also allowed
-      + copyCCtx() and copyDCtx() work too
-    - Block size is limited, it must be <= ZSTDv07_getBlockSizeMax()
-      + If you need to compress more, cut data into multiple blocks
-      + Consider using the regular ZSTDv07_compress() instead, as frame metadata costs become negligible when source size is large.
-    - When a block is considered not compressible enough, ZSTDv07_compressBlock() will return zero,
-      in which case nothing is produced into `dst`.
-      + User must test for such outcome and deal directly with uncompressed data
-      + ZSTDv07_decompressBlock() doesn't accept uncompressed data as input !!!
-      + In case of multiple successive blocks, decoder must be informed of uncompressed block existence to follow proper history.
-        Use ZSTDv07_insertBlock() in such a case.
-*/
-
-#define ZSTDv07_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)   /* define, for static allocation */
-ZSTDLIBv07_API size_t ZSTDv07_decompressBlock(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-ZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert block into `dctx` history. Useful for uncompressed blocks */
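A small sketch of the block-level rules above: `dctx` is assumed to have been initialized with ZSTDv07_decompressBegin(), and a block that was stored uncompressed is copied into the output window by the caller and then registered with ZSTDv07_insertBlock() so the history stays consistent. `wasCompressed` is tracked by the caller, since raw blocks carry no frame metadata.

```c
#include <string.h>
#include "zstd_v07.h"

/* Sketch: handle one block according to the rules listed above. */
static size_t decodeOneBlock_v07(ZSTDv07_DCtx* dctx,
                                 void* dst, size_t dstCapacity,
                                 const void* block, size_t blockSize,
                                 int wasCompressed)
{
    if (!wasCompressed) {
        /* uncompressed block: copy it into the output window ourselves,
         * then register it so later blocks can reference this history */
        if (blockSize > dstCapacity) return 0;
        memcpy(dst, block, blockSize);
        ZSTDv07_insertBlock(dctx, dst, blockSize);
        return blockSize;
    }
    /* ZSTDv07_decompressBlock() only accepts compressed block payloads */
    return ZSTDv07_decompressBlock(dctx, dst, dstCapacity, block, blockSize);
}
```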
-
-
-#endif   /* ZSTDv07_STATIC_LINKING_ONLY */
-
-
-/* ******************************************************************
-   mem.h
-   low-level memory access routines
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-#ifndef MEM_H_MODULE
-#define MEM_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/*-****************************************
-*  Compiler specifics
-******************************************/
-#if defined(_MSC_VER)   /* Visual Studio */
-#   include <stdlib.h>  /* _byteswap_ulong */
-#   include <intrin.h>  /* _byteswap_* */
-#endif
-#if defined(__GNUC__)
-#  define MEM_STATIC static __attribute__((unused))
-#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#  define MEM_STATIC static inline
-#elif defined(_MSC_VER)
-#  define MEM_STATIC static __inline
-#else
-#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
-#endif
-
-
-/*-**************************************************************
-*  Basic Types
-*****************************************************************/
-#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-# include <stdint.h>
-  typedef  uint8_t BYTE;
-  typedef uint16_t U16;
-  typedef  int16_t S16;
-  typedef uint32_t U32;
-  typedef  int32_t S32;
-  typedef uint64_t U64;
-  typedef  int64_t S64;
-#else
-  typedef unsigned char       BYTE;
-  typedef unsigned short      U16;
-  typedef   signed short      S16;
-  typedef unsigned int        U32;
-  typedef   signed int        S32;
-  typedef unsigned long long  U64;
-  typedef   signed long long  S64;
-#endif
-
-
-/*-**************************************************************
-*  Memory I/O
-*****************************************************************/
-/* MEM_FORCE_MEMORY_ACCESS :
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The switch below allows selecting a different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
- *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method is portable but violates the C standard.
- *            It can generate buggy code on targets depending on alignment.
- *            In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
- * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-#    define MEM_FORCE_MEMORY_ACCESS 2
-#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
-  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
-#    define MEM_FORCE_MEMORY_ACCESS 1
-#  endif
-#endif
-
-MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
-MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
-
-MEM_STATIC unsigned MEM_isLittleEndian(void)
-{
-    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
-    return one.c[0];
-}
-
-#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
-
-/* violates C standard, by lying on structure alignment.
-Only use if no other choice to achieve best performance on target platform */
-MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
-MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
-MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
-
-#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;
-
-MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
-MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
-
-#else
-
-/* default method, safe and standard.
-   can sometimes prove slower */
-
-MEM_STATIC U16 MEM_read16(const void* memPtr)
-{
-    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC U32 MEM_read32(const void* memPtr)
-{
-    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC U64 MEM_read64(const void* memPtr)
-{
-    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-MEM_STATIC void MEM_write16(void* memPtr, U16 value)
-{
-    memcpy(memPtr, &value, sizeof(value));
-}
-
-#endif /* MEM_FORCE_MEMORY_ACCESS */
-
-MEM_STATIC U32 MEM_swap32(U32 in)
-{
-#if defined(_MSC_VER)     /* Visual Studio */
-    return _byteswap_ulong(in);
-#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
-    return __builtin_bswap32(in);
-#else
-    return  ((in << 24) & 0xff000000 ) |
-            ((in <<  8) & 0x00ff0000 ) |
-            ((in >>  8) & 0x0000ff00 ) |
-            ((in >> 24) & 0x000000ff );
-#endif
-}
-
-MEM_STATIC U64 MEM_swap64(U64 in)
-{
-#if defined(_MSC_VER)     /* Visual Studio */
-    return _byteswap_uint64(in);
-#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
-    return __builtin_bswap64(in);
-#else
-    return  ((in << 56) & 0xff00000000000000ULL) |
-            ((in << 40) & 0x00ff000000000000ULL) |
-            ((in << 24) & 0x0000ff0000000000ULL) |
-            ((in << 8)  & 0x000000ff00000000ULL) |
-            ((in >> 8)  & 0x00000000ff000000ULL) |
-            ((in >> 24) & 0x0000000000ff0000ULL) |
-            ((in >> 40) & 0x000000000000ff00ULL) |
-            ((in >> 56) & 0x00000000000000ffULL);
-#endif
-}
-
-
-/*=== Little endian r/w ===*/
-
-MEM_STATIC U16 MEM_readLE16(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read16(memPtr);
-    else {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U16)(p[0] + (p[1]<<8));
-    }
-}
-
-MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
-{
-    if (MEM_isLittleEndian()) {
-        MEM_write16(memPtr, val);
-    } else {
-        BYTE* p = (BYTE*)memPtr;
-        p[0] = (BYTE)val;
-        p[1] = (BYTE)(val>>8);
-    }
-}
-
-MEM_STATIC U32 MEM_readLE32(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read32(memPtr);
-    else
-        return MEM_swap32(MEM_read32(memPtr));
-}
-
-
-MEM_STATIC U64 MEM_readLE64(const void* memPtr)
-{
-    if (MEM_isLittleEndian())
-        return MEM_read64(memPtr);
-    else
-        return MEM_swap64(MEM_read64(memPtr));
-}
-
-MEM_STATIC size_t MEM_readLEST(const void* memPtr)
-{
-    if (MEM_32bits())
-        return (size_t)MEM_readLE32(memPtr);
-    else
-        return (size_t)MEM_readLE64(memPtr);
-}
-
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* MEM_H_MODULE */
-/* ******************************************************************
-   bitstream
-   Part of FSE library
-   header file (to include)
-   Copyright (C) 2013-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-#ifndef BITSTREAM_H_MODULE
-#define BITSTREAM_H_MODULE
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-/*
-*  This API consists of small unitary functions, which must be inlined for best performance.
-*  Since link-time-optimization is not available for all compilers,
-*  these functions are defined in a .h file, to be included.
-*/
-
-
-/*=========================================
-*  Target specific
-=========================================*/
-#if defined(__BMI__) && defined(__GNUC__)
-#  include <immintrin.h>   /* support for bextr (experimental) */
-#endif
-
-/*-********************************************
-*  bitStream decoding API (read backward)
-**********************************************/
-typedef struct
-{
-    size_t   bitContainer;
-    unsigned bitsConsumed;
-    const char* ptr;
-    const char* start;
-} BITv07_DStream_t;
-
-typedef enum { BITv07_DStream_unfinished = 0,
-               BITv07_DStream_endOfBuffer = 1,
-               BITv07_DStream_completed = 2,
-               BITv07_DStream_overflow = 3 } BITv07_DStream_status;  /* result of BITv07_reloadDStream() */
-               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
-
-MEM_STATIC size_t   BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
-MEM_STATIC size_t   BITv07_readBits(BITv07_DStream_t* bitD, unsigned nbBits);
-MEM_STATIC BITv07_DStream_status BITv07_reloadDStream(BITv07_DStream_t* bitD);
-MEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* bitD);
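A brief sketch of reading a backward bit-stream with the API declared above. The field widths (5, 8 and 3 bits) are illustrative only; a real decoder knows its own stream layout. ERR_isError() is assumed to come from the error_private.h header included near the top of this file, and the basic types come from the mem.h section above.

```c
/* Sketch: decode three fields from a backward bit-stream.
 * Returns 0 when the stream was fully consumed, -1 on error. */
static int readThreeFields(const void* srcBuffer, size_t srcSize,
                           size_t* a, size_t* b, size_t* c)
{
    BITv07_DStream_t bitD;
    size_t const initResult = BITv07_initDStream(&bitD, srcBuffer, srcSize);
    if (ERR_isError(initResult)) return -1;

    *a = BITv07_readBits(&bitD, 5);   /* values come back in reverse write order */
    *b = BITv07_readBits(&bitD, 8);
    BITv07_reloadDStream(&bitD);      /* refill the bit container between reads */
    *c = BITv07_readBits(&bitD, 3);

    return BITv07_endOfDStream(&bitD) ? 0 : -1;
}
```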
-
-
-
-/*-****************************************
-*  unsafe API
-******************************************/
-MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, unsigned nbBits);
-/* faster, but works only if nbBits >= 1 */
-
-
-
-/*-**************************************************************
-*  Internal functions
-****************************************************************/
-MEM_STATIC unsigned BITv07_highbit32 (U32 val)
-{
-#   if defined(_MSC_VER)   /* Visual */
-    unsigned long r=0;
-    _BitScanReverse ( &r, val );
-    return (unsigned) r;
-#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
-    return 31 - __builtin_clz (val);
-#   else   /* Software version */
-    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
-    U32 v = val;
-    v |= v >> 1;
-    v |= v >> 2;
-    v |= v >> 4;
-    v |= v >> 8;
-    v |= v >> 16;
-    return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
-#   endif
-}
-
-
-
-/*-********************************************************
-* bitStream decoding
-**********************************************************/
-/*! BITv07_initDStream() :
-*   Initialize a BITv07_DStream_t.
-*   `bitD` : a pointer to an already allocated BITv07_DStream_t structure.
-*   `srcSize` must be the *exact* size of the bitStream, in bytes.
-*   @return : size of stream (== srcSize) or an errorCode if a problem is detected
-*/
-MEM_STATIC size_t BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
-{
-    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
-
-    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
-        bitD->start = (const char*)srcBuffer;
-        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);
-        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
-          bitD->bitsConsumed = lastByte ? 8 - BITv07_highbit32(lastByte) : 0;
-          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
-    } else {
-        bitD->start = (const char*)srcBuffer;
-        bitD->ptr   = bitD->start;
-        bitD->bitContainer = *(const BYTE*)(bitD->start);
-        switch(srcSize)
-        {
-            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */
-            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */
-            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */
-            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */
-            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */
-            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8; /* fall-through */
-            default: break;
-        }
-        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
-          bitD->bitsConsumed = lastByte ? 8 - BITv07_highbit32(lastByte) : 0;
-          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
-        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
-    }
-
-    return srcSize;
-}
-
-
-MEM_STATIC size_t BITv07_lookBits(const BITv07_DStream_t* bitD, U32 nbBits)
-{
-    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
-    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
-}
-
-/*! BITv07_lookBitsFast() :
-*   unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BITv07_lookBitsFast(const BITv07_DStream_t* bitD, U32 nbBits)
-{
-    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
-    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
-}
-
-MEM_STATIC void BITv07_skipBits(BITv07_DStream_t* bitD, U32 nbBits)
-{
-    bitD->bitsConsumed += nbBits;
-}
-
-MEM_STATIC size_t BITv07_readBits(BITv07_DStream_t* bitD, U32 nbBits)
-{
-    size_t const value = BITv07_lookBits(bitD, nbBits);
-    BITv07_skipBits(bitD, nbBits);
-    return value;
-}
-
-/*! BITv07_readBitsFast() :
-*   unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, U32 nbBits)
-{
-    size_t const value = BITv07_lookBitsFast(bitD, nbBits);
-    BITv07_skipBits(bitD, nbBits);
-    return value;
-}
-
-MEM_STATIC BITv07_DStream_status BITv07_reloadDStream(BITv07_DStream_t* bitD)
-{
-    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should not happen => corruption detected */
-        return BITv07_DStream_overflow;
-
-    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
-        bitD->ptr -= bitD->bitsConsumed >> 3;
-        bitD->bitsConsumed &= 7;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);
-        return BITv07_DStream_unfinished;
-    }
-    if (bitD->ptr == bitD->start) {
-        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv07_DStream_endOfBuffer;
-        return BITv07_DStream_completed;
-    }
-    {   U32 nbBytes = bitD->bitsConsumed >> 3;
-        BITv07_DStream_status result = BITv07_DStream_unfinished;
-        if (bitD->ptr - nbBytes < bitD->start) {
-            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
-            result = BITv07_DStream_endOfBuffer;
-        }
-        bitD->ptr -= nbBytes;
-        bitD->bitsConsumed -= nbBytes*8;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
-        return result;
-    }
-}
-
-/*! BITv07_endOfDStream() :
-*   @return Tells if DStream has exactly reached its end (all bits consumed).
-*/
-MEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* DStream)
-{
-    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
-}
-
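-
-/* Editor's note : illustrative sketch only, NOT part of the original zstd sources.
-   It shows the typical consumption pattern for the decoding API declared above
-   (the name BITv07_example_sumNibbles is hypothetical), and assumes the ERR_*
-   helpers and error codes defined earlier in this file. */
-MEM_STATIC size_t BITv07_example_sumNibbles(const void* src, size_t srcSize, unsigned count)
-{
-    BITv07_DStream_t bitD;
-    size_t total = 0;
-    size_t const initError = BITv07_initDStream(&bitD, src, srcSize);   /* srcSize must be exact */
-    if (ERR_isError(initError)) return initError;
-    while (count--) {
-        total += BITv07_readBits(&bitD, 4);                  /* consume 4 bits per iteration */
-        if (BITv07_reloadDStream(&bitD) == BITv07_DStream_overflow)
-            return ERROR(corruption_detected);               /* asked for more bits than available */
-    }
-    if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected);   /* bits left unread */
-    return total;
-}
-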
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* BITSTREAM_H_MODULE */
-/* ******************************************************************
-   FSE : Finite State Entropy codec
-   Public Prototypes declaration
-   Copyright (C) 2013-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-#ifndef FSEv07_H
-#define FSEv07_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-
-/*-****************************************
-*  FSE simple functions
-******************************************/
-
-/*! FSEv07_decompress():
-    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
-    into already allocated destination buffer 'dst', of size 'dstCapacity'.
-    @return : size of regenerated data (<= dstCapacity),
-              or an error code, which can be tested using FSEv07_isError() .
-
-    ** Important ** : FSEv07_decompress() does not handle non-compressible (raw) or RLE data.
-    Why? Making this distinction requires a header.
-    Header management is intentionally delegated to the user layer, which can better manage special cases.
-*/
-size_t FSEv07_decompress(void* dst,  size_t dstCapacity,
-                const void* cSrc, size_t cSrcSize);
-
-
-/* Error Management */
-unsigned    FSEv07_isError(size_t code);        /* tells if a return value is an error code */
-const char* FSEv07_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
-
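-
-/* Editor's note : minimal usage sketch, NOT part of the original zstd sources
-   (the wrapper name FSEv07_example_decompressBlock is hypothetical).
-   It simply shows the documented error-checking contract of FSEv07_decompress(). */
-static size_t FSEv07_example_decompressBlock(void* dst, size_t dstCapacity,
-                                             const void* cSrc, size_t cSrcSize)
-{
-    size_t const dSize = FSEv07_decompress(dst, dstCapacity, cSrc, cSrcSize);
-    if (FSEv07_isError(dSize)) return dSize;   /* FSEv07_getErrorName(dSize) gives a printable reason */
-    return dSize;                              /* number of regenerated bytes (<= dstCapacity) */
-}
-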
-
-/*-*****************************************
-*  FSE detailed API
-******************************************/
-/*!
-FSEv07_decompress() does the following:
-1. read normalized counters with readNCount()
-2. build decoding table 'DTable' from normalized counters
-3. decode the data stream using decoding table 'DTable'
-
-The following API allows targeting specific sub-functions for advanced tasks.
-For example, it's possible to compress several blocks using the same 'CTable',
-or to save and provide normalized distribution using external method.
-*/
-
-
-/* *** DECOMPRESSION *** */
-
-/*! FSEv07_readNCount():
-    Read compactly saved 'normalizedCounter' from 'rBuffer'.
-    @return : size read from 'rBuffer',
-              or an errorCode, which can be tested using FSEv07_isError().
-              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
-size_t FSEv07_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
-
-/*! Constructor and Destructor of FSEv07_DTable.
-    Note that its size depends on 'tableLog' */
-typedef unsigned FSEv07_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
-FSEv07_DTable* FSEv07_createDTable(unsigned tableLog);
-void        FSEv07_freeDTable(FSEv07_DTable* dt);
-
-/*! FSEv07_buildDTable():
-    Builds 'dt', which must be already allocated, using FSEv07_createDTable().
-    return : 0, or an errorCode, which can be tested using FSEv07_isError() */
-size_t FSEv07_buildDTable (FSEv07_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
-
-/*! FSEv07_decompress_usingDTable():
-    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
-    into `dst` which must be already allocated.
-    @return : size of regenerated data (necessarily <= `dstCapacity`),
-              or an errorCode, which can be tested using FSEv07_isError() */
-size_t FSEv07_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv07_DTable* dt);
-
-/*!
-Tutorial :
-----------
-(Note : these functions only decompress FSE-compressed blocks.
- If the block is uncompressed, use memcpy() instead.
- If the block is a single repeated byte, use memset() instead.)
-
-The first step is to obtain the normalized frequencies of symbols.
-This can be performed by FSEv07_readNCount() if it was saved using FSEv07_writeNCount().
-'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
-In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
-or size the table to handle worst case situations (typically 256).
-FSEv07_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
-The result of FSEv07_readNCount() is the number of bytes read from 'rBuffer'.
-Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
-If there is an error, the function will return an error code, which can be tested using FSEv07_isError().
-
-The next step is to build the decompression tables 'FSEv07_DTable' from 'normalizedCounter'.
-This is performed by the function FSEv07_buildDTable().
-The space required by 'FSEv07_DTable' must be already allocated using FSEv07_createDTable().
-If there is an error, the function will return an error code, which can be tested using FSEv07_isError().
-
-`FSEv07_DTable` can then be used to decompress `cSrc`, with FSEv07_decompress_usingDTable().
-`cSrcSize` must be strictly correct, otherwise decompression will fail.
-FSEv07_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
-If there is an error, the function will return an error code, which can be tested using FSEv07_isError(). (ex: dst buffer too small)
-*/
-
-
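-
-/* Editor's note : illustrative sketch of the three-step flow described in the
-   tutorial above, NOT part of the original zstd sources (the wrapper name
-   FSEv07_example_decompressWithDTable is hypothetical; error handling is abbreviated). */
-static size_t FSEv07_example_decompressWithDTable(void* dst, size_t dstCapacity,
-                                                  const void* cSrc, size_t cSrcSize)
-{
-    short normalizedCounter[256];            /* worst case : maxSymbolValue+1 == 256 cells */
-    unsigned maxSymbolValue = 255;
-    unsigned tableLog;
-    FSEv07_DTable* dt;
-    size_t result;
-
-    /* 1. read the normalized counters stored in front of the compressed payload */
-    size_t const headerSize = FSEv07_readNCount(normalizedCounter, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
-    if (FSEv07_isError(headerSize)) return headerSize;
-    if (headerSize >= cSrcSize) return ERROR(srcSize_wrong);
-
-    /* 2. build the decoding table */
-    dt = FSEv07_createDTable(tableLog);
-    if (dt == NULL) return ERROR(GENERIC);   /* allocation failure */
-    result = FSEv07_buildDTable(dt, normalizedCounter, maxSymbolValue, tableLog);
-    if (FSEv07_isError(result)) { FSEv07_freeDTable(dt); return result; }
-
-    /* 3. decode the bitstream that follows the header */
-    result = FSEv07_decompress_usingDTable(dst, dstCapacity,
-                                           (const char*)cSrc + headerSize, cSrcSize - headerSize, dt);
-    FSEv07_freeDTable(dt);
-    return result;
-}
-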
-#ifdef FSEv07_STATIC_LINKING_ONLY
-
-
-/* *****************************************
-*  Static allocation
-*******************************************/
-/* FSE buffer bounds */
-#define FSEv07_NCOUNTBOUND 512
-#define FSEv07_BLOCKBOUND(size) (size + (size>>7))
-
-/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */
-#define FSEv07_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
-
-
-/* *****************************************
-*  FSE advanced API
-*******************************************/
-size_t FSEv07_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
-/**< same as FSEv07_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr  */
-
-unsigned FSEv07_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
-/**< same as FSEv07_optimalTableLog(), which uses `minus==2` */
-
-size_t FSEv07_buildDTable_raw (FSEv07_DTable* dt, unsigned nbBits);
-/**< build a fake FSEv07_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
-
-size_t FSEv07_buildDTable_rle (FSEv07_DTable* dt, unsigned char symbolValue);
-/**< build a fake FSEv07_DTable, designed to always generate the same symbolValue */
-
-
-
-/* *****************************************
-*  FSE symbol decompression API
-*******************************************/
-typedef struct
-{
-    size_t      state;
-    const void* table;   /* precise table may vary, depending on U16 */
-} FSEv07_DState_t;
-
-
-static void     FSEv07_initDState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD, const FSEv07_DTable* dt);
-
-static unsigned char FSEv07_decodeSymbol(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD);
-
-
-
-/* *****************************************
-*  FSE unsafe API
-*******************************************/
-static unsigned char FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD);
-/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
-
-
-/* ======    Decompression    ====== */
-
-typedef struct {
-    U16 tableLog;
-    U16 fastMode;
-} FSEv07_DTableHeader;   /* sizeof U32 */
-
-typedef struct
-{
-    unsigned short newState;
-    unsigned char  symbol;
-    unsigned char  nbBits;
-} FSEv07_decode_t;   /* size == U32 */
-
-MEM_STATIC void FSEv07_initDState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD, const FSEv07_DTable* dt)
-{
-    const void* ptr = dt;
-    const FSEv07_DTableHeader* const DTableH = (const FSEv07_DTableHeader*)ptr;
-    DStatePtr->state = BITv07_readBits(bitD, DTableH->tableLog);
-    BITv07_reloadDStream(bitD);
-    DStatePtr->table = dt + 1;
-}
-
-MEM_STATIC BYTE FSEv07_peekSymbol(const FSEv07_DState_t* DStatePtr)
-{
-    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    return DInfo.symbol;
-}
-
-MEM_STATIC void FSEv07_updateState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
-{
-    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    U32 const nbBits = DInfo.nbBits;
-    size_t const lowBits = BITv07_readBits(bitD, nbBits);
-    DStatePtr->state = DInfo.newState + lowBits;
-}
-
-MEM_STATIC BYTE FSEv07_decodeSymbol(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
-{
-    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    U32 const nbBits = DInfo.nbBits;
-    BYTE const symbol = DInfo.symbol;
-    size_t const lowBits = BITv07_readBits(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-/*! FSEv07_decodeSymbolFast() :
-    unsafe, only works if no symbol has a probability > 50% */
-MEM_STATIC BYTE FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
-{
-    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
-    U32 const nbBits = DInfo.nbBits;
-    BYTE const symbol = DInfo.symbol;
-    size_t const lowBits = BITv07_readBitsFast(bitD, nbBits);
-
-    DStatePtr->state = DInfo.newState + lowBits;
-    return symbol;
-}
-
-
-
-#ifndef FSEv07_COMMONDEFS_ONLY
-
-/* **************************************************************
-*  Tuning parameters
-****************************************************************/
-/*!MEMORY_USAGE :
-*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
-*  Increasing memory usage improves compression ratio
-*  Reduced memory usage can improve speed, due to cache effect
-*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
-#define FSEv07_MAX_MEMORY_USAGE 14
-#define FSEv07_DEFAULT_MEMORY_USAGE 13
-
-/*!FSEv07_MAX_SYMBOL_VALUE :
-*  Maximum symbol value authorized.
-*  Required for proper stack allocation */
-#define FSEv07_MAX_SYMBOL_VALUE 255
-
-
-/* **************************************************************
-*  template functions type & suffix
-****************************************************************/
-#define FSEv07_FUNCTION_TYPE BYTE
-#define FSEv07_FUNCTION_EXTENSION
-#define FSEv07_DECODE_TYPE FSEv07_decode_t
-
-
-#endif   /* !FSEv07_COMMONDEFS_ONLY */
-
-
-/* ***************************************************************
-*  Constants
-*****************************************************************/
-#define FSEv07_MAX_TABLELOG  (FSEv07_MAX_MEMORY_USAGE-2)
-#define FSEv07_MAX_TABLESIZE (1U<<FSEv07_MAX_TABLELOG)
-#define FSEv07_MAXTABLESIZE_MASK (FSEv07_MAX_TABLESIZE-1)
-#define FSEv07_DEFAULT_TABLELOG (FSEv07_DEFAULT_MEMORY_USAGE-2)
-#define FSEv07_MIN_TABLELOG 5
-
-#define FSEv07_TABLELOG_ABSOLUTE_MAX 15
-#if FSEv07_MAX_TABLELOG > FSEv07_TABLELOG_ABSOLUTE_MAX
-#  error "FSEv07_MAX_TABLELOG > FSEv07_TABLELOG_ABSOLUTE_MAX is not supported"
-#endif
-
-#define FSEv07_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
-
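-
-/* Editor's note (illustrative arithmetic, NOT part of the original zstd sources) :
-   with FSEv07_MAX_MEMORY_USAGE == 14, FSEv07_MAX_TABLELOG == 14-2 == 12, so a
-   statically allocated decoding table holds
-   FSEv07_DTABLE_SIZE_U32(12) == 1 + (1<<12) == 4097 U32 cells, i.e. ~16 KB,
-   which is the "fits into Intel x86 L1 cache" figure quoted above. */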
-
-#endif /* FSEv07_STATIC_LINKING_ONLY */
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif  /* FSEv07_H */
-/* ******************************************************************
-   Huffman coder, part of New Generation Entropy library
-   header file
-   Copyright (C) 2013-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-   You can contact the author at :
-   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
-****************************************************************** */
-#ifndef HUFv07_H_298734234
-#define HUFv07_H_298734234
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
-
-/* *** simple functions *** */
-/**
-HUFv07_decompress() :
-    Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
-    into already allocated buffer 'dst', of minimum size 'dstSize'.
-    `dstSize` : **must** be the ***exact*** size of original (uncompressed) data.
-    Note : in contrast with FSE, HUFv07_decompress can regenerate
-           RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
-           because it knows the size to regenerate.
-    @return : size of regenerated data (== dstSize),
-              or an error code, which can be tested using HUFv07_isError()
-*/
-size_t HUFv07_decompress(void* dst,  size_t dstSize,
-                const void* cSrc, size_t cSrcSize);
-
-
-/* ****************************************
-*  Tool functions
-******************************************/
-#define HUFv07_BLOCKSIZE_MAX (128 * 1024)
-
-/* Error Management */
-unsigned    HUFv07_isError(size_t code);        /**< tells if a return value is an error code */
-const char* HUFv07_getErrorName(size_t code);   /**< provides error code string (useful for debugging) */
-
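-
-/* Editor's note : minimal usage sketch, NOT part of the original zstd sources
-   (the wrapper name HUFv07_example_decompressBlock is hypothetical).
-   Unlike FSE, the caller must pass the exact uncompressed size. */
-static size_t HUFv07_example_decompressBlock(void* dst, size_t originalSize,
-                                             const void* cSrc, size_t cSrcSize)
-{
-    size_t const dSize = HUFv07_decompress(dst, originalSize, cSrc, cSrcSize);
-    if (HUFv07_isError(dSize)) return dSize;   /* HUFv07_getErrorName(dSize) gives a printable reason */
-    return dSize;                              /* == originalSize on success */
-}
-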
-
-/* *** Advanced function *** */
-
-
-#ifdef HUFv07_STATIC_LINKING_ONLY
-
-
-/* *** Constants *** */
-#define HUFv07_TABLELOG_ABSOLUTEMAX  16   /* absolute limit of HUFv07_TABLELOG_MAX. Beyond that value, code does not work */
-#define HUFv07_TABLELOG_MAX  12           /* max configured tableLog (for static allocation); can be modified up to HUFv07_TABLELOG_ABSOLUTEMAX */
-#define HUFv07_TABLELOG_DEFAULT  11       /* tableLog by default, when not specified */
-#define HUFv07_SYMBOLVALUE_MAX 255
-#if (HUFv07_TABLELOG_MAX > HUFv07_TABLELOG_ABSOLUTEMAX)
-#  error "HUFv07_TABLELOG_MAX is too large !"
-#endif
-
-
-/* ****************************************
-*  Static allocation
-******************************************/
-/* HUF buffer bounds */
-#define HUFv07_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true if incompressible pre-filtered with fast heuristic */
-
-/* static allocation of HUF's DTable */
-typedef U32 HUFv07_DTable;
-#define HUFv07_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
-#define HUFv07_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
-        HUFv07_DTable DTable[HUFv07_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1)*0x1000001) }
-#define HUFv07_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
-        HUFv07_DTable DTable[HUFv07_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog)*0x1000001) }
-
-
-/* ****************************************
-*  Advanced decompression functions
-******************************************/
-size_t HUFv07_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
-size_t HUFv07_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
-
-size_t HUFv07_decompress4X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
-size_t HUFv07_decompress4X_hufOnly(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
-size_t HUFv07_decompress4X2_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
-size_t HUFv07_decompress4X4_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
-
-size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-size_t HUFv07_decompress1X2_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
-size_t HUFv07_decompress1X4_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
-
-
-/* ****************************************
-*  HUF detailed API
-******************************************/
-/*!
-The following API allows targeting specific sub-functions for advanced tasks.
-For example, it's possible to compress several blocks using the same 'CTable',
-or to save and regenerate 'CTable' using external methods.
-*/
-/* FSEv07_count() : find it within "fse.h" */
-
-/*! HUFv07_readStats() :
-    Read compact Huffman tree, saved by HUFv07_writeCTable().
-    `huffWeight` is destination buffer.
-    @return : size read from `src`, or an error code.
-    Note : needed by HUFv07_readCTable() and HUFv07_readDTableXn(). */
-size_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
-                     U32* nbSymbolsPtr, U32* tableLogPtr,
-                     const void* src, size_t srcSize);
-
-
-/*
-HUFv07_decompress() does the following:
-1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
-2. build Huffman table from the saved representation, using HUFv07_readDTableXn()
-3. decode 1 or 4 segments in parallel using HUFv07_decompressSXn_usingDTable
-*/
-
-/** HUFv07_selectDecoder() :
-*   Tells which decoder is likely to decode faster,
-*   based on a set of pre-determined metrics.
-*   @return : 0==HUFv07_decompress4X2, 1==HUFv07_decompress4X4 .
-*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
-U32 HUFv07_selectDecoder (size_t dstSize, size_t cSrcSize);
-
-size_t HUFv07_readDTableX2 (HUFv07_DTable* DTable, const void* src, size_t srcSize);
-size_t HUFv07_readDTableX4 (HUFv07_DTable* DTable, const void* src, size_t srcSize);
-
-size_t HUFv07_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
-size_t HUFv07_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
-size_t HUFv07_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
-
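-
-/* Editor's note : illustrative sketch of the decoder-selection flow described
-   above, NOT part of the original zstd sources (the wrapper name
-   HUFv07_example_decompressAuto is hypothetical). */
-static size_t HUFv07_example_decompressAuto(void* dst, size_t dstSize,
-                                            const void* cSrc, size_t cSrcSize)
-{
-    /* 0 selects the single-symbol (X2) decoder, 1 the double-symbols (X4) decoder */
-    U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
-    return algoNb ? HUFv07_decompress4X4(dst, dstSize, cSrc, cSrcSize)
-                  : HUFv07_decompress4X2(dst, dstSize, cSrc, cSrcSize);
-}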
-
-/* single stream variants */
-size_t HUFv07_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
-size_t HUFv07_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
-
-size_t HUFv07_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
-size_t HUFv07_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
-size_t HUFv07_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
-
-
-#endif /* HUFv07_STATIC_LINKING_ONLY */
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif   /* HUFv07_H_298734234 */
-/*
-   Common functions of New Generation Entropy library
-   Copyright (C) 2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-*************************************************************************** */
-
-
-
-/*-****************************************
-*  FSE Error Management
-******************************************/
-unsigned FSEv07_isError(size_t code) { return ERR_isError(code); }
-
-const char* FSEv07_getErrorName(size_t code) { return ERR_getErrorName(code); }
-
-
-/* **************************************************************
-*  HUF Error Management
-****************************************************************/
-unsigned HUFv07_isError(size_t code) { return ERR_isError(code); }
-
-const char* HUFv07_getErrorName(size_t code) { return ERR_getErrorName(code); }
-
-
-/*-**************************************************************
-*  FSE NCount encoding-decoding
-****************************************************************/
-static short FSEv07_abs(short a) { return (short)(a<0 ? -a : a); }
-
-size_t FSEv07_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
-                 const void* headerBuffer, size_t hbSize)
-{
-    const BYTE* const istart = (const BYTE*) headerBuffer;
-    const BYTE* const iend = istart + hbSize;
-    const BYTE* ip = istart;
-    int nbBits;
-    int remaining;
-    int threshold;
-    U32 bitStream;
-    int bitCount;
-    unsigned charnum = 0;
-    int previous0 = 0;
-
-    if (hbSize < 4) return ERROR(srcSize_wrong);
-    bitStream = MEM_readLE32(ip);
-    nbBits = (bitStream & 0xF) + FSEv07_MIN_TABLELOG;   /* extract tableLog */
-    if (nbBits > FSEv07_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
-    bitStream >>= 4;
-    bitCount = 4;
-    *tableLogPtr = nbBits;
-    remaining = (1<<nbBits)+1;
-    threshold = 1<<nbBits;
-    nbBits++;
-
-    while ((remaining>1) && (charnum<=*maxSVPtr)) {
-        if (previous0) {
-            unsigned n0 = charnum;
-            while ((bitStream & 0xFFFF) == 0xFFFF) {
-                n0+=24;
-                if (ip < iend-5) {
-                    ip+=2;
-                    bitStream = MEM_readLE32(ip) >> bitCount;
-                } else {
-                    bitStream >>= 16;
-                    bitCount+=16;
-            }   }
-            while ((bitStream & 3) == 3) {
-                n0+=3;
-                bitStream>>=2;
-                bitCount+=2;
-            }
-            n0 += bitStream & 3;
-            bitCount += 2;
-            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
-            while (charnum < n0) normalizedCounter[charnum++] = 0;
-            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
-                ip += bitCount>>3;
-                bitCount &= 7;
-                bitStream = MEM_readLE32(ip) >> bitCount;
-            }
-            else
-                bitStream >>= 2;
-        }
-        {   short const max = (short)((2*threshold-1)-remaining);
-            short count;
-
-            if ((bitStream & (threshold-1)) < (U32)max) {
-                count = (short)(bitStream & (threshold-1));
-                bitCount   += nbBits-1;
-            } else {
-                count = (short)(bitStream & (2*threshold-1));
-                if (count >= threshold) count -= max;
-                bitCount   += nbBits;
-            }
-
-            count--;   /* extra accuracy */
-            remaining -= FSEv07_abs(count);
-            normalizedCounter[charnum++] = count;
-            previous0 = !count;
-            while (remaining < threshold) {
-                nbBits--;
-                threshold >>= 1;
-            }
-
-            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
-                ip += bitCount>>3;
-                bitCount &= 7;
-            } else {
-                bitCount -= (int)(8 * (iend - 4 - ip));
-                ip = iend - 4;
-            }
-            bitStream = MEM_readLE32(ip) >> (bitCount & 31);
-    }   }   /* while ((remaining>1) && (charnum<=*maxSVPtr)) */
-    if (remaining != 1) return ERROR(GENERIC);
-    *maxSVPtr = charnum-1;
-
-    ip += (bitCount+7)>>3;
-    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
-    return ip-istart;
-}
-
-
-/*! HUFv07_readStats() :
-    Read compact Huffman tree, saved by HUFv07_writeCTable().
-    `huffWeight` is destination buffer.
-    @return : size read from `src`, or an error code.
-    Note : needed by HUFv07_readCTable() and HUFv07_readDTableXn().
-*/
-size_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
-                     U32* nbSymbolsPtr, U32* tableLogPtr,
-                     const void* src, size_t srcSize)
-{
-    U32 weightTotal;
-    const BYTE* ip = (const BYTE*) src;
-    size_t iSize;
-    size_t oSize;
-
-    if (!srcSize) return ERROR(srcSize_wrong);
-    iSize = ip[0];
-    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzers complain ... */
-
-    if (iSize >= 128)  { /* special header */
-        if (iSize >= (242)) {  /* RLE */
-            static U32 l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
-            oSize = l[iSize-242];
-            memset(huffWeight, 1, hwSize);
-            iSize = 0;
-        }
-        else {   /* Incompressible */
-            oSize = iSize - 127;
-            iSize = ((oSize+1)/2);
-            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-            if (oSize >= hwSize) return ERROR(corruption_detected);
-            ip += 1;
-            {   U32 n;
-                for (n=0; n<oSize; n+=2) {
-                    huffWeight[n]   = ip[n/2] >> 4;
-                    huffWeight[n+1] = ip[n/2] & 15;
-    }   }   }   }
-    else  {   /* header compressed with FSE (normal case) */
-        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
-        oSize = FSEv07_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */
-        if (FSEv07_isError(oSize)) return oSize;
-    }
-
-    /* collect weight stats */
-    memset(rankStats, 0, (HUFv07_TABLELOG_ABSOLUTEMAX + 1) * sizeof(U32));
-    weightTotal = 0;
-    {   U32 n; for (n=0; n<oSize; n++) {
-            if (huffWeight[n] >= HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(corruption_detected);
-            rankStats[huffWeight[n]]++;
-            weightTotal += (1 << huffWeight[n]) >> 1;
-    }   }
-    if (weightTotal == 0) return ERROR(corruption_detected);
-
-    /* get last non-null symbol weight (implied, total must be 2^n) */
-    {   U32 const tableLog = BITv07_highbit32(weightTotal) + 1;
-        if (tableLog > HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(corruption_detected);
-        *tableLogPtr = tableLog;
-        /* determine last weight */
-        {   U32 const total = 1 << tableLog;
-            U32 const rest = total - weightTotal;
-            U32 const verif = 1 << BITv07_highbit32(rest);
-            U32 const lastWeight = BITv07_highbit32(rest) + 1;
-            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
-            huffWeight[oSize] = (BYTE)lastWeight;
-            rankStats[lastWeight]++;
-    }   }
-
-    /* check tree construction validity */
-    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
-
-    /* results */
-    *nbSymbolsPtr = (U32)(oSize+1);
-    return iSize+1;
-}
-/* ******************************************************************
-   FSE : Finite State Entropy decoder
-   Copyright (C) 2013-2015, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-
-/* **************************************************************
-*  Compiler specifics
-****************************************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  define FORCE_INLINE static __forceinline
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
-#else
-#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#    ifdef __GNUC__
-#      define FORCE_INLINE static inline __attribute__((always_inline))
-#    else
-#      define FORCE_INLINE static inline
-#    endif
-#  else
-#    define FORCE_INLINE static
-#  endif /* __STDC_VERSION__ */
-#endif
-
-
-/* **************************************************************
-*  Error Management
-****************************************************************/
-#define FSEv07_isError ERR_isError
-#define FSEv07_STATIC_ASSERT(c) { enum { FSEv07_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
-
-
-/* **************************************************************
-*  Complex types
-****************************************************************/
-typedef U32 DTable_max_t[FSEv07_DTABLE_SIZE_U32(FSEv07_MAX_TABLELOG)];
-
-
-/* **************************************************************
-*  Templates
-****************************************************************/
-/*
-  designed to be included
-  for type-specific functions (template emulation in C)
-  Objective is to write these functions only once, for improved maintenance
-*/
-
-/* safety checks */
-#ifndef FSEv07_FUNCTION_EXTENSION
-#  error "FSEv07_FUNCTION_EXTENSION must be defined"
-#endif
-#ifndef FSEv07_FUNCTION_TYPE
-#  error "FSEv07_FUNCTION_TYPE must be defined"
-#endif
-
-/* Function names */
-#define FSEv07_CAT(X,Y) X##Y
-#define FSEv07_FUNCTION_NAME(X,Y) FSEv07_CAT(X,Y)
-#define FSEv07_TYPE_NAME(X,Y) FSEv07_CAT(X,Y)
-
-
-/* Function templates */
-FSEv07_DTable* FSEv07_createDTable (unsigned tableLog)
-{
-    if (tableLog > FSEv07_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv07_TABLELOG_ABSOLUTE_MAX;
-    return (FSEv07_DTable*)malloc( FSEv07_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
-}
-
-void FSEv07_freeDTable (FSEv07_DTable* dt)
-{
-    free(dt);
-}
-
-size_t FSEv07_buildDTable(FSEv07_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
-    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */
-    FSEv07_DECODE_TYPE* const tableDecode = (FSEv07_DECODE_TYPE*) (tdPtr);
-    U16 symbolNext[FSEv07_MAX_SYMBOL_VALUE+1];
-
-    U32 const maxSV1 = maxSymbolValue + 1;
-    U32 const tableSize = 1 << tableLog;
-    U32 highThreshold = tableSize-1;
-
-    /* Sanity Checks */
-    if (maxSymbolValue > FSEv07_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
-    if (tableLog > FSEv07_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
-
-    /* Init, lay down lowprob symbols */
-    {   FSEv07_DTableHeader DTableH;
-        DTableH.tableLog = (U16)tableLog;
-        DTableH.fastMode = 1;
-        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
-            U32 s;
-            for (s=0; s<maxSV1; s++) {
-                if (normalizedCounter[s]==-1) {
-                    tableDecode[highThreshold--].symbol = (FSEv07_FUNCTION_TYPE)s;
-                    symbolNext[s] = 1;
-                } else {
-                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
-                    symbolNext[s] = normalizedCounter[s];
-        }   }   }
-        memcpy(dt, &DTableH, sizeof(DTableH));
-    }
-
-    /* Spread symbols */
-    {   U32 const tableMask = tableSize-1;
-        U32 const step = FSEv07_TABLESTEP(tableSize);
-        U32 s, position = 0;
-        for (s=0; s<maxSV1; s++) {
-            int i;
-            for (i=0; i<normalizedCounter[s]; i++) {
-                tableDecode[position].symbol = (FSEv07_FUNCTION_TYPE)s;
-                position = (position + step) & tableMask;
-                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
-        }   }
-
-        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
-    }
-
-    /* Build Decoding table */
-    {   U32 u;
-        for (u=0; u<tableSize; u++) {
-            FSEv07_FUNCTION_TYPE const symbol = (FSEv07_FUNCTION_TYPE)(tableDecode[u].symbol);
-            U16 nextState = symbolNext[symbol]++;
-            tableDecode[u].nbBits = (BYTE) (tableLog - BITv07_highbit32 ((U32)nextState) );
-            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
-    }   }
-
-    return 0;
-}
-
-
-
-#ifndef FSEv07_COMMONDEFS_ONLY
-
-/*-*******************************************************
-*  Decompression (Byte symbols)
-*********************************************************/
-size_t FSEv07_buildDTable_rle (FSEv07_DTable* dt, BYTE symbolValue)
-{
-    void* ptr = dt;
-    FSEv07_DTableHeader* const DTableH = (FSEv07_DTableHeader*)ptr;
-    void* dPtr = dt + 1;
-    FSEv07_decode_t* const cell = (FSEv07_decode_t*)dPtr;
-
-    DTableH->tableLog = 0;
-    DTableH->fastMode = 0;
-
-    cell->newState = 0;
-    cell->symbol = symbolValue;
-    cell->nbBits = 0;
-
-    return 0;
-}
-
-
-size_t FSEv07_buildDTable_raw (FSEv07_DTable* dt, unsigned nbBits)
-{
-    void* ptr = dt;
-    FSEv07_DTableHeader* const DTableH = (FSEv07_DTableHeader*)ptr;
-    void* dPtr = dt + 1;
-    FSEv07_decode_t* const dinfo = (FSEv07_decode_t*)dPtr;
-    const unsigned tableSize = 1 << nbBits;
-    const unsigned tableMask = tableSize - 1;
-    const unsigned maxSV1 = tableMask+1;
-    unsigned s;
-
-    /* Sanity checks */
-    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
-
-    /* Build Decoding Table */
-    DTableH->tableLog = (U16)nbBits;
-    DTableH->fastMode = 1;
-    for (s=0; s<maxSV1; s++) {
-        dinfo[s].newState = 0;
-        dinfo[s].symbol = (BYTE)s;
-        dinfo[s].nbBits = (BYTE)nbBits;
-    }
-
-    return 0;
-}
-
-FORCE_INLINE size_t FSEv07_decompress_usingDTable_generic(
-          void* dst, size_t maxDstSize,
-    const void* cSrc, size_t cSrcSize,
-    const FSEv07_DTable* dt, const unsigned fast)
-{
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* op = ostart;
-    BYTE* const omax = op + maxDstSize;
-    BYTE* const olimit = omax-3;
-
-    BITv07_DStream_t bitD;
-    FSEv07_DState_t state1;
-    FSEv07_DState_t state2;
-
-    /* Init */
-    { size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressed Size */
-      if (FSEv07_isError(errorCode)) return errorCode; }
-
-    FSEv07_initDState(&state1, &bitD, dt);
-    FSEv07_initDState(&state2, &bitD, dt);
-
-#define FSEv07_GETSYMBOL(statePtr) fast ? FSEv07_decodeSymbolFast(statePtr, &bitD) : FSEv07_decodeSymbol(statePtr, &bitD)
-
-    /* 4 symbols per loop */
-    for ( ; (BITv07_reloadDStream(&bitD)==BITv07_DStream_unfinished) && (op<olimit) ; op+=4) {
-        op[0] = FSEv07_GETSYMBOL(&state1);
-
-        if (FSEv07_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            BITv07_reloadDStream(&bitD);
-
-        op[1] = FSEv07_GETSYMBOL(&state2);
-
-        if (FSEv07_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            { if (BITv07_reloadDStream(&bitD) > BITv07_DStream_unfinished) { op+=2; break; } }
-
-        op[2] = FSEv07_GETSYMBOL(&state1);
-
-        if (FSEv07_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
-            BITv07_reloadDStream(&bitD);
-
-        op[3] = FSEv07_GETSYMBOL(&state2);
-    }
-
-    /* tail */
-    /* note : BITv07_reloadDStream(&bitD) >= BITv07_DStream_endOfBuffer here; decoding ends at exactly BITv07_DStream_completed */
-    while (1) {
-        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
-
-        *op++ = FSEv07_GETSYMBOL(&state1);
-
-        if (BITv07_reloadDStream(&bitD)==BITv07_DStream_overflow) {
-            *op++ = FSEv07_GETSYMBOL(&state2);
-            break;
-        }
-
-        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
-
-        *op++ = FSEv07_GETSYMBOL(&state2);
-
-        if (BITv07_reloadDStream(&bitD)==BITv07_DStream_overflow) {
-            *op++ = FSEv07_GETSYMBOL(&state1);
-            break;
-    }   }
-
-    return op-ostart;
-}
-
-
-size_t FSEv07_decompress_usingDTable(void* dst, size_t originalSize,
-                            const void* cSrc, size_t cSrcSize,
-                            const FSEv07_DTable* dt)
-{
-    const void* ptr = dt;
-    const FSEv07_DTableHeader* DTableH = (const FSEv07_DTableHeader*)ptr;
-    const U32 fastMode = DTableH->fastMode;
-
-    /* select fast mode (static) */
-    if (fastMode) return FSEv07_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
-    return FSEv07_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
-}
-
-
-size_t FSEv07_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
-{
-    const BYTE* const istart = (const BYTE*)cSrc;
-    const BYTE* ip = istart;
-    short counting[FSEv07_MAX_SYMBOL_VALUE+1];
-    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
-    unsigned tableLog;
-    unsigned maxSymbolValue = FSEv07_MAX_SYMBOL_VALUE;
-
-    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */
-
-    /* normal FSE decoding mode */
-    {   size_t const NCountLength = FSEv07_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
-        if (FSEv07_isError(NCountLength)) return NCountLength;
-        if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */
-        ip += NCountLength;
-        cSrcSize -= NCountLength;
-    }
-
-    { size_t const errorCode = FSEv07_buildDTable (dt, counting, maxSymbolValue, tableLog);
-      if (FSEv07_isError(errorCode)) return errorCode; }
-
-    return FSEv07_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);   /* always return, even if it is an error code */
-}
-
-
-
-#endif   /* FSEv07_COMMONDEFS_ONLY */
-
-/* ******************************************************************
-   Huffman decoder, part of New Generation Entropy library
-   Copyright (C) 2013-2016, Yann Collet.
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
-****************************************************************** */
-
-/* **************************************************************
-*  Compiler specifics
-****************************************************************/
-#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-/* inline is defined */
-#elif defined(_MSC_VER)
-#  define inline __inline
-#else
-#  define inline /* disable inline */
-#endif
-
-
-#ifdef _MSC_VER    /* Visual Studio */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#endif
-
-
-
-/* **************************************************************
-*  Error Management
-****************************************************************/
-#define HUFv07_STATIC_ASSERT(c) { enum { HUFv07_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
-
-
-/*-***************************/
-/*  generic DTableDesc       */
-/*-***************************/
-
-typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
-
-static DTableDesc HUFv07_getDTableDesc(const HUFv07_DTable* table)
-{
-    DTableDesc dtd;
-    memcpy(&dtd, table, sizeof(dtd));
-    return dtd;
-}
-
-
-/*-***************************/
-/*  single-symbol decoding   */
-/*-***************************/
-
-typedef struct { BYTE byte; BYTE nbBits; } HUFv07_DEltX2;   /* single-symbol decoding */
-
-size_t HUFv07_readDTableX2 (HUFv07_DTable* DTable, const void* src, size_t srcSize)
-{
-    BYTE huffWeight[HUFv07_SYMBOLVALUE_MAX + 1];
-    U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
-    U32 tableLog = 0;
-    U32 nbSymbols = 0;
-    size_t iSize;
-    void* const dtPtr = DTable + 1;
-    HUFv07_DEltX2* const dt = (HUFv07_DEltX2*)dtPtr;
-
-    HUFv07_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUFv07_DTable));
-    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzers complain ... */
-
-    iSize = HUFv07_readStats(huffWeight, HUFv07_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
-    if (HUFv07_isError(iSize)) return iSize;
-
-    /* Table header */
-    {   DTableDesc dtd = HUFv07_getDTableDesc(DTable);
-        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, huffman tree cannot fit in */
-        dtd.tableType = 0;
-        dtd.tableLog = (BYTE)tableLog;
-        memcpy(DTable, &dtd, sizeof(dtd));
-    }
-
-    /* Prepare ranks */
-    {   U32 n, nextRankStart = 0;
-        for (n=1; n<tableLog+1; n++) {
-            U32 current = nextRankStart;
-            nextRankStart += (rankVal[n] << (n-1));
-            rankVal[n] = current;
-    }   }
-
-    /* fill DTable */
-    {   U32 n;
-        for (n=0; n<nbSymbols; n++) {
-            U32 const w = huffWeight[n];
-            U32 const length = (1 << w) >> 1;
-            U32 i;
-            HUFv07_DEltX2 D;
-            D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
-            for (i = rankVal[w]; i < rankVal[w] + length; i++)
-                dt[i] = D;
-            rankVal[w] += length;
-    }   }
-
-    return iSize;
-}
-
-
-static BYTE HUFv07_decodeSymbolX2(BITv07_DStream_t* Dstream, const HUFv07_DEltX2* dt, const U32 dtLog)
-{
-    size_t const val = BITv07_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
-    BYTE const c = dt[val].byte;
-    BITv07_skipBits(Dstream, dt[val].nbBits);
-    return c;
-}
-
-#define HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
-    *ptr++ = HUFv07_decodeSymbolX2(DStreamPtr, dt, dtLog)
-
-#define HUFv07_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUFv07_TABLELOG_MAX<=12)) \
-        HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-#define HUFv07_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
-
-static inline size_t HUFv07_decodeStreamX2(BYTE* p, BITv07_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv07_DEltX2* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 4 symbols at a time */
-    while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p <= pEnd-4)) {
-        HUFv07_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUFv07_DECODE_SYMBOLX2_1(p, bitDPtr);
-        HUFv07_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);
-    }
-
-    /* closer to the end */
-    while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p < pEnd))
-        HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);
-
-    /* no more data to retrieve from bitstream, hence no need to reload */
-    while (p < pEnd)
-        HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);
-
-    return pEnd-pStart;
-}
-
-static size_t HUFv07_decompress1X2_usingDTable_internal(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUFv07_DTable* DTable)
-{
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + dstSize;
-    const void* dtPtr = DTable + 1;
-    const HUFv07_DEltX2* const dt = (const HUFv07_DEltX2*)dtPtr;
-    BITv07_DStream_t bitD;
-    DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
-    U32 const dtLog = dtd.tableLog;
-
-    { size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);
-      if (HUFv07_isError(errorCode)) return errorCode; }
-
-    HUFv07_decodeStreamX2(op, &bitD, oend, dt, dtLog);
-
-    /* check */
-    if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected);
-
-    return dstSize;
-}
-
-size_t HUFv07_decompress1X2_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUFv07_DTable* DTable)
-{
-    DTableDesc dtd = HUFv07_getDTableDesc(DTable);
-    if (dtd.tableType != 0) return ERROR(GENERIC);
-    return HUFv07_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
-}
-
-size_t HUFv07_decompress1X2_DCtx (HUFv07_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t const hSize = HUFv07_readDTableX2 (DCtx, cSrc, cSrcSize);
-    if (HUFv07_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize; cSrcSize -= hSize;
-
-    return HUFv07_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
-}
-
-size_t HUFv07_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUFv07_CREATE_STATIC_DTABLEX2(DTable, HUFv07_TABLELOG_MAX);
-    return HUFv07_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
-}
-
-
-static size_t HUFv07_decompress4X2_usingDTable_internal(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUFv07_DTable* DTable)
-{
-    /* Check */
-    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */
-
-    {   const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable + 1;
-        const HUFv07_DEltX2* const dt = (const HUFv07_DEltX2*)dtPtr;
-
-        /* Init */
-        BITv07_DStream_t bitD1;
-        BITv07_DStream_t bitD2;
-        BITv07_DStream_t bitD3;
-        BITv07_DStream_t bitD4;
-        size_t const length1 = MEM_readLE16(istart);
-        size_t const length2 = MEM_readLE16(istart+2);
-        size_t const length3 = MEM_readLE16(istart+4);
-        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        const size_t segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-        DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
-        U32 const dtLog = dtd.tableLog;
-
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        { size_t const errorCode = BITv07_initDStream(&bitD1, istart1, length1);
-          if (HUFv07_isError(errorCode)) return errorCode; }
-        { size_t const errorCode = BITv07_initDStream(&bitD2, istart2, length2);
-          if (HUFv07_isError(errorCode)) return errorCode; }
-        { size_t const errorCode = BITv07_initDStream(&bitD3, istart3, length3);
-          if (HUFv07_isError(errorCode)) return errorCode; }
-        { size_t const errorCode = BITv07_initDStream(&bitD4, istart4, length4);
-          if (HUFv07_isError(errorCode)) return errorCode; }
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
-        for ( ; (endSignal==BITv07_DStream_unfinished) && (op4<(oend-7)) ; ) {
-            HUFv07_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUFv07_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUFv07_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUFv07_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUFv07_DECODE_SYMBOLX2_1(op1, &bitD1);
-            HUFv07_DECODE_SYMBOLX2_1(op2, &bitD2);
-            HUFv07_DECODE_SYMBOLX2_1(op3, &bitD3);
-            HUFv07_DECODE_SYMBOLX2_1(op4, &bitD4);
-            HUFv07_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUFv07_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUFv07_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUFv07_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUFv07_DECODE_SYMBOLX2_0(op1, &bitD1);
-            HUFv07_DECODE_SYMBOLX2_0(op2, &bitD2);
-            HUFv07_DECODE_SYMBOLX2_0(op3, &bitD3);
-            HUFv07_DECODE_SYMBOLX2_0(op4, &bitD4);
-            endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 supposed already verified within main loop */
-
-        /* finish bitStreams one by one */
-        HUFv07_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
-        HUFv07_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
-        HUFv07_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
-        HUFv07_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        endSignal = BITv07_endOfDStream(&bitD1) & BITv07_endOfDStream(&bitD2) & BITv07_endOfDStream(&bitD3) & BITv07_endOfDStream(&bitD4);
-        if (!endSignal) return ERROR(corruption_detected);
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-size_t HUFv07_decompress4X2_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUFv07_DTable* DTable)
-{
-    DTableDesc dtd = HUFv07_getDTableDesc(DTable);
-    if (dtd.tableType != 0) return ERROR(GENERIC);
-    return HUFv07_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
-}
-
-
-size_t HUFv07_decompress4X2_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t const hSize = HUFv07_readDTableX2 (dctx, cSrc, cSrcSize);
-    if (HUFv07_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize; cSrcSize -= hSize;
-
-    return HUFv07_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx);
-}
-
-size_t HUFv07_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUFv07_CREATE_STATIC_DTABLEX2(DTable, HUFv07_TABLELOG_MAX);
-    return HUFv07_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
-}
-
-
-/* *************************/
-/* double-symbols decoding */
-/* *************************/
-typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv07_DEltX4;  /* double-symbols decoding */
-
-typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
-
-static void HUFv07_fillDTableX4Level2(HUFv07_DEltX4* DTable, U32 sizeLog, const U32 consumed,
-                           const U32* rankValOrigin, const int minWeight,
-                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
-                           U32 nbBitsBaseline, U16 baseSeq)
-{
-    HUFv07_DEltX4 DElt;
-    U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1];
-
-    /* get pre-calculated rankVal */
-    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
-    /* fill skipped values */
-    if (minWeight>1) {
-        U32 i, skipSize = rankVal[minWeight];
-        MEM_writeLE16(&(DElt.sequence), baseSeq);
-        DElt.nbBits   = (BYTE)(consumed);
-        DElt.length   = 1;
-        for (i = 0; i < skipSize; i++)
-            DTable[i] = DElt;
-    }
-
-    /* fill DTable */
-    { U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
-        const U32 symbol = sortedSymbols[s].symbol;
-        const U32 weight = sortedSymbols[s].weight;
-        const U32 nbBits = nbBitsBaseline - weight;
-        const U32 length = 1 << (sizeLog-nbBits);
-        const U32 start = rankVal[weight];
-        U32 i = start;
-        const U32 end = start + length;
-
-        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
-        DElt.nbBits = (BYTE)(nbBits + consumed);
-        DElt.length = 2;
-        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
-
-        rankVal[weight] += length;
-    }}
-}
-
-typedef U32 rankVal_t[HUFv07_TABLELOG_ABSOLUTEMAX][HUFv07_TABLELOG_ABSOLUTEMAX + 1];
-
-static void HUFv07_fillDTableX4(HUFv07_DEltX4* DTable, const U32 targetLog,
-                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
-                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
-                           const U32 nbBitsBaseline)
-{
-    U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1];
-    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
-    const U32 minBits  = nbBitsBaseline - maxWeight;
-    U32 s;
-
-    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
-
-    /* fill DTable */
-    for (s=0; s<sortedListSize; s++) {
-        const U16 symbol = sortedList[s].symbol;
-        const U32 weight = sortedList[s].weight;
-        const U32 nbBits = nbBitsBaseline - weight;
-        const U32 start = rankVal[weight];
-        const U32 length = 1 << (targetLog-nbBits);
-
-        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
-            U32 sortedRank;
-            int minWeight = nbBits + scaleLog;
-            if (minWeight < 1) minWeight = 1;
-            sortedRank = rankStart[minWeight];
-            HUFv07_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
-                           rankValOrigin[nbBits], minWeight,
-                           sortedList+sortedRank, sortedListSize-sortedRank,
-                           nbBitsBaseline, symbol);
-        } else {
-            HUFv07_DEltX4 DElt;
-            MEM_writeLE16(&(DElt.sequence), symbol);
-            DElt.nbBits = (BYTE)(nbBits);
-            DElt.length = 1;
-            {   U32 u;
-                const U32 end = start + length;
-                for (u = start; u < end; u++) DTable[u] = DElt;
-        }   }
-        rankVal[weight] += length;
-    }
-}
-
-size_t HUFv07_readDTableX4 (HUFv07_DTable* DTable, const void* src, size_t srcSize)
-{
-    BYTE weightList[HUFv07_SYMBOLVALUE_MAX + 1];
-    sortedSymbol_t sortedSymbol[HUFv07_SYMBOLVALUE_MAX + 1];
-    U32 rankStats[HUFv07_TABLELOG_ABSOLUTEMAX + 1] = { 0 };
-    U32 rankStart0[HUFv07_TABLELOG_ABSOLUTEMAX + 2] = { 0 };
-    U32* const rankStart = rankStart0+1;
-    rankVal_t rankVal;
-    U32 tableLog, maxW, sizeOfSort, nbSymbols;
-    DTableDesc dtd = HUFv07_getDTableDesc(DTable);
-    U32 const maxTableLog = dtd.maxTableLog;
-    size_t iSize;
-    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
-    HUFv07_DEltX4* const dt = (HUFv07_DEltX4*)dtPtr;
-
-    HUFv07_STATIC_ASSERT(sizeof(HUFv07_DEltX4) == sizeof(HUFv07_DTable));   /* if compilation fails here, assertion is false */
-    if (maxTableLog > HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(tableLog_tooLarge);
-    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
-
-    iSize = HUFv07_readStats(weightList, HUFv07_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
-    if (HUFv07_isError(iSize)) return iSize;
-
-    /* check result */
-    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
-
-    /* find maxWeight */
-    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */
-
-    /* Get start index of each weight */
-    {   U32 w, nextRankStart = 0;
-        for (w=1; w<maxW+1; w++) {
-            U32 current = nextRankStart;
-            nextRankStart += rankStats[w];
-            rankStart[w] = current;
-        }
-        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/
-        sizeOfSort = nextRankStart;
-    }
-
-    /* sort symbols by weight */
-    {   U32 s;
-        for (s=0; s<nbSymbols; s++) {
-            U32 const w = weightList[s];
-            U32 const r = rankStart[w]++;
-            sortedSymbol[r].symbol = (BYTE)s;
-            sortedSymbol[r].weight = (BYTE)w;
-        }
-        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
-    }
-
-    /* Build rankVal */
-    {   U32* const rankVal0 = rankVal[0];
-        {   int const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */
-            U32 nextRankVal = 0;
-            U32 w;
-            for (w=1; w<maxW+1; w++) {
-                U32 current = nextRankVal;
-                nextRankVal += rankStats[w] << (w+rescale);
-                rankVal0[w] = current;
-        }   }
-        {   U32 const minBits = tableLog+1 - maxW;
-            U32 consumed;
-            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
-                U32* const rankValPtr = rankVal[consumed];
-                U32 w;
-                for (w = 1; w < maxW+1; w++) {
-                    rankValPtr[w] = rankVal0[w] >> consumed;
-    }   }   }   }
-
-    HUFv07_fillDTableX4(dt, maxTableLog,
-                   sortedSymbol, sizeOfSort,
-                   rankStart0, rankVal, maxW,
-                   tableLog+1);
-
-    dtd.tableLog = (BYTE)maxTableLog;
-    dtd.tableType = 1;
-    memcpy(DTable, &dtd, sizeof(dtd));
-    return iSize;
-}
-
-
-static U32 HUFv07_decodeSymbolX4(void* op, BITv07_DStream_t* DStream, const HUFv07_DEltX4* dt, const U32 dtLog)
-{
-    const size_t val = BITv07_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 2);
-    BITv07_skipBits(DStream, dt[val].nbBits);
-    return dt[val].length;
-}
-
-static U32 HUFv07_decodeLastSymbolX4(void* op, BITv07_DStream_t* DStream, const HUFv07_DEltX4* dt, const U32 dtLog)
-{
-    const size_t val = BITv07_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 1);
-    if (dt[val].length==1) BITv07_skipBits(DStream, dt[val].nbBits);
-    else {
-        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
-            BITv07_skipBits(DStream, dt[val].nbBits);
-            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
-                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
-    }   }
-    return 1;
-}
-
-
-#define HUFv07_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
-    ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUFv07_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUFv07_TABLELOG_MAX<=12)) \
-        ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUFv07_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-static inline size_t HUFv07_decodeStreamX4(BYTE* p, BITv07_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv07_DEltX4* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 8 symbols at a time */
-    while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p < pEnd-7)) {
-        HUFv07_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUFv07_DECODE_SYMBOLX4_1(p, bitDPtr);
-        HUFv07_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr);
-    }
-
-    /* closer to end : up to 2 symbols at a time */
-    while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p <= pEnd-2))
-        HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr);
-
-    while (p <= pEnd-2)
-        HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
-
-    if (p < pEnd)
-        p += HUFv07_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
-
-    return p-pStart;
-}
-
-
-static size_t HUFv07_decompress1X4_usingDTable_internal(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUFv07_DTable* DTable)
-{
-    BITv07_DStream_t bitD;
-
-    /* Init */
-    {   size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);
-        if (HUFv07_isError(errorCode)) return errorCode;
-    }
-
-    /* decode */
-    {   BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
-        const HUFv07_DEltX4* const dt = (const HUFv07_DEltX4*)dtPtr;
-        DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
-        HUFv07_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
-    }
-
-    /* check */
-    if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected);
-
-    /* decoded size */
-    return dstSize;
-}
-
-size_t HUFv07_decompress1X4_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUFv07_DTable* DTable)
-{
-    DTableDesc dtd = HUFv07_getDTableDesc(DTable);
-    if (dtd.tableType != 1) return ERROR(GENERIC);
-    return HUFv07_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
-}
-
-size_t HUFv07_decompress1X4_DCtx (HUFv07_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t const hSize = HUFv07_readDTableX4 (DCtx, cSrc, cSrcSize);
-    if (HUFv07_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize; cSrcSize -= hSize;
-
-    return HUFv07_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
-}
-
-size_t HUFv07_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUFv07_CREATE_STATIC_DTABLEX4(DTable, HUFv07_TABLELOG_MAX);
-    return HUFv07_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
-}
-
-static size_t HUFv07_decompress4X4_usingDTable_internal(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUFv07_DTable* DTable)
-{
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-
-    {   const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable+1;
-        const HUFv07_DEltX4* const dt = (const HUFv07_DEltX4*)dtPtr;
-
-        /* Init */
-        BITv07_DStream_t bitD1;
-        BITv07_DStream_t bitD2;
-        BITv07_DStream_t bitD3;
-        BITv07_DStream_t bitD4;
-        size_t const length1 = MEM_readLE16(istart);
-        size_t const length2 = MEM_readLE16(istart+2);
-        size_t const length3 = MEM_readLE16(istart+4);
-        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        size_t const segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-        DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
-        U32 const dtLog = dtd.tableLog;
-
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        { size_t const errorCode = BITv07_initDStream(&bitD1, istart1, length1);
-          if (HUFv07_isError(errorCode)) return errorCode; }
-        { size_t const errorCode = BITv07_initDStream(&bitD2, istart2, length2);
-          if (HUFv07_isError(errorCode)) return errorCode; }
-        { size_t const errorCode = BITv07_initDStream(&bitD3, istart3, length3);
-          if (HUFv07_isError(errorCode)) return errorCode; }
-        { size_t const errorCode = BITv07_initDStream(&bitD4, istart4, length4);
-          if (HUFv07_isError(errorCode)) return errorCode; }
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
-        for ( ; (endSignal==BITv07_DStream_unfinished) && (op4<(oend-7)) ; ) {
-            HUFv07_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUFv07_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUFv07_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUFv07_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUFv07_DECODE_SYMBOLX4_1(op1, &bitD1);
-            HUFv07_DECODE_SYMBOLX4_1(op2, &bitD2);
-            HUFv07_DECODE_SYMBOLX4_1(op3, &bitD3);
-            HUFv07_DECODE_SYMBOLX4_1(op4, &bitD4);
-            HUFv07_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUFv07_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUFv07_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUFv07_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUFv07_DECODE_SYMBOLX4_0(op1, &bitD1);
-            HUFv07_DECODE_SYMBOLX4_0(op2, &bitD2);
-            HUFv07_DECODE_SYMBOLX4_0(op3, &bitD3);
-            HUFv07_DECODE_SYMBOLX4_0(op4, &bitD4);
-
-            endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 supposed already verified within main loop */
-
-        /* finish bitStreams one by one */
-        HUFv07_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
-        HUFv07_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
-        HUFv07_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
-        HUFv07_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        { U32 const endCheck = BITv07_endOfDStream(&bitD1) & BITv07_endOfDStream(&bitD2) & BITv07_endOfDStream(&bitD3) & BITv07_endOfDStream(&bitD4);
-          if (!endCheck) return ERROR(corruption_detected); }
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-size_t HUFv07_decompress4X4_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUFv07_DTable* DTable)
-{
-    DTableDesc dtd = HUFv07_getDTableDesc(DTable);
-    if (dtd.tableType != 1) return ERROR(GENERIC);
-    return HUFv07_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
-}
-
-
-size_t HUFv07_decompress4X4_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t hSize = HUFv07_readDTableX4 (dctx, cSrc, cSrcSize);
-    if (HUFv07_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize; cSrcSize -= hSize;
-
-    return HUFv07_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
-}
-
-size_t HUFv07_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUFv07_CREATE_STATIC_DTABLEX4(DTable, HUFv07_TABLELOG_MAX);
-    return HUFv07_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
-}
-
-
-/* ********************************/
-/* Generic decompression selector */
-/* ********************************/
-
-size_t HUFv07_decompress1X_usingDTable(void* dst, size_t maxDstSize,
-                                    const void* cSrc, size_t cSrcSize,
-                                    const HUFv07_DTable* DTable)
-{
-    DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
-    return dtd.tableType ? HUFv07_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
-                           HUFv07_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
-}
-
-size_t HUFv07_decompress4X_usingDTable(void* dst, size_t maxDstSize,
-                                    const void* cSrc, size_t cSrcSize,
-                                    const HUFv07_DTable* DTable)
-{
-    DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
-    return dtd.tableType ? HUFv07_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
-                           HUFv07_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
-}
-
-
-typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
-static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
-{
-    /* single, double, quad */
-    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
-    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
-    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
-    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
-    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
-    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
-    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
-    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
-    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
-    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
-    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
-    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
-    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
-    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
-    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
-    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
-};
-
-/** HUFv07_selectDecoder() :
-*   Tells which decoder is likely to decode faster,
-*   based on a set of pre-determined metrics.
-*   @return : 0==HUFv07_decompress4X2, 1==HUFv07_decompress4X4 .
-*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
-U32 HUFv07_selectDecoder (size_t dstSize, size_t cSrcSize)
-{
-    /* decoder timing evaluation */
-    U32 const Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */
-    U32 const D256 = (U32)(dstSize >> 8);
-    U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
-    U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
-    DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, for cache eviction */
-
-    return DTime1 < DTime0;
-}
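
A minimal, self-contained sketch (editorial, not part of the vendored file) of how the selector above weighs the two table types; the byte counts are illustrative and the quoted row values come from the Q==6 entry of algoTime[][]:

```c
#include <stdio.h>

int main(void)
{
    size_t const dstSize  = 64 * 1024;   /* regenerated size (assumption: <= 128 KB) */
    size_t const cSrcSize = 24 * 1024;   /* compressed size (illustrative)           */
    unsigned const Q    = (unsigned)(cSrcSize * 16 / dstSize);   /* = 6              */
    unsigned const D256 = (unsigned)(dstSize >> 8);              /* = 256            */
    /* Q==6 row of algoTime : single-symbol {883,128}, double-symbol {1437,74}
     *   DTime0 =  883 + 128*256 = 33651
     *   DTime1 = 1437 +  74*256 = 20381, plus 1/8 cache penalty -> 22928
     * 22928 < 33651, so HUFv07_selectDecoder() returns 1 (HUFv07_decompress4X4).    */
    printf("Q=%u, D256=%u -> double-symbol decoder\n", Q, D256);
    return 0;
}
```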
-
-
-typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-
-size_t HUFv07_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    static const decompressionAlgo decompress[2] = { HUFv07_decompress4X2, HUFv07_decompress4X4 };
-
-    /* validation checks */
-    if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
-    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
-    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
-
-    {   U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
-        return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
-    }
-
-    //return HUFv07_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */
-    //return HUFv07_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */
-}
-
-size_t HUFv07_decompress4X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    /* validation checks */
-    if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
-    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
-    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
-
-    {   U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
-        return algoNb ? HUFv07_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
-                        HUFv07_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
-    }
-}
-
-size_t HUFv07_decompress4X_hufOnly (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    /* validation checks */
-    if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected);   /* invalid */
-
-    {   U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
-        return algoNb ? HUFv07_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
-                        HUFv07_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
-    }
-}
-
-size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    /* validation checks */
-    if (dstSize == 0) return ERROR(dstSize_tooSmall);
-    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
-    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
-    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
-
-    {   U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
-        return algoNb ? HUFv07_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
-                        HUFv07_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
-    }
-}
-/*
-    Common functions of Zstd compression library
-    Copyright (C) 2015-2016, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd homepage : http://www.zstd.net/
-*/
-
-
-
-/*-****************************************
-*  ZSTD Error Management
-******************************************/
-/*! ZSTDv07_isError() :
-*   tells if a return value is an error code */
-unsigned ZSTDv07_isError(size_t code) { return ERR_isError(code); }
-
-/*! ZSTDv07_getErrorName() :
-*   provides error code string from function result (useful for debugging) */
-const char* ZSTDv07_getErrorName(size_t code) { return ERR_getErrorName(code); }
-
-
-
-/* **************************************************************
-*  ZBUFF Error Management
-****************************************************************/
-unsigned ZBUFFv07_isError(size_t errorCode) { return ERR_isError(errorCode); }
-
-const char* ZBUFFv07_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
-
-
-
-static void* ZSTDv07_defaultAllocFunction(void* opaque, size_t size)
-{
-    void* address = malloc(size);
-    (void)opaque;
-    /* printf("alloc %p, %d opaque=%p \n", address, (int)size, opaque); */
-    return address;
-}
-
-static void ZSTDv07_defaultFreeFunction(void* opaque, void* address)
-{
-    (void)opaque;
-    /* if (address) printf("free %p opaque=%p \n", address, opaque); */
-    free(address);
-}
-/*
-    zstd_internal - common functions to include
-    Header File for include
-    Copyright (C) 2014-2016, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd homepage : https://www.zstd.net
-*/
-#ifndef ZSTDv07_CCOMMON_H_MODULE
-#define ZSTDv07_CCOMMON_H_MODULE
-
-
-/*-*************************************
-*  Common macros
-***************************************/
-#define MIN(a,b) ((a)<(b) ? (a) : (b))
-#define MAX(a,b) ((a)>(b) ? (a) : (b))
-
-
-/*-*************************************
-*  Common constants
-***************************************/
-#define ZSTDv07_OPT_NUM    (1<<12)
-#define ZSTDv07_DICT_MAGIC  0xEC30A437   /* v0.7 */
-
-#define ZSTDv07_REP_NUM    3
-#define ZSTDv07_REP_INIT   ZSTDv07_REP_NUM
-#define ZSTDv07_REP_MOVE   (ZSTDv07_REP_NUM-1)
-static const U32 repStartValue[ZSTDv07_REP_NUM] = { 1, 4, 8 };
-
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
-
-#define BIT7 128
-#define BIT6  64
-#define BIT5  32
-#define BIT4  16
-#define BIT1   2
-#define BIT0   1
-
-#define ZSTDv07_WINDOWLOG_ABSOLUTEMIN 10
-static const size_t ZSTDv07_fcs_fieldSize[4] = { 0, 2, 4, 8 };
-static const size_t ZSTDv07_did_fieldSize[4] = { 0, 1, 2, 4 };
-
-#define ZSTDv07_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
-static const size_t ZSTDv07_blockHeaderSize = ZSTDv07_BLOCKHEADERSIZE;
-typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
-
-#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
-#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */
-
-#define HufLog 12
-typedef enum { lbt_huffman, lbt_repeat, lbt_raw, lbt_rle } litBlockType_t;
-
-#define LONGNBSEQ 0x7F00
-
-#define MINMATCH 3
-#define EQUAL_READ32 4
-
-#define Litbits  8
-#define MaxLit ((1<<Litbits) - 1)
-#define MaxML  52
-#define MaxLL  35
-#define MaxOff 28
-#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
-#define MLFSELog    9
-#define LLFSELog    9
-#define OffFSELog   8
-
-#define FSEv07_ENCODING_RAW     0
-#define FSEv07_ENCODING_RLE     1
-#define FSEv07_ENCODING_STATIC  2
-#define FSEv07_ENCODING_DYNAMIC 3
-
-#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
-
-static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,
-                                     13,14,15,16 };
-static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
-                                             2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
-                                            -1,-1,-1,-1 };
-static const U32 LL_defaultNormLog = 6;
-
-static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11,
-                                     12,13,14,15,16 };
-static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
-                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
-                                            -1,-1,-1,-1,-1 };
-static const U32 ML_defaultNormLog = 6;
-
-static const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
-                                              1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
-static const U32 OF_defaultNormLog = 5;
-
-
-/*-*******************************************
-*  Shared functions to include for inlining
-*********************************************/
-static void ZSTDv07_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
-#define COPY8(d,s) { ZSTDv07_copy8(d,s); d+=8; s+=8; }
-
-/*! ZSTDv07_wildcopy() :
-*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
-#define WILDCOPY_OVERLENGTH 8
-MEM_STATIC void ZSTDv07_wildcopy(void* dst, const void* src, ptrdiff_t length)
-{
-    const BYTE* ip = (const BYTE*)src;
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + length;
-    do
-        COPY8(op, ip)
-    while (op < oend);
-}
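
A hedged, self-contained illustration (buffer sizes are made up) of the caller contract implied by the note above: destinations handed to ZSTDv07_wildcopy() need WILDCOPY_OVERLENGTH bytes of slack past the requested length, which is why the decoder's litBuffer below is padded by that amount.

```c
#include <stdlib.h>
#include <string.h>

#define WILDCOPY_OVERLENGTH 8   /* mirrors the constant defined above */

int main(void)
{
    size_t const length = 21;                                   /* bytes actually needed */
    unsigned char src[21 + WILDCOPY_OVERLENGTH] = { 0 };
    unsigned char* dst = malloc(length + WILDCOPY_OVERLENGTH);  /* padded on purpose     */
    if (dst == NULL) return 1;
    /* stand-in for ZSTDv07_wildcopy(dst, src, length): copies in 8-byte steps,
     * so it may write up to 7 bytes (8 if length==0) past `length`.                     */
    {   size_t i;
        for (i = 0; i < length; i += 8) memcpy(dst + i, src + i, 8);
    }
    free(dst);
    return 0;
}
```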
-
-
-/*-*******************************************
-*  Private interfaces
-*********************************************/
-typedef struct ZSTDv07_stats_s ZSTDv07_stats_t;
-
-typedef struct {
-    U32 off;
-    U32 len;
-} ZSTDv07_match_t;
-
-typedef struct {
-    U32 price;
-    U32 off;
-    U32 mlen;
-    U32 litlen;
-    U32 rep[ZSTDv07_REP_INIT];
-} ZSTDv07_optimal_t;
-
-struct ZSTDv07_stats_s { U32 unused; };
-
-typedef struct {
-    void* buffer;
-    U32*  offsetStart;
-    U32*  offset;
-    BYTE* offCodeStart;
-    BYTE* litStart;
-    BYTE* lit;
-    U16*  litLengthStart;
-    U16*  litLength;
-    BYTE* llCodeStart;
-    U16*  matchLengthStart;
-    U16*  matchLength;
-    BYTE* mlCodeStart;
-    U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
-    U32   longLengthPos;
-    /* opt */
-    ZSTDv07_optimal_t* priceTable;
-    ZSTDv07_match_t* matchTable;
-    U32* matchLengthFreq;
-    U32* litLengthFreq;
-    U32* litFreq;
-    U32* offCodeFreq;
-    U32  matchLengthSum;
-    U32  matchSum;
-    U32  litLengthSum;
-    U32  litSum;
-    U32  offCodeSum;
-    U32  log2matchLengthSum;
-    U32  log2matchSum;
-    U32  log2litLengthSum;
-    U32  log2litSum;
-    U32  log2offCodeSum;
-    U32  factor;
-    U32  cachedPrice;
-    U32  cachedLitLength;
-    const BYTE* cachedLiterals;
-    ZSTDv07_stats_t stats;
-} seqStore_t;
-
-void ZSTDv07_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq);
-
-/* custom memory allocation functions */
-static const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction, ZSTDv07_defaultFreeFunction, NULL };
-
-#endif   /* ZSTDv07_CCOMMON_H_MODULE */
-/*
-    zstd - standard compression library
-    Copyright (C) 2014-2016, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd homepage : http://www.zstd.net
-*/
-
-/* ***************************************************************
-*  Tuning parameters
-*****************************************************************/
-/*!
- * HEAPMODE :
- * Select how the default decompression function ZSTDv07_decompress() will allocate memory :
- * on the stack (0), or on the heap (1, requires malloc()).
- */
-#ifndef ZSTDv07_HEAPMODE
-#  define ZSTDv07_HEAPMODE 1
-#endif
-
-
-/*-*******************************************************
-*  Compiler specifics
-*********************************************************/
-#ifdef _MSC_VER    /* Visual Studio */
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
-#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */
-#endif
-
-
-/*-*************************************
-*  Macros
-***************************************/
-#define ZSTDv07_isError ERR_isError   /* for inlining */
-#define FSEv07_isError  ERR_isError
-#define HUFv07_isError  ERR_isError
-
-
-/*_*******************************************************
-*  Memory operations
-**********************************************************/
-static void ZSTDv07_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
-
-
-/*-*************************************************************
-*   Context management
-***************************************************************/
-typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
-               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
-               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTDv07_dStage;
-
-struct ZSTDv07_DCtx_s
-{
-    FSEv07_DTable LLTable[FSEv07_DTABLE_SIZE_U32(LLFSELog)];
-    FSEv07_DTable OffTable[FSEv07_DTABLE_SIZE_U32(OffFSELog)];
-    FSEv07_DTable MLTable[FSEv07_DTABLE_SIZE_U32(MLFSELog)];
-    HUFv07_DTable hufTable[HUFv07_DTABLE_SIZE(HufLog)];  /* can accommodate HUFv07_decompress4X */
-    const void* previousDstEnd;
-    const void* base;
-    const void* vBase;
-    const void* dictEnd;
-    size_t expected;
-    U32 rep[3];
-    ZSTDv07_frameParams fParams;
-    blockType_t bType;   /* used in ZSTDv07_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
-    ZSTDv07_dStage stage;
-    U32 litEntropy;
-    U32 fseEntropy;
-    XXH64_state_t xxhState;
-    size_t headerSize;
-    U32 dictID;
-    const BYTE* litPtr;
-    ZSTDv07_customMem customMem;
-    size_t litSize;
-    BYTE litBuffer[ZSTDv07_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
-    BYTE headerBuffer[ZSTDv07_FRAMEHEADERSIZE_MAX];
-};  /* typedef'd to ZSTDv07_DCtx within "zstd_static.h" */
-
-int ZSTDv07_isSkipFrame(ZSTDv07_DCtx* dctx);
-
-size_t ZSTDv07_sizeofDCtx (const ZSTDv07_DCtx* dctx) { return sizeof(*dctx); }
-
-size_t ZSTDv07_estimateDCtxSize(void) { return sizeof(ZSTDv07_DCtx); }
-
-size_t ZSTDv07_decompressBegin(ZSTDv07_DCtx* dctx)
-{
-    dctx->expected = ZSTDv07_frameHeaderSize_min;
-    dctx->stage = ZSTDds_getFrameHeaderSize;
-    dctx->previousDstEnd = NULL;
-    dctx->base = NULL;
-    dctx->vBase = NULL;
-    dctx->dictEnd = NULL;
-    dctx->hufTable[0] = (HUFv07_DTable)((HufLog)*0x1000001);
-    dctx->litEntropy = dctx->fseEntropy = 0;
-    dctx->dictID = 0;
-    { int i; for (i=0; i<ZSTDv07_REP_NUM; i++) dctx->rep[i] = repStartValue[i]; }
-    return 0;
-}
-
-ZSTDv07_DCtx* ZSTDv07_createDCtx_advanced(ZSTDv07_customMem customMem)
-{
-    ZSTDv07_DCtx* dctx;
-
-    if (!customMem.customAlloc && !customMem.customFree)
-        customMem = defaultCustomMem;
-
-    if (!customMem.customAlloc || !customMem.customFree)
-        return NULL;
-
-    dctx = (ZSTDv07_DCtx*) customMem.customAlloc(customMem.opaque, sizeof(ZSTDv07_DCtx));
-    if (!dctx) return NULL;
-    memcpy(&dctx->customMem, &customMem, sizeof(ZSTDv07_customMem));
-    ZSTDv07_decompressBegin(dctx);
-    return dctx;
-}
-
-ZSTDv07_DCtx* ZSTDv07_createDCtx(void)
-{
-    return ZSTDv07_createDCtx_advanced(defaultCustomMem);
-}
-
-size_t ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx)
-{
-    if (dctx==NULL) return 0;   /* support free on NULL */
-    dctx->customMem.customFree(dctx->customMem.opaque, dctx);
-    return 0;   /* reserved as a potential error code in the future */
-}
-
-void ZSTDv07_copyDCtx(ZSTDv07_DCtx* dstDCtx, const ZSTDv07_DCtx* srcDCtx)
-{
-    memcpy(dstDCtx, srcDCtx,
-           sizeof(ZSTDv07_DCtx) - (ZSTDv07_BLOCKSIZE_ABSOLUTEMAX+WILDCOPY_OVERLENGTH + ZSTDv07_frameHeaderSize_max));  /* no need to copy workspace */
-}
-
-
-/*-*************************************************************
-*   Decompression section
-***************************************************************/
-
-/* Frame format description
-   Frame Header -  [ Block Header - Block ] - Frame End
-   1) Frame Header
-      - 4 bytes - Magic Number : ZSTDv07_MAGICNUMBER (defined within zstd.h)
-      - 1 byte  - Frame Descriptor
-   2) Block Header
-      - 3 bytes, starting with a 2-bit descriptor
-                 Uncompressed, Compressed, Frame End, unused
-   3) Block
-      See Block Format Description
-   4) Frame End
-      - 3 bytes, compatible with Block Header
-*/
-
-
-/* Frame Header :
-
-   1 byte - FrameHeaderDescription :
-   bit 0-1 : dictID (0, 1, 2 or 4 bytes)
-   bit 2   : checksumFlag
-   bit 3   : reserved (must be zero)
-   bit 4   : reserved (unused, can be any value)
-   bit 5   : Single Segment (if 1, WindowLog byte is not present)
-   bit 6-7 : FrameContentFieldSize (0, 2, 4, or 8)
-             if (SkippedWindowLog && !FrameContentFieldsize) FrameContentFieldsize=1;
-
-   Optional : WindowLog (0 or 1 byte)
-   bit 0-2 : octal Fractional (1/8th)
-   bit 3-7 : Power of 2, with 0 = 1 KB (up to 2 TB)
-
-   Optional : dictID (0, 1, 2 or 4 bytes)
-   Automatic adaptation
-   0 : no dictID
-   1 : 1 - 255
-   2 : 256 - 65535
-   4 : all other values
-
-   Optional : content size (0, 1, 2, 4 or 8 bytes)
-   0 : unknown          (fcfs==0 and swl==0)
-   1 : 0-255 bytes      (fcfs==0 and swl==1)
-   2 : 256 - 65535+256  (fcfs==1)
-   4 : 0 - 4GB-1        (fcfs==2)
-   8 : 0 - 16EB-1       (fcfs==3)
-*/
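
A hedged worked example (editorial; the byte value is hypothetical) of the optional WindowLog byte described above, mirroring the arithmetic used in ZSTDv07_getFrameParams() further down with ZSTDv07_WINDOWLOG_ABSOLUTEMIN == 10:

```c
#include <stdio.h>

int main(void)
{
    unsigned char const wlByte = 0x23;                 /* 0b00100 011               */
    unsigned const windowLog  = (wlByte >> 3) + 10;    /* 4 + ABSOLUTEMIN -> 14     */
    unsigned windowSize = 1u << windowLog;             /* 16384 (16 KB base)        */
    windowSize += (windowSize >> 3) * (wlByte & 7);    /* + 3 * 1/8th -> 22528      */
    printf("windowSize = %u bytes\n", windowSize);
    return 0;
}
```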
-
-
-/* Compressed Block, format description
-
-   Block = Literal Section - Sequences Section
-   Prerequisite : size of (compressed) block, maximum size of regenerated data
-
-   1) Literal Section
-
-   1.1) Header : 1-5 bytes
-        flags: 2 bits
-            00 compressed by Huff0
-            01 unused
-            10 is Raw (uncompressed)
-            11 is Rle
-            Note : using 01 => Huff0 with precomputed table ?
-            Note : delta map ? => compressed ?
-
-   1.1.1) Huff0-compressed literal block : 3-5 bytes
-            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
-            srcSize < 1 KB => 3 bytes (2-2-10-10)
-            srcSize < 16KB => 4 bytes (2-2-14-14)
-            else           => 5 bytes (2-2-18-18)
-            big endian convention
-
-   1.1.2) Raw (uncompressed) literal block header : 1-3 bytes
-        size :  5 bits: (IS_RAW<<6) + (0<<4) + size
-               12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)
-                        size&255
-               20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)
-                        size>>8&255
-                        size&255
-
-   1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes
-        size :  5 bits: (IS_RLE<<6) + (0<<4) + size
-               12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)
-                        size&255
-               20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)
-                        size>>8&255
-                        size&255
-
-   1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes
-            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
-            srcSize < 1 KB => 3 bytes (2-2-10-10)
-            srcSize < 16KB => 4 bytes (2-2-14-14)
-            else           => 5 bytes (2-2-18-18)
-            big endian convention
-
-        1- CTable available (stored into workspace ?)
-        2- Small input (fast heuristic ? Full comparison ? depend on clevel ?)
-
-
-   1.2) Literal block content
-
-   1.2.1) Huff0 block, using sizes from header
-        See Huff0 format
-
-   1.2.2) Huff0 block, using prepared table
-
-   1.2.3) Raw content
-
-   1.2.4) single byte
-
-
-   2) Sequences section
-      TO DO
-*/
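
A hedged worked example (header byte invented) of the RLE literal header layout sketched in 1.1.3 above; the same field extraction appears in the lbt_rle branch of ZSTDv07_decodeLiteralsBlock() further down.

```c
#include <stdio.h>

int main(void)
{
    /* hypothetical 2-byte RLE literal section: header byte 0xD4, literal 'A' */
    unsigned char const istart[2] = { 0xD4, 'A' };
    unsigned const blockKind = istart[0] >> 6;          /* 3 -> lbt_rle            */
    unsigned const lhSize    = (istart[0] >> 4) & 3;    /* 0 or 1 -> 1-byte header */
    unsigned const litSize   = istart[0] & 31;          /* 20 repeated literals    */
    printf("kind=%u lhSize=%u: byte '%c' repeated %u times\n",
           blockKind, lhSize, istart[1], litSize);
    return 0;
}
```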
-
-/** ZSTDv07_frameHeaderSize() :
-*   srcSize must be >= ZSTDv07_frameHeaderSize_min.
-*   @return : size of the Frame Header */
-static size_t ZSTDv07_frameHeaderSize(const void* src, size_t srcSize)
-{
-    if (srcSize < ZSTDv07_frameHeaderSize_min) return ERROR(srcSize_wrong);
-    {   BYTE const fhd = ((const BYTE*)src)[4];
-        U32 const dictID= fhd & 3;
-        U32 const directMode = (fhd >> 5) & 1;
-        U32 const fcsId = fhd >> 6;
-        return ZSTDv07_frameHeaderSize_min + !directMode + ZSTDv07_did_fieldSize[dictID] + ZSTDv07_fcs_fieldSize[fcsId]
-                + (directMode && !ZSTDv07_fcs_fieldSize[fcsId]);
-    }
-}
-
-
-/** ZSTDv07_getFrameParams() :
-*   decode Frame Header, or require larger `srcSize`.
-*   @return : 0, `fparamsPtr` is correctly filled,
-*            >0, `srcSize` is too small, result is expected `srcSize`,
-*             or an error code, which can be tested using ZSTDv07_isError() */
-size_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-
-    if (srcSize < ZSTDv07_frameHeaderSize_min) return ZSTDv07_frameHeaderSize_min;
-    memset(fparamsPtr, 0, sizeof(*fparamsPtr));
-    if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) {
-        if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTDv07_MAGIC_SKIPPABLE_START) {
-            if (srcSize < ZSTDv07_skippableHeaderSize) return ZSTDv07_skippableHeaderSize; /* magic number + skippable frame length */
-            fparamsPtr->frameContentSize = MEM_readLE32((const char *)src + 4);
-            fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
-            return 0;
-        }
-        return ERROR(prefix_unknown);
-    }
-
-    /* ensure there is enough `srcSize` to fully read/decode frame header */
-    { size_t const fhsize = ZSTDv07_frameHeaderSize(src, srcSize);
-      if (srcSize < fhsize) return fhsize; }
-
-    {   BYTE const fhdByte = ip[4];
-        size_t pos = 5;
-        U32 const dictIDSizeCode = fhdByte&3;
-        U32 const checksumFlag = (fhdByte>>2)&1;
-        U32 const directMode = (fhdByte>>5)&1;
-        U32 const fcsID = fhdByte>>6;
-        U32 const windowSizeMax = 1U << ZSTDv07_WINDOWLOG_MAX;
-        U32 windowSize = 0;
-        U32 dictID = 0;
-        U64 frameContentSize = 0;
-        if ((fhdByte & 0x08) != 0)   /* reserved bits, which must be zero */
-            return ERROR(frameParameter_unsupported);
-        if (!directMode) {
-            BYTE const wlByte = ip[pos++];
-            U32 const windowLog = (wlByte >> 3) + ZSTDv07_WINDOWLOG_ABSOLUTEMIN;
-            if (windowLog > ZSTDv07_WINDOWLOG_MAX)
-                return ERROR(frameParameter_unsupported);
-            windowSize = (1U << windowLog);
-            windowSize += (windowSize >> 3) * (wlByte&7);
-        }
-
-        switch(dictIDSizeCode)
-        {
-            default:   /* impossible */
-            case 0 : break;
-            case 1 : dictID = ip[pos]; pos++; break;
-            case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
-            case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
-        }
-        switch(fcsID)
-        {
-            default:   /* impossible */
-            case 0 : if (directMode) frameContentSize = ip[pos]; break;
-            case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
-            case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
-            case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
-        }
-        if (!windowSize) windowSize = (U32)frameContentSize;
-        if (windowSize > windowSizeMax)
-            return ERROR(frameParameter_unsupported);
-        fparamsPtr->frameContentSize = frameContentSize;
-        fparamsPtr->windowSize = windowSize;
-        fparamsPtr->dictID = dictID;
-        fparamsPtr->checksumFlag = checksumFlag;
-    }
-    return 0;
-}
-
-
-/** ZSTDv07_getDecompressedSize() :
-*   compatible with legacy mode
-*   @return : decompressed size if known, 0 otherwise
-              note : 0 can mean any of the following :
-                   - decompressed size is not provided within frame header
-                   - frame header unknown / not supported
-                   - frame header not completely provided (`srcSize` too small) */
-unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize)
-{
-    ZSTDv07_frameParams fparams;
-    size_t const frResult = ZSTDv07_getFrameParams(&fparams, src, srcSize);
-    if (frResult!=0) return 0;
-    return fparams.frameContentSize;
-}
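
A hedged usage sketch for the helper above (names other than the API call are invented, and linking against the legacy decoder is assumed): since 0 is ambiguous, callers should treat it as "unknown" rather than "empty".

```c
#include <stddef.h>
#include <stdio.h>

/* prototype as exposed by the legacy v0.7 API above */
unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize);

static void reportContentSize(const void* frame, size_t frameSize)
{
    unsigned long long const content = ZSTDv07_getDecompressedSize(frame, frameSize);
    if (content == 0)
        printf("content size unknown (absent, unsupported header, or srcSize too small)\n");
    else
        printf("frame regenerates %llu bytes\n", content);
}
```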
-
-
-/** ZSTDv07_decodeFrameHeader() :
-*   `srcSize` must be the size provided by ZSTDv07_frameHeaderSize().
-*   @return : 0 if success, or an error code, which can be tested using ZSTDv07_isError() */
-static size_t ZSTDv07_decodeFrameHeader(ZSTDv07_DCtx* dctx, const void* src, size_t srcSize)
-{
-    size_t const result = ZSTDv07_getFrameParams(&(dctx->fParams), src, srcSize);
-    if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) return ERROR(dictionary_wrong);
-    if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
-    return result;
-}
-
-
-typedef struct
-{
-    blockType_t blockType;
-    U32 origSize;
-} blockProperties_t;
-
-/*! ZSTDv07_getcBlockSize() :
-*   Provides the size of compressed block from block header `src` */
-static size_t ZSTDv07_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
-{
-    const BYTE* const in = (const BYTE* const)src;
-    U32 cSize;
-
-    if (srcSize < ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
-
-    bpPtr->blockType = (blockType_t)((*in) >> 6);
-    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
-    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
-
-    if (bpPtr->blockType == bt_end) return 0;
-    if (bpPtr->blockType == bt_rle) return 1;
-    return cSize;
-}
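
An illustrative, self-contained decode (bytes hypothetical) of the 3-byte block header parsed above:

```c
#include <stdio.h>

int main(void)
{
    unsigned char const in[3] = { 0x01, 0x12, 0x34 };                  /* header bytes       */
    unsigned const blockType = in[0] >> 6;                             /* 0 -> bt_compressed */
    unsigned const cSize = in[2] + (in[1] << 8) + ((in[0] & 7) << 16); /* 0x11234            */
    printf("blockType=%u cSize=%u\n", blockType, cSize);               /* -> 0, 70196        */
    return 0;
}
```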
-
-
-static size_t ZSTDv07_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
-    memcpy(dst, src, srcSize);
-    return srcSize;
-}
-
-
-/*! ZSTDv07_decodeLiteralsBlock() :
-    @return : nb of bytes read from src (< srcSize ) */
-static size_t ZSTDv07_decodeLiteralsBlock(ZSTDv07_DCtx* dctx,
-                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
-{
-    const BYTE* const istart = (const BYTE*) src;
-
-    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
-
-    switch((litBlockType_t)(istart[0]>> 6))
-    {
-    case lbt_huffman:
-        {   size_t litSize, litCSize, singleStream=0;
-            U32 lhSize = (istart[0] >> 4) & 3;
-            if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for lhSize, + cSize (+nbSeq) */
-            switch(lhSize)
-            {
-            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */
-                /* 2 - 2 - 10 - 10 */
-                lhSize=3;
-                singleStream = istart[0] & 16;
-                litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);
-                litCSize = ((istart[1] &  3) << 8) + istart[2];
-                break;
-            case 2:
-                /* 2 - 2 - 14 - 14 */
-                lhSize=4;
-                litSize  = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);
-                litCSize = ((istart[2] & 63) <<  8) + istart[3];
-                break;
-            case 3:
-                /* 2 - 2 - 18 - 18 */
-                lhSize=5;
-                litSize  = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);
-                litCSize = ((istart[2] &  3) << 16) + (istart[3] << 8) + istart[4];
-                break;
-            }
-            if (litSize > ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);
-            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
-
-            if (HUFv07_isError(singleStream ?
-                            HUFv07_decompress1X2_DCtx(dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) :
-                            HUFv07_decompress4X_hufOnly (dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) ))
-                return ERROR(corruption_detected);
-
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            dctx->litEntropy = 1;
-            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
-            return litCSize + lhSize;
-        }
-    case lbt_repeat:
-        {   size_t litSize, litCSize;
-            U32 lhSize = ((istart[0]) >> 4) & 3;
-            if (lhSize != 1)  /* only case supported for now : small litSize, single stream */
-                return ERROR(corruption_detected);
-            if (dctx->litEntropy==0)
-                return ERROR(dictionary_corrupted);
-
-            /* 2 - 2 - 10 - 10 */
-            lhSize=3;
-            litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);
-            litCSize = ((istart[1] &  3) << 8) + istart[2];
-            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
-
-            {   size_t const errorCode = HUFv07_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTable);
-                if (HUFv07_isError(errorCode)) return ERROR(corruption_detected);
-            }
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
-            return litCSize + lhSize;
-        }
-    case lbt_raw:
-        {   size_t litSize;
-            U32 lhSize = ((istart[0]) >> 4) & 3;
-            switch(lhSize)
-            {
-            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */
-                lhSize=1;
-                litSize = istart[0] & 31;
-                break;
-            case 2:
-                litSize = ((istart[0] & 15) << 8) + istart[1];
-                break;
-            case 3:
-                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
-                break;
-            }
-
-            if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
-                if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
-                memcpy(dctx->litBuffer, istart+lhSize, litSize);
-                dctx->litPtr = dctx->litBuffer;
-                dctx->litSize = litSize;
-                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
-                return lhSize+litSize;
-            }
-            /* direct reference into compressed stream */
-            dctx->litPtr = istart+lhSize;
-            dctx->litSize = litSize;
-            return lhSize+litSize;
-        }
-    case lbt_rle:
-        {   size_t litSize;
-            U32 lhSize = ((istart[0]) >> 4) & 3;
-            switch(lhSize)
-            {
-            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */
-                lhSize = 1;
-                litSize = istart[0] & 31;
-                break;
-            case 2:
-                litSize = ((istart[0] & 15) << 8) + istart[1];
-                break;
-            case 3:
-                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
-                if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
-                break;
-            }
-            if (litSize > ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);
-            memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
-            dctx->litPtr = dctx->litBuffer;
-            dctx->litSize = litSize;
-            return lhSize+1;
-        }
-    default:
-        return ERROR(corruption_detected);   /* impossible */
-    }
-}
-
-
-/*! ZSTDv07_buildSeqTable() :
-    @return : nb bytes read from src,
-              or an error code if it fails, testable with ZSTDv07_isError()
-*/
-static size_t ZSTDv07_buildSeqTable(FSEv07_DTable* DTable, U32 type, U32 max, U32 maxLog,
-                                 const void* src, size_t srcSize,
-                                 const S16* defaultNorm, U32 defaultLog, U32 flagRepeatTable)
-{
-    switch(type)
-    {
-    case FSEv07_ENCODING_RLE :
-        if (!srcSize) return ERROR(srcSize_wrong);
-        if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
-        FSEv07_buildDTable_rle(DTable, *(const BYTE*)src);   /* if *src > max, data is corrupted */
-        return 1;
-    case FSEv07_ENCODING_RAW :
-        FSEv07_buildDTable(DTable, defaultNorm, max, defaultLog);
-        return 0;
-    case FSEv07_ENCODING_STATIC:
-        if (!flagRepeatTable) return ERROR(corruption_detected);
-        return 0;
-    default :   /* impossible */
-    case FSEv07_ENCODING_DYNAMIC :
-        {   U32 tableLog;
-            S16 norm[MaxSeq+1];
-            size_t const headerSize = FSEv07_readNCount(norm, &max, &tableLog, src, srcSize);
-            if (FSEv07_isError(headerSize)) return ERROR(corruption_detected);
-            if (tableLog > maxLog) return ERROR(corruption_detected);
-            FSEv07_buildDTable(DTable, norm, max, tableLog);
-            return headerSize;
-    }   }
-}
-
-
-static size_t ZSTDv07_decodeSeqHeaders(int* nbSeqPtr,
-                             FSEv07_DTable* DTableLL, FSEv07_DTable* DTableML, FSEv07_DTable* DTableOffb, U32 flagRepeatTable,
-                             const void* src, size_t srcSize)
-{
-    const BYTE* const istart = (const BYTE* const)src;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* ip = istart;
-
-    /* check */
-    if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
-
-    /* SeqHead */
-    {   int nbSeq = *ip++;
-        if (!nbSeq) { *nbSeqPtr=0; return 1; }
-        if (nbSeq > 0x7F) {
-            if (nbSeq == 0xFF) {
-                if (ip+2 > iend) return ERROR(srcSize_wrong);
-                nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
-            } else {
-                if (ip >= iend) return ERROR(srcSize_wrong);
-                nbSeq = ((nbSeq-0x80)<<8) + *ip++;
-            }
-        }
-        *nbSeqPtr = nbSeq;
-    }
-
-    /* FSE table descriptors */
-    if (ip + 4 > iend) return ERROR(srcSize_wrong); /* min : header byte + all 3 are "raw", hence no header, but at least xxLog bits per type */
-    {   U32 const LLtype  = *ip >> 6;
-        U32 const OFtype = (*ip >> 4) & 3;
-        U32 const MLtype  = (*ip >> 2) & 3;
-        ip++;
-
-        /* Build DTables */
-        {   size_t const llhSize = ZSTDv07_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable);
-            if (ZSTDv07_isError(llhSize)) return ERROR(corruption_detected);
-            ip += llhSize;
-        }
-        {   size_t const ofhSize = ZSTDv07_buildSeqTable(DTableOffb, OFtype, MaxOff, OffFSELog, ip, iend-ip, OF_defaultNorm, OF_defaultNormLog, flagRepeatTable);
-            if (ZSTDv07_isError(ofhSize)) return ERROR(corruption_detected);
-            ip += ofhSize;
-        }
-        {   size_t const mlhSize = ZSTDv07_buildSeqTable(DTableML, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_defaultNorm, ML_defaultNormLog, flagRepeatTable);
-            if (ZSTDv07_isError(mlhSize)) return ERROR(corruption_detected);
-            ip += mlhSize;
-    }   }
-
-    return ip-istart;
-}
-
-
-typedef struct {
-    size_t litLength;
-    size_t matchLength;
-    size_t offset;
-} seq_t;
-
-typedef struct {
-    BITv07_DStream_t DStream;
-    FSEv07_DState_t stateLL;
-    FSEv07_DState_t stateOffb;
-    FSEv07_DState_t stateML;
-    size_t prevOffset[ZSTDv07_REP_INIT];
-} seqState_t;
-
-
-static seq_t ZSTDv07_decodeSequence(seqState_t* seqState)
-{
-    seq_t seq;
-
-    U32 const llCode = FSEv07_peekSymbol(&(seqState->stateLL));
-    U32 const mlCode = FSEv07_peekSymbol(&(seqState->stateML));
-    U32 const ofCode = FSEv07_peekSymbol(&(seqState->stateOffb));   /* <= maxOff, by table construction */
-
-    U32 const llBits = LL_bits[llCode];
-    U32 const mlBits = ML_bits[mlCode];
-    U32 const ofBits = ofCode;
-    U32 const totalBits = llBits+mlBits+ofBits;
-
-    static const U32 LL_base[MaxLL+1] = {
-                             0,  1,  2,  3,  4,  5,  6,  7,  8,  9,   10,    11,    12,    13,    14,     15,
-                            16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
-                            0x2000, 0x4000, 0x8000, 0x10000 };
-
-    static const U32 ML_base[MaxML+1] = {
-                             3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,   14,    15,    16,    17,    18,
-                            19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,   30,    31,    32,    33,    34,
-                            35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
-                            0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
-
-    static const U32 OF_base[MaxOff+1] = {
-                 0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,
-                 0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,
-                 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
-                 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };
-
-    /* sequence */
-    {   size_t offset;
-        if (!ofCode)
-            offset = 0;
-        else {
-            offset = OF_base[ofCode] + BITv07_readBits(&(seqState->DStream), ofBits);   /* <=  (ZSTDv07_WINDOWLOG_MAX-1) bits */
-            if (MEM_32bits()) BITv07_reloadDStream(&(seqState->DStream));
-        }
-
-        if (ofCode <= 1) {
-            if ((llCode == 0) & (offset <= 1)) offset = 1-offset;
-            if (offset) {
-                size_t const temp = seqState->prevOffset[offset];
-                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
-                seqState->prevOffset[1] = seqState->prevOffset[0];
-                seqState->prevOffset[0] = offset = temp;
-            } else {
-                offset = seqState->prevOffset[0];
-            }
-        } else {
-            seqState->prevOffset[2] = seqState->prevOffset[1];
-            seqState->prevOffset[1] = seqState->prevOffset[0];
-            seqState->prevOffset[0] = offset;
-        }
-        seq.offset = offset;
-    }
-
-    seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BITv07_readBits(&(seqState->DStream), mlBits) : 0);   /* <=  16 bits */
-    if (MEM_32bits() && (mlBits+llBits>24)) BITv07_reloadDStream(&(seqState->DStream));
-
-    seq.litLength = LL_base[llCode] + ((llCode>15) ? BITv07_readBits(&(seqState->DStream), llBits) : 0);   /* <=  16 bits */
-    if (MEM_32bits() ||
-       (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BITv07_reloadDStream(&(seqState->DStream));
-
-    /* ANS state update */
-    FSEv07_updateState(&(seqState->stateLL), &(seqState->DStream));   /* <=  9 bits */
-    FSEv07_updateState(&(seqState->stateML), &(seqState->DStream));   /* <=  9 bits */
-    if (MEM_32bits()) BITv07_reloadDStream(&(seqState->DStream));     /* <= 18 bits */
-    FSEv07_updateState(&(seqState->stateOffb), &(seqState->DStream)); /* <=  8 bits */
-
-    return seq;
-}
-
-
-static
-size_t ZSTDv07_execSequence(BYTE* op,
-                                BYTE* const oend, seq_t sequence,
-                                const BYTE** litPtr, const BYTE* const litLimit,
-                                const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
-{
-    BYTE* const oLitEnd = op + sequence.litLength;
-    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
-    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
-    BYTE* const oend_w = oend-WILDCOPY_OVERLENGTH;
-    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
-    const BYTE* match = oLitEnd - sequence.offset;
-
-    /* check */
-    if ((oLitEnd>oend_w) | (oMatchEnd>oend)) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
-    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
-
-    /* copy Literals */
-    ZSTDv07_wildcopy(op, *litPtr, sequence.litLength);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
-    op = oLitEnd;
-    *litPtr = iLitEnd;   /* update for next sequence */
-
-    /* copy Match */
-    if (sequence.offset > (size_t)(oLitEnd - base)) {
-        /* offset beyond prefix */
-        if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
-        match = dictEnd - (base-match);
-        if (match + sequence.matchLength <= dictEnd) {
-            memmove(oLitEnd, match, sequence.matchLength);
-            return sequenceLength;
-        }
-        /* span extDict & currentPrefixSegment */
-        {   size_t const length1 = dictEnd - match;
-            memmove(oLitEnd, match, length1);
-            op = oLitEnd + length1;
-            sequence.matchLength -= length1;
-            match = base;
-            if (op > oend_w || sequence.matchLength < MINMATCH) {
-              while (op < oMatchEnd) *op++ = *match++;
-              return sequenceLength;
-            }
-    }   }
-    /* Requirement: op <= oend_w */
-
-    /* match within prefix */
-    if (sequence.offset < 8) {
-        /* close range match, overlap */
-        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
-        int const sub2 = dec64table[sequence.offset];
-        op[0] = match[0];
-        op[1] = match[1];
-        op[2] = match[2];
-        op[3] = match[3];
-        match += dec32table[sequence.offset];
-        ZSTDv07_copy4(op+4, match);
-        match -= sub2;
-    } else {
-        ZSTDv07_copy8(op, match);
-    }
-    op += 8; match += 8;
-
-    if (oMatchEnd > oend-(16-MINMATCH)) {
-        if (op < oend_w) {
-            ZSTDv07_wildcopy(op, match, oend_w - op);
-            match += oend_w - op;
-            op = oend_w;
-        }
-        while (op < oMatchEnd) *op++ = *match++;
-    } else {
-        ZSTDv07_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
-    }
-    return sequenceLength;
-}
-
-
-static size_t ZSTDv07_decompressSequences(
-                               ZSTDv07_DCtx* dctx,
-                               void* dst, size_t maxDstSize,
-                         const void* seqStart, size_t seqSize)
-{
-    const BYTE* ip = (const BYTE*)seqStart;
-    const BYTE* const iend = ip + seqSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* const oend = ostart + maxDstSize;
-    BYTE* op = ostart;
-    const BYTE* litPtr = dctx->litPtr;
-    const BYTE* const litEnd = litPtr + dctx->litSize;
-    FSEv07_DTable* DTableLL = dctx->LLTable;
-    FSEv07_DTable* DTableML = dctx->MLTable;
-    FSEv07_DTable* DTableOffb = dctx->OffTable;
-    const BYTE* const base = (const BYTE*) (dctx->base);
-    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
-    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
-    int nbSeq;
-
-    /* Build Decoding Tables */
-    {   size_t const seqHSize = ZSTDv07_decodeSeqHeaders(&nbSeq, DTableLL, DTableML, DTableOffb, dctx->fseEntropy, ip, seqSize);
-        if (ZSTDv07_isError(seqHSize)) return seqHSize;
-        ip += seqHSize;
-    }
-
-    /* Regen sequences */
-    if (nbSeq) {
-        seqState_t seqState;
-        dctx->fseEntropy = 1;
-        { U32 i; for (i=0; i<ZSTDv07_REP_INIT; i++) seqState.prevOffset[i] = dctx->rep[i]; }
-        { size_t const errorCode = BITv07_initDStream(&(seqState.DStream), ip, iend-ip);
-          if (ERR_isError(errorCode)) return ERROR(corruption_detected); }
-        FSEv07_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
-        FSEv07_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
-        FSEv07_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
-
-        for ( ; (BITv07_reloadDStream(&(seqState.DStream)) <= BITv07_DStream_completed) && nbSeq ; ) {
-            nbSeq--;
-            {   seq_t const sequence = ZSTDv07_decodeSequence(&seqState);
-                size_t const oneSeqSize = ZSTDv07_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
-                if (ZSTDv07_isError(oneSeqSize)) return oneSeqSize;
-                op += oneSeqSize;
-        }   }
-
-        /* check if reached exact end */
-        if (nbSeq) return ERROR(corruption_detected);
-        /* save reps for next block */
-        { U32 i; for (i=0; i<ZSTDv07_REP_INIT; i++) dctx->rep[i] = (U32)(seqState.prevOffset[i]); }
-    }
-
-    /* last literal segment */
-    {   size_t const lastLLSize = litEnd - litPtr;
-        //if (litPtr > litEnd) return ERROR(corruption_detected);   /* too many literals already used */
-        if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
-        memcpy(op, litPtr, lastLLSize);
-        op += lastLLSize;
-    }
-
-    return op-ostart;
-}
-
-
-static void ZSTDv07_checkContinuity(ZSTDv07_DCtx* dctx, const void* dst)
-{
-    if (dst != dctx->previousDstEnd) {   /* not contiguous */
-        dctx->dictEnd = dctx->previousDstEnd;
-        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
-        dctx->base = dst;
-        dctx->previousDstEnd = dst;
-    }
-}
-
-
-static size_t ZSTDv07_decompressBlock_internal(ZSTDv07_DCtx* dctx,
-                            void* dst, size_t dstCapacity,
-                      const void* src, size_t srcSize)
-{   /* blockType == blockCompressed */
-    const BYTE* ip = (const BYTE*)src;
-
-    if (srcSize >= ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(srcSize_wrong);
-
-    /* Decode literals sub-block */
-    {   size_t const litCSize = ZSTDv07_decodeLiteralsBlock(dctx, src, srcSize);
-        if (ZSTDv07_isError(litCSize)) return litCSize;
-        ip += litCSize;
-        srcSize -= litCSize;
-    }
-    return ZSTDv07_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
-}
-
-
-size_t ZSTDv07_decompressBlock(ZSTDv07_DCtx* dctx,
-                            void* dst, size_t dstCapacity,
-                      const void* src, size_t srcSize)
-{
-    size_t dSize;
-    ZSTDv07_checkContinuity(dctx, dst);
-    dSize = ZSTDv07_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
-    dctx->previousDstEnd = (char*)dst + dSize;
-    return dSize;
-}
-
-
-/** ZSTDv07_insertBlock() :
-    insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
-ZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockStart, size_t blockSize)
-{
-    ZSTDv07_checkContinuity(dctx, blockStart);
-    dctx->previousDstEnd = (const char*)blockStart + blockSize;
-    return blockSize;
-}
-
-
-static size_t ZSTDv07_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
-{
-    if (length > dstCapacity) return ERROR(dstSize_tooSmall);
-    memset(dst, byte, length);
-    return length;
-}
-
-
-/*! ZSTDv07_decompressFrame() :
-*   `dctx` must be properly initialized */
-static size_t ZSTDv07_decompressFrame(ZSTDv07_DCtx* dctx,
-                                 void* dst, size_t dstCapacity,
-                                 const void* src, size_t srcSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-    const BYTE* const iend = ip + srcSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* const oend = ostart + dstCapacity;
-    BYTE* op = ostart;
-    size_t remainingSize = srcSize;
-
-    /* check */
-    if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
-
-    /* Frame Header */
-    {   size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min);
-        if (ZSTDv07_isError(frameHeaderSize)) return frameHeaderSize;
-        if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
-        if (ZSTDv07_decodeFrameHeader(dctx, src, frameHeaderSize)) return ERROR(corruption_detected);
-        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
-    }
-
-    /* Loop on each block */
-    while (1) {
-        size_t decodedSize;
-        blockProperties_t blockProperties;
-        size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, iend-ip, &blockProperties);
-        if (ZSTDv07_isError(cBlockSize)) return cBlockSize;
-
-        ip += ZSTDv07_blockHeaderSize;
-        remainingSize -= ZSTDv07_blockHeaderSize;
-        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
-
-        switch(blockProperties.blockType)
-        {
-        case bt_compressed:
-            decodedSize = ZSTDv07_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);
-            break;
-        case bt_raw :
-            decodedSize = ZSTDv07_copyRawBlock(op, oend-op, ip, cBlockSize);
-            break;
-        case bt_rle :
-            decodedSize = ZSTDv07_generateNxBytes(op, oend-op, *ip, blockProperties.origSize);
-            break;
-        case bt_end :
-            /* end of frame */
-            if (remainingSize) return ERROR(srcSize_wrong);
-            decodedSize = 0;
-            break;
-        default:
-            return ERROR(GENERIC);   /* impossible */
-        }
-        if (blockProperties.blockType == bt_end) break;   /* bt_end */
-
-        if (ZSTDv07_isError(decodedSize)) return decodedSize;
-        if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, op, decodedSize);
-        op += decodedSize;
-        ip += cBlockSize;
-        remainingSize -= cBlockSize;
-    }
-
-    return op-ostart;
-}
-
-
-/*! ZSTDv07_decompress_usingPreparedDCtx() :
-*   Same as ZSTDv07_decompress_usingDict(), but using a reference context `refDCtx` into which the dictionary has already been loaded.
-*   It avoids reloading the dictionary each time.
-*   `refDCtx` must have been properly initialized using ZSTDv07_decompressBegin_usingDict().
-*   Requires 2 contexts : 1 for reference (refDCtx), which will not be modified, and 1 to run the decompression operation (dctx) */
-static size_t ZSTDv07_decompress_usingPreparedDCtx(ZSTDv07_DCtx* dctx, const ZSTDv07_DCtx* refDCtx,
-                                         void* dst, size_t dstCapacity,
-                                   const void* src, size_t srcSize)
-{
-    ZSTDv07_copyDCtx(dctx, refDCtx);
-    ZSTDv07_checkContinuity(dctx, dst);
-    return ZSTDv07_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
-}
-
-
-size_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx,
-                                 void* dst, size_t dstCapacity,
-                                 const void* src, size_t srcSize,
-                                 const void* dict, size_t dictSize)
-{
-    ZSTDv07_decompressBegin_usingDict(dctx, dict, dictSize);
-    ZSTDv07_checkContinuity(dctx, dst);
-    return ZSTDv07_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
-}
-
-
-size_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    return ZSTDv07_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
-}
-
-
-size_t ZSTDv07_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-#if defined(ZSTDv07_HEAPMODE) && (ZSTDv07_HEAPMODE==1)
-    size_t regenSize;
-    ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx();
-    if (dctx==NULL) return ERROR(memory_allocation);
-    regenSize = ZSTDv07_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
-    ZSTDv07_freeDCtx(dctx);
-    return regenSize;
-#else   /* stack mode */
-    ZSTDv07_DCtx dctx;
-    return ZSTDv07_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
-#endif
-}
-
-/* ZSTD_errorFrameSizeInfoLegacy() :
-   assumes `cSize` and `dBound` are _not_ NULL */
-static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
-{
-    *cSize = ret;
-    *dBound = ZSTD_CONTENTSIZE_ERROR;
-}
-
-void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
-{
-    const BYTE* ip = (const BYTE*)src;
-    size_t remainingSize = srcSize;
-    size_t nbBlocks = 0;
-
-    /* check */
-    if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) {
-        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-        return;
-    }
-
-    /* Frame Header */
-    {   size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, srcSize);
-        if (ZSTDv07_isError(frameHeaderSize)) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize);
-            return;
-        }
-        if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
-            return;
-        }
-        if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-            return;
-        }
-        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
-    }
-
-    /* Loop on each block */
-    while (1) {
-        blockProperties_t blockProperties;
-        size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, remainingSize, &blockProperties);
-        if (ZSTDv07_isError(cBlockSize)) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
-            return;
-        }
-
-        ip += ZSTDv07_blockHeaderSize;
-        remainingSize -= ZSTDv07_blockHeaderSize;
-
-        if (blockProperties.blockType == bt_end) break;
-
-        if (cBlockSize > remainingSize) {
-            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
-            return;
-        }
-
-        ip += cBlockSize;
-        remainingSize -= cBlockSize;
-        nbBlocks++;
-    }
-
-    *cSize = ip - (const BYTE*)src;
-    *dBound = nbBlocks * ZSTDv07_BLOCKSIZE_ABSOLUTEMAX;
-}
-
-/*_******************************
-*  Streaming Decompression API
-********************************/
-size_t ZSTDv07_nextSrcSizeToDecompress(ZSTDv07_DCtx* dctx)
-{
-    return dctx->expected;
-}
-
-int ZSTDv07_isSkipFrame(ZSTDv07_DCtx* dctx)
-{
-    return dctx->stage == ZSTDds_skipFrame;
-}
-
-/** ZSTDv07_decompressContinue() :
-*   @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
-*             or an error code, which can be tested using ZSTDv07_isError() */
-size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    /* Sanity check */
-    if (srcSize != dctx->expected) return ERROR(srcSize_wrong);
-    if (dstCapacity) ZSTDv07_checkContinuity(dctx, dst);
-
-    switch (dctx->stage)
-    {
-    case ZSTDds_getFrameHeaderSize :
-        if (srcSize != ZSTDv07_frameHeaderSize_min) return ERROR(srcSize_wrong);   /* impossible */
-        if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTDv07_MAGIC_SKIPPABLE_START) {
-            memcpy(dctx->headerBuffer, src, ZSTDv07_frameHeaderSize_min);
-            dctx->expected = ZSTDv07_skippableHeaderSize - ZSTDv07_frameHeaderSize_min; /* magic number + skippable frame length */
-            dctx->stage = ZSTDds_decodeSkippableHeader;
-            return 0;
-        }
-        dctx->headerSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min);
-        if (ZSTDv07_isError(dctx->headerSize)) return dctx->headerSize;
-        memcpy(dctx->headerBuffer, src, ZSTDv07_frameHeaderSize_min);
-        if (dctx->headerSize > ZSTDv07_frameHeaderSize_min) {
-            dctx->expected = dctx->headerSize - ZSTDv07_frameHeaderSize_min;
-            dctx->stage = ZSTDds_decodeFrameHeader;
-            return 0;
-        }
-        dctx->expected = 0;   /* not necessary to copy more */
-	/* fall-through */
-    case ZSTDds_decodeFrameHeader:
-        {   size_t result;
-            memcpy(dctx->headerBuffer + ZSTDv07_frameHeaderSize_min, src, dctx->expected);
-            result = ZSTDv07_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize);
-            if (ZSTDv07_isError(result)) return result;
-            dctx->expected = ZSTDv07_blockHeaderSize;
-            dctx->stage = ZSTDds_decodeBlockHeader;
-            return 0;
-        }
-    case ZSTDds_decodeBlockHeader:
-        {   blockProperties_t bp;
-            size_t const cBlockSize = ZSTDv07_getcBlockSize(src, ZSTDv07_blockHeaderSize, &bp);
-            if (ZSTDv07_isError(cBlockSize)) return cBlockSize;
-            if (bp.blockType == bt_end) {
-                if (dctx->fParams.checksumFlag) {
-                    U64 const h64 = XXH64_digest(&dctx->xxhState);
-                    U32 const h32 = (U32)(h64>>11) & ((1<<22)-1);
-                    const BYTE* const ip = (const BYTE*)src;
-                    U32 const check32 = ip[2] + (ip[1] << 8) + ((ip[0] & 0x3F) << 16);
-                    if (check32 != h32) return ERROR(checksum_wrong);
-                }
-                dctx->expected = 0;
-                dctx->stage = ZSTDds_getFrameHeaderSize;
-            } else {
-                dctx->expected = cBlockSize;
-                dctx->bType = bp.blockType;
-                dctx->stage = ZSTDds_decompressBlock;
-            }
-            return 0;
-        }
-    case ZSTDds_decompressBlock:
-        {   size_t rSize;
-            switch(dctx->bType)
-            {
-            case bt_compressed:
-                rSize = ZSTDv07_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
-                break;
-            case bt_raw :
-                rSize = ZSTDv07_copyRawBlock(dst, dstCapacity, src, srcSize);
-                break;
-            case bt_rle :
-                return ERROR(GENERIC);   /* not yet handled */
-                break;
-            case bt_end :   /* should never happen (filtered at phase 1) */
-                rSize = 0;
-                break;
-            default:
-                return ERROR(GENERIC);   /* impossible */
-            }
-            dctx->stage = ZSTDds_decodeBlockHeader;
-            dctx->expected = ZSTDv07_blockHeaderSize;
-            dctx->previousDstEnd = (char*)dst + rSize;
-            if (ZSTDv07_isError(rSize)) return rSize;
-            if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
-            return rSize;
-        }
-    case ZSTDds_decodeSkippableHeader:
-        {   memcpy(dctx->headerBuffer + ZSTDv07_frameHeaderSize_min, src, dctx->expected);
-            dctx->expected = MEM_readLE32(dctx->headerBuffer + 4);
-            dctx->stage = ZSTDds_skipFrame;
-            return 0;
-        }
-    case ZSTDds_skipFrame:
-        {   dctx->expected = 0;
-            dctx->stage = ZSTDds_getFrameHeaderSize;
-            return 0;
-        }
-    default:
-        return ERROR(GENERIC);   /* impossible */
-    }
-}
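For orientation, a minimal sketch of how the low-level `ZSTDv07_nextSrcSizeToDecompress()` / `ZSTDv07_decompressContinue()` pair defined above can be driven, assuming the whole frame is already in memory and that `ZSTDv07_decompressBegin()` (used internally above) is reachable by the caller. Function and buffer names are illustrative only.

```c
/* Sketch only: feed the decoder exactly the number of bytes it asks for,
 * writing decoded blocks contiguously into dst.                          */
static size_t exampleStreamDecode(ZSTDv07_DCtx* dctx,
                                  char* dst, size_t dstCapacity,
                                  const char* src, size_t srcSize)
{
    size_t srcPos = 0, dstPos = 0;
    ZSTDv07_decompressBegin(dctx);                 /* reset the context */
    while (1) {
        size_t const toRead = ZSTDv07_nextSrcSizeToDecompress(dctx);
        size_t produced;
        if (toRead == 0) break;                    /* frame fully decoded */
        if (toRead > srcSize - srcPos) return 0;   /* truncated input (sketch: bail out) */
        produced = ZSTDv07_decompressContinue(dctx,
                            dst + dstPos, dstCapacity - dstPos,
                            src + srcPos, toRead);
        if (ZSTDv07_isError(produced)) return produced;
        srcPos += toRead;                          /* header calls produce 0 output bytes */
        dstPos += produced;
    }
    return dstPos;
}
```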
-
-
-static size_t ZSTDv07_refDictContent(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    dctx->dictEnd = dctx->previousDstEnd;
-    dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
-    dctx->base = dict;
-    dctx->previousDstEnd = (const char*)dict + dictSize;
-    return 0;
-}
-
-static size_t ZSTDv07_loadEntropy(ZSTDv07_DCtx* dctx, const void* const dict, size_t const dictSize)
-{
-    const BYTE* dictPtr = (const BYTE*)dict;
-    const BYTE* const dictEnd = dictPtr + dictSize;
-
-    {   size_t const hSize = HUFv07_readDTableX4(dctx->hufTable, dict, dictSize);
-        if (HUFv07_isError(hSize)) return ERROR(dictionary_corrupted);
-        dictPtr += hSize;
-    }
-
-    {   short offcodeNCount[MaxOff+1];
-        U32 offcodeMaxValue=MaxOff, offcodeLog;
-        size_t const offcodeHeaderSize = FSEv07_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
-        if (FSEv07_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
-        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
-        { size_t const errorCode = FSEv07_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);
-          if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }
-        dictPtr += offcodeHeaderSize;
-    }
-
-    {   short matchlengthNCount[MaxML+1];
-        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
-        size_t const matchlengthHeaderSize = FSEv07_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
-        if (FSEv07_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
-        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
-        { size_t const errorCode = FSEv07_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);
-          if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }
-        dictPtr += matchlengthHeaderSize;
-    }
-
-    {   short litlengthNCount[MaxLL+1];
-        unsigned litlengthMaxValue = MaxLL, litlengthLog;
-        size_t const litlengthHeaderSize = FSEv07_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
-        if (FSEv07_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
-        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
-        { size_t const errorCode = FSEv07_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);
-          if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }
-        dictPtr += litlengthHeaderSize;
-    }
-
-    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
-    dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] == 0 || dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
-    dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] == 0 || dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
-    dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] == 0 || dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
-    dictPtr += 12;
-
-    dctx->litEntropy = dctx->fseEntropy = 1;
-    return dictPtr - (const BYTE*)dict;
-}
-
-static size_t ZSTDv07_decompress_insertDictionary(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    if (dictSize < 8) return ZSTDv07_refDictContent(dctx, dict, dictSize);
-    {   U32 const magic = MEM_readLE32(dict);
-        if (magic != ZSTDv07_DICT_MAGIC) {
-            return ZSTDv07_refDictContent(dctx, dict, dictSize);   /* pure content mode */
-    }   }
-    dctx->dictID = MEM_readLE32((const char*)dict + 4);
-
-    /* load entropy tables */
-    dict = (const char*)dict + 8;
-    dictSize -= 8;
-    {   size_t const eSize = ZSTDv07_loadEntropy(dctx, dict, dictSize);
-        if (ZSTDv07_isError(eSize)) return ERROR(dictionary_corrupted);
-        dict = (const char*)dict + eSize;
-        dictSize -= eSize;
-    }
-
-    /* reference dictionary content */
-    return ZSTDv07_refDictContent(dctx, dict, dictSize);
-}
-
-
-size_t ZSTDv07_decompressBegin_usingDict(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)
-{
-    { size_t const errorCode = ZSTDv07_decompressBegin(dctx);
-      if (ZSTDv07_isError(errorCode)) return errorCode; }
-
-    if (dict && dictSize) {
-        size_t const errorCode = ZSTDv07_decompress_insertDictionary(dctx, dict, dictSize);
-        if (ZSTDv07_isError(errorCode)) return ERROR(dictionary_corrupted);
-    }
-
-    return 0;
-}
-
-
-struct ZSTDv07_DDict_s {
-    void* dict;
-    size_t dictSize;
-    ZSTDv07_DCtx* refContext;
-};  /* typedef'd to ZSTDv07_DDict within zstd_v07.h */
-
-static ZSTDv07_DDict* ZSTDv07_createDDict_advanced(const void* dict, size_t dictSize, ZSTDv07_customMem customMem)
-{
-    if (!customMem.customAlloc && !customMem.customFree)
-        customMem = defaultCustomMem;
-
-    if (!customMem.customAlloc || !customMem.customFree)
-        return NULL;
-
-    {   ZSTDv07_DDict* const ddict = (ZSTDv07_DDict*) customMem.customAlloc(customMem.opaque, sizeof(*ddict));
-        void* const dictContent = customMem.customAlloc(customMem.opaque, dictSize);
-        ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx_advanced(customMem);
-
-        if (!dictContent || !ddict || !dctx) {
-            customMem.customFree(customMem.opaque, dictContent);
-            customMem.customFree(customMem.opaque, ddict);
-            customMem.customFree(customMem.opaque, dctx);
-            return NULL;
-        }
-
-        memcpy(dictContent, dict, dictSize);
-        {   size_t const errorCode = ZSTDv07_decompressBegin_usingDict(dctx, dictContent, dictSize);
-            if (ZSTDv07_isError(errorCode)) {
-                customMem.customFree(customMem.opaque, dictContent);
-                customMem.customFree(customMem.opaque, ddict);
-                customMem.customFree(customMem.opaque, dctx);
-                return NULL;
-        }   }
-
-        ddict->dict = dictContent;
-        ddict->dictSize = dictSize;
-        ddict->refContext = dctx;
-        return ddict;
-    }
-}
-
-/*! ZSTDv07_createDDict() :
-*   Create a digested dictionary, ready to start decompression without startup delay.
-*   `dict` can be released after `ZSTDv07_DDict` creation */
-ZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize)
-{
-    ZSTDv07_customMem const allocator = { NULL, NULL, NULL };
-    return ZSTDv07_createDDict_advanced(dict, dictSize, allocator);
-}
-
-size_t ZSTDv07_freeDDict(ZSTDv07_DDict* ddict)
-{
-    ZSTDv07_freeFunction const cFree = ddict->refContext->customMem.customFree;
-    void* const opaque = ddict->refContext->customMem.opaque;
-    ZSTDv07_freeDCtx(ddict->refContext);
-    cFree(opaque, ddict->dict);
-    cFree(opaque, ddict);
-    return 0;
-}
-
-/*! ZSTDv07_decompress_usingDDict() :
-*   Decompression using a pre-digested Dictionary
-*   Use dictionary without significant overhead. */
-ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx,
-                                           void* dst, size_t dstCapacity,
-                                     const void* src, size_t srcSize,
-                                     const ZSTDv07_DDict* ddict)
-{
-    return ZSTDv07_decompress_usingPreparedDCtx(dctx, ddict->refContext,
-                                           dst, dstCapacity,
-                                           src, srcSize);
-}
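A hedged usage sketch for the pre-digested dictionary path: digest the dictionary once with `ZSTDv07_createDDict()`, reuse it per frame, then release both objects. Buffer names are assumptions, and allocation failures are simply reported as 0 here.

```c
/* Sketch only: decode one dictionary-compressed frame with a digested dictionary. */
static size_t exampleDecompressWithDDict(void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize,
                                         const void* dict, size_t dictSize)
{
    size_t result = 0;   /* 0 signals "not decoded" in this sketch */
    ZSTDv07_DDict* const ddict = ZSTDv07_createDDict(dict, dictSize);   /* digest once */
    ZSTDv07_DCtx*  const dctx  = ZSTDv07_createDCtx();
    if (ddict != NULL && dctx != NULL)
        result = ZSTDv07_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);
    if (dctx  != NULL) ZSTDv07_freeDCtx(dctx);
    if (ddict != NULL) ZSTDv07_freeDDict(ddict);
    return result;
}
```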
-/*
-    Buffered version of Zstd compression library
-    Copyright (C) 2015-2016, Yann Collet.
-
-    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are
-    met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-    copyright notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with the
-    distribution.
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-    You can contact the author at :
-    - zstd homepage : http://www.zstd.net/
-*/
-
-
-
-/*-***************************************************************************
-*  Streaming decompression howto
-*
-*  A ZBUFFv07_DCtx object is required to track streaming operations.
-*  Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources.
-*  Use ZBUFFv07_decompressInit() to start a new decompression operation,
-*   or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary.
-*  Note that ZBUFFv07_DCtx objects can be re-init multiple times.
-*
-*  Use ZBUFFv07_decompressContinue() repetitively to consume your input.
-*  *srcSizePtr and *dstCapacityPtr can be any size.
-*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
-*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
-*  The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change @dst.
-*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
-*            or 0 when a frame is completely decoded,
-*            or an error code, which can be tested using ZBUFFv07_isError().
-*
-*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize()
-*  output : ZBUFFv07_recommendedDOutSize == 128 KB; the block size is the internal unit, and this size guarantees a fully decoded block can always be written.
-*  input  : ZBUFFv07_recommendedDInSize == 128 KB + 3;
-*           just follow indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
-* *******************************************************************************/
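The howto above, written out as a hedged sketch: a refill/decompress/flush loop over a `FILE*` pair, sized with the recommended buffer helpers. The `fin`/`fout` handles and the error handling are illustrative assumptions, not part of the library.

```c
#include <stdio.h>
#include <stdlib.h>

/* Sketch only: decode one frame from fin to fout with the ZBUFFv07 streaming API. */
static size_t exampleZbuffDecompressFile(FILE* fin, FILE* fout)
{
    size_t const inCapacity  = ZBUFFv07_recommendedDInSize();    /* 128 KB + block header */
    size_t const outCapacity = ZBUFFv07_recommendedDOutSize();   /* 128 KB */
    char* const inBuff  = (char*)malloc(inCapacity);
    char* const outBuff = (char*)malloc(outCapacity);
    ZBUFFv07_DCtx* const zbd = ZBUFFv07_createDCtx();
    size_t hint = 1;                                  /* 0 will mean "frame fully decoded" */

    if (inBuff && outBuff && zbd) {
        ZBUFFv07_decompressInit(zbd);
        while (hint != 0) {
            size_t const readSize = fread(inBuff, 1, inCapacity, fin);
            size_t srcPos = 0;
            if (readSize == 0) break;                 /* no more input */
            while (srcPos < readSize && hint != 0) {
                size_t dstSize = outCapacity;
                size_t srcSize = readSize - srcPos;
                hint = ZBUFFv07_decompressContinue(zbd, outBuff, &dstSize,
                                                   inBuff + srcPos, &srcSize);
                if (ZBUFFv07_isError(hint)) break;    /* hint now carries the error code */
                srcPos += srcSize;                    /* bytes actually consumed */
                fwrite(outBuff, 1, dstSize, fout);    /* bytes actually produced */
            }
            if (ZBUFFv07_isError(hint)) break;
        }
    }
    ZBUFFv07_freeDCtx(zbd);                           /* supports free on NULL */
    free(inBuff);
    free(outBuff);
    return hint;                                      /* 0, a size hint, or an error code */
}
```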
-
-typedef enum { ZBUFFds_init, ZBUFFds_loadHeader,
-               ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFFv07_dStage;
-
-/* *** Resource management *** */
-struct ZBUFFv07_DCtx_s {
-    ZSTDv07_DCtx* zd;
-    ZSTDv07_frameParams fParams;
-    ZBUFFv07_dStage stage;
-    char*  inBuff;
-    size_t inBuffSize;
-    size_t inPos;
-    char*  outBuff;
-    size_t outBuffSize;
-    size_t outStart;
-    size_t outEnd;
-    size_t blockSize;
-    BYTE headerBuffer[ZSTDv07_FRAMEHEADERSIZE_MAX];
-    size_t lhSize;
-    ZSTDv07_customMem customMem;
-};   /* typedef'd to ZBUFFv07_DCtx within "zstd_buffered.h" */
-
-ZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx_advanced(ZSTDv07_customMem customMem);
-
-ZBUFFv07_DCtx* ZBUFFv07_createDCtx(void)
-{
-    return ZBUFFv07_createDCtx_advanced(defaultCustomMem);
-}
-
-ZBUFFv07_DCtx* ZBUFFv07_createDCtx_advanced(ZSTDv07_customMem customMem)
-{
-    ZBUFFv07_DCtx* zbd;
-
-    if (!customMem.customAlloc && !customMem.customFree)
-        customMem = defaultCustomMem;
-
-    if (!customMem.customAlloc || !customMem.customFree)
-        return NULL;
-
-    zbd = (ZBUFFv07_DCtx*)customMem.customAlloc(customMem.opaque, sizeof(ZBUFFv07_DCtx));
-    if (zbd==NULL) return NULL;
-    memset(zbd, 0, sizeof(ZBUFFv07_DCtx));
-    memcpy(&zbd->customMem, &customMem, sizeof(ZSTDv07_customMem));
-    zbd->zd = ZSTDv07_createDCtx_advanced(customMem);
-    if (zbd->zd == NULL) { ZBUFFv07_freeDCtx(zbd); return NULL; }
-    zbd->stage = ZBUFFds_init;
-    return zbd;
-}
-
-size_t ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* zbd)
-{
-    if (zbd==NULL) return 0;   /* support free on null */
-    ZSTDv07_freeDCtx(zbd->zd);
-    if (zbd->inBuff) zbd->customMem.customFree(zbd->customMem.opaque, zbd->inBuff);
-    if (zbd->outBuff) zbd->customMem.customFree(zbd->customMem.opaque, zbd->outBuff);
-    zbd->customMem.customFree(zbd->customMem.opaque, zbd);
-    return 0;
-}
-
-
-/* *** Initialization *** */
-
-size_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* zbd, const void* dict, size_t dictSize)
-{
-    zbd->stage = ZBUFFds_loadHeader;
-    zbd->lhSize = zbd->inPos = zbd->outStart = zbd->outEnd = 0;
-    return ZSTDv07_decompressBegin_usingDict(zbd->zd, dict, dictSize);
-}
-
-size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* zbd)
-{
-    return ZBUFFv07_decompressInitDictionary(zbd, NULL, 0);
-}
-
-
-/* internal util function */
-MEM_STATIC size_t ZBUFFv07_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    size_t const length = MIN(dstCapacity, srcSize);
-    memcpy(dst, src, length);
-    return length;
-}
-
-
-/* *** Decompression *** */
-
-size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd,
-                                void* dst, size_t* dstCapacityPtr,
-                          const void* src, size_t* srcSizePtr)
-{
-    const char* const istart = (const char*)src;
-    const char* const iend = istart + *srcSizePtr;
-    const char* ip = istart;
-    char* const ostart = (char*)dst;
-    char* const oend = ostart + *dstCapacityPtr;
-    char* op = ostart;
-    U32 notDone = 1;
-
-    while (notDone) {
-        switch(zbd->stage)
-        {
-        case ZBUFFds_init :
-            return ERROR(init_missing);
-
-        case ZBUFFds_loadHeader :
-            {   size_t const hSize = ZSTDv07_getFrameParams(&(zbd->fParams), zbd->headerBuffer, zbd->lhSize);
-                if (ZSTDv07_isError(hSize)) return hSize;
-                if (hSize != 0) {
-                    size_t const toLoad = hSize - zbd->lhSize;   /* if hSize!=0, hSize > zbd->lhSize */
-                    if (toLoad > (size_t)(iend-ip)) {   /* not enough input to load full header */
-                        memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip);
-                        zbd->lhSize += iend-ip;
-                        *dstCapacityPtr = 0;
-                        return (hSize - zbd->lhSize) + ZSTDv07_blockHeaderSize;   /* remaining header bytes + next block header */
-                    }
-                    memcpy(zbd->headerBuffer + zbd->lhSize, ip, toLoad); zbd->lhSize = hSize; ip += toLoad;
-                    break;
-            }   }
-
-            /* Consume header */
-            {   size_t const h1Size = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);  /* == ZSTDv07_frameHeaderSize_min */
-                size_t const h1Result = ZSTDv07_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer, h1Size);
-                if (ZSTDv07_isError(h1Result)) return h1Result;
-                if (h1Size < zbd->lhSize) {   /* long header */
-                    size_t const h2Size = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
-                    size_t const h2Result = ZSTDv07_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer+h1Size, h2Size);
-                    if (ZSTDv07_isError(h2Result)) return h2Result;
-            }   }
-
-            zbd->fParams.windowSize = MAX(zbd->fParams.windowSize, 1U << ZSTDv07_WINDOWLOG_ABSOLUTEMIN);
-
-            /* Frame header instruct buffer sizes */
-            {   size_t const blockSize = MIN(zbd->fParams.windowSize, ZSTDv07_BLOCKSIZE_ABSOLUTEMAX);
-                zbd->blockSize = blockSize;
-                if (zbd->inBuffSize < blockSize) {
-                    zbd->customMem.customFree(zbd->customMem.opaque, zbd->inBuff);
-                    zbd->inBuffSize = blockSize;
-                    zbd->inBuff = (char*)zbd->customMem.customAlloc(zbd->customMem.opaque, blockSize);
-                    if (zbd->inBuff == NULL) return ERROR(memory_allocation);
-                }
-                {   size_t const neededOutSize = zbd->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
-                    if (zbd->outBuffSize < neededOutSize) {
-                        zbd->customMem.customFree(zbd->customMem.opaque, zbd->outBuff);
-                        zbd->outBuffSize = neededOutSize;
-                        zbd->outBuff = (char*)zbd->customMem.customAlloc(zbd->customMem.opaque, neededOutSize);
-                        if (zbd->outBuff == NULL) return ERROR(memory_allocation);
-            }   }   }
-            zbd->stage = ZBUFFds_read;
-            /* pass-through */
-	    /* fall-through */
-        case ZBUFFds_read:
-            {   size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
-                if (neededInSize==0) {  /* end of frame */
-                    zbd->stage = ZBUFFds_init;
-                    notDone = 0;
-                    break;
-                }
-                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */
-                    const int isSkipFrame = ZSTDv07_isSkipFrame(zbd->zd);
-                    size_t const decodedSize = ZSTDv07_decompressContinue(zbd->zd,
-                        zbd->outBuff + zbd->outStart, (isSkipFrame ? 0 : zbd->outBuffSize - zbd->outStart),
-                        ip, neededInSize);
-                    if (ZSTDv07_isError(decodedSize)) return decodedSize;
-                    ip += neededInSize;
-                    if (!decodedSize && !isSkipFrame) break;   /* this was just a header */
-                    zbd->outEnd = zbd->outStart +  decodedSize;
-                    zbd->stage = ZBUFFds_flush;
-                    break;
-                }
-                if (ip==iend) { notDone = 0; break; }   /* no more input */
-                zbd->stage = ZBUFFds_load;
-            }
-	    /* fall-through */
-        case ZBUFFds_load:
-            {   size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
-                size_t const toLoad = neededInSize - zbd->inPos;   /* should always be <= remaining space within inBuff */
-                size_t loadedSize;
-                if (toLoad > zbd->inBuffSize - zbd->inPos) return ERROR(corruption_detected);   /* should never happen */
-                loadedSize = ZBUFFv07_limitCopy(zbd->inBuff + zbd->inPos, toLoad, ip, iend-ip);
-                ip += loadedSize;
-                zbd->inPos += loadedSize;
-                if (loadedSize < toLoad) { notDone = 0; break; }   /* not enough input, wait for more */
-
-                /* decode loaded input */
-                {  const int isSkipFrame = ZSTDv07_isSkipFrame(zbd->zd);
-                   size_t const decodedSize = ZSTDv07_decompressContinue(zbd->zd,
-                        zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,
-                        zbd->inBuff, neededInSize);
-                    if (ZSTDv07_isError(decodedSize)) return decodedSize;
-                    zbd->inPos = 0;   /* input is consumed */
-                    if (!decodedSize && !isSkipFrame) { zbd->stage = ZBUFFds_read; break; }   /* this was just a header */
-                    zbd->outEnd = zbd->outStart +  decodedSize;
-                    zbd->stage = ZBUFFds_flush;
-                    /* break; */
-                    /* pass-through */
-                }
-	    }
-	    /* fall-through */
-        case ZBUFFds_flush:
-            {   size_t const toFlushSize = zbd->outEnd - zbd->outStart;
-                size_t const flushedSize = ZBUFFv07_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize);
-                op += flushedSize;
-                zbd->outStart += flushedSize;
-                if (flushedSize == toFlushSize) {
-                    zbd->stage = ZBUFFds_read;
-                    if (zbd->outStart + zbd->blockSize > zbd->outBuffSize)
-                        zbd->outStart = zbd->outEnd = 0;
-                    break;
-                }
-                /* cannot flush everything */
-                notDone = 0;
-                break;
-            }
-        default: return ERROR(GENERIC);   /* impossible */
-    }   }
-
-    /* result */
-    *srcSizePtr = ip-istart;
-    *dstCapacityPtr = op-ostart;
-    {   size_t nextSrcSizeHint = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
-        nextSrcSizeHint -= zbd->inPos;   /* already loaded*/
-        return nextSrcSizeHint;
-    }
-}
-
-
-
-/* *************************************
-*  Tool functions
-***************************************/
-size_t ZBUFFv07_recommendedDInSize(void)  { return ZSTDv07_BLOCKSIZE_ABSOLUTEMAX + ZSTDv07_blockHeaderSize /* block header size*/ ; }
-size_t ZBUFFv07_recommendedDOutSize(void) { return ZSTDv07_BLOCKSIZE_ABSOLUTEMAX; }
diff --git a/vendor/github.com/DataDog/zstd/zstd_v07.h b/vendor/github.com/DataDog/zstd/zstd_v07.h
deleted file mode 100644
index a566c1d..0000000
--- a/vendor/github.com/DataDog/zstd/zstd_v07.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTDv07_H_235446
-#define ZSTDv07_H_235446
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/*======  Dependency  ======*/
-#include <stddef.h>   /* size_t */
-
-
-/*======  Export for Windows  ======*/
-/*!
-*  ZSTDv07_DLL_EXPORT :
-*  Enable exporting of functions when building a Windows DLL
-*/
-#if defined(_WIN32) && defined(ZSTDv07_DLL_EXPORT) && (ZSTDv07_DLL_EXPORT==1)
-#  define ZSTDLIBv07_API __declspec(dllexport)
-#else
-#  define ZSTDLIBv07_API
-#endif
-
-
-/* *************************************
-*  Simple API
-***************************************/
-/*! ZSTDv07_getDecompressedSize() :
-*   @return : decompressed size if known, 0 otherwise.
-       note 1 : if `0`, follow up with ZSTDv07_getFrameParams() to know precise failure cause.
-       note 2 : decompressed size could be wrong or intentionally modified !
-                always ensure results fit within application's authorized limits */
-unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize);
-
-/*! ZSTDv07_decompress() :
-    `compressedSize` : must be the _exact_ size of the compressed input, otherwise decompression will fail.
-    `dstCapacity` must be equal to or larger than originalSize.
-    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
-              or an errorCode if it fails (which can be tested using ZSTDv07_isError()) */
-ZSTDLIBv07_API size_t ZSTDv07_decompress( void* dst, size_t dstCapacity,
-                                    const void* src, size_t compressedSize);
-
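A hedged sketch of the simple API above: query the content size, allocate, then decode in one shot. The `compressed`/`compressedSize` inputs are assumptions; real callers should also handle the "size unknown" case via the streaming API.

```c
#include <stdlib.h>

/* Sketch only: one-shot decoding of a single legacy v0.7 frame. */
static int exampleSimpleDecompress(const void* compressed, size_t compressedSize)
{
    unsigned long long const rSize = ZSTDv07_getDecompressedSize(compressed, compressedSize);
    void* decoded;
    size_t dSize;
    if (rSize == 0) return -1;                    /* size unknown, or not a valid frame */
    decoded = malloc((size_t)rSize);
    if (decoded == NULL) return -1;
    dSize = ZSTDv07_decompress(decoded, (size_t)rSize, compressed, compressedSize);
    if (ZSTDv07_isError(dSize)) { free(decoded); return -1; }
    /* ... use decoded[0..dSize) ... */
    free(decoded);
    return 0;
}
```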
-/**
-ZSTDv07_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.7.x format
-    srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
-    cSize (output parameter)  : the number of bytes that would be read to decompress this frame
-                                or an error code if it fails (which can be tested using ZSTDv07_isError())
-    dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
-                                or ZSTD_CONTENTSIZE_ERROR if an error occurs
-
-    note : assumes `cSize` and `dBound` are _not_ NULL.
-*/
-void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
-                                     size_t* cSize, unsigned long long* dBound);
-
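A short, hedged sketch of how the two output parameters are typically consumed. The wrapper name is hypothetical, and `ZSTD_CONTENTSIZE_ERROR` is assumed to come from the main zstd headers.

```c
/* Sketch only: returns the compressed frame length via *frameSize and an
 * upper bound on its decompressed size via *outBound; 0 on success.      */
static int exampleProbeLegacyFrame(const void* src, size_t srcSize,
                                   size_t* frameSize, unsigned long long* outBound)
{
    size_t cSize;
    unsigned long long dBound;
    ZSTDv07_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
    if (ZSTDv07_isError(cSize) || dBound == ZSTD_CONTENTSIZE_ERROR)
        return -1;                 /* frame is corrupted or truncated */
    *frameSize = cSize;            /* bytes of src occupied by this frame */
    *outBound  = dBound;           /* destination capacity that is always sufficient */
    return 0;
}
```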
-/*======  Helper functions  ======*/
-ZSTDLIBv07_API unsigned    ZSTDv07_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
-ZSTDLIBv07_API const char* ZSTDv07_getErrorName(size_t code);     /*!< provides readable string from an error code */
-
-
-/*-*************************************
-*  Explicit memory management
-***************************************/
-/** Decompression context */
-typedef struct ZSTDv07_DCtx_s ZSTDv07_DCtx;
-ZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx(void);
-ZSTDLIBv07_API size_t     ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx);      /*!< @return : errorCode */
-
-/** ZSTDv07_decompressDCtx() :
-*   Same as ZSTDv07_decompress(), requires an allocated ZSTDv07_DCtx (see ZSTDv07_createDCtx()) */
-ZSTDLIBv07_API size_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-
-
-/*-************************
-*  Simple dictionary API
-***************************/
-/*! ZSTDv07_decompress_usingDict() :
-*   Decompression using a pre-defined Dictionary content (see dictBuilder).
-*   Dictionary must be identical to the one used during compression.
-*   Note : This function loads the dictionary, resulting in a significant startup time */
-ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx,
-                                                   void* dst, size_t dstCapacity,
-                                             const void* src, size_t srcSize,
-                                             const void* dict,size_t dictSize);
-
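A hedged one-shot sketch for this entry point; the dictionary must match the one used at compression time, and the buffer names are assumptions.

```c
/* Sketch only: one-shot decoding with an explicit (non-digested) dictionary. */
static size_t exampleDecompressWithDict(void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize,
                                        const void* dict, size_t dictSize)
{
    ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx();
    size_t dSize = 0;   /* 0 signals "not decoded" in this sketch */
    if (dctx != NULL) {
        dSize = ZSTDv07_decompress_usingDict(dctx, dst, dstCapacity,
                                             src, srcSize, dict, dictSize);
        ZSTDv07_freeDCtx(dctx);
    }
    return dSize;
}
```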
-
-/*-**************************
-*  Advanced Dictionary API
-****************************/
-/*! ZSTDv07_createDDict() :
-*   Create a digested dictionary, ready to start decompression operation without startup delay.
-*   `dict` can be released after creation */
-typedef struct ZSTDv07_DDict_s ZSTDv07_DDict;
-ZSTDLIBv07_API ZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize);
-ZSTDLIBv07_API size_t      ZSTDv07_freeDDict(ZSTDv07_DDict* ddict);
-
-/*! ZSTDv07_decompress_usingDDict() :
-*   Decompression using a pre-digested Dictionary
-*   Faster startup than ZSTDv07_decompress_usingDict(), recommended when same dictionary is used multiple times. */
-ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx,
-                                                    void* dst, size_t dstCapacity,
-                                              const void* src, size_t srcSize,
-                                              const ZSTDv07_DDict* ddict);
-
-typedef struct {
-    unsigned long long frameContentSize;
-    unsigned windowSize;
-    unsigned dictID;
-    unsigned checksumFlag;
-} ZSTDv07_frameParams;
-
-ZSTDLIBv07_API size_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
-
-
-
-
-/* *************************************
-*  Streaming functions
-***************************************/
-typedef struct ZBUFFv07_DCtx_s ZBUFFv07_DCtx;
-ZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx(void);
-ZSTDLIBv07_API size_t      ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* dctx);
-
-ZSTDLIBv07_API size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* dctx);
-ZSTDLIBv07_API size_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* dctx, const void* dict, size_t dictSize);
-
-ZSTDLIBv07_API size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* dctx,
-                                            void* dst, size_t* dstCapacityPtr,
-                                      const void* src, size_t* srcSizePtr);
-
-/*-***************************************************************************
-*  Streaming decompression howto
-*
-*  A ZBUFFv07_DCtx object is required to track streaming operations.
-*  Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources.
-*  Use ZBUFFv07_decompressInit() to start a new decompression operation,
-*   or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary.
-*  Note that ZBUFFv07_DCtx objects can be re-init multiple times.
-*
-*  Use ZBUFFv07_decompressContinue() repetitively to consume your input.
-*  *srcSizePtr and *dstCapacityPtr can be any size.
-*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
-*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
-*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
-*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
-*            or 0 when a frame is completely decoded,
-*            or an error code, which can be tested using ZBUFFv07_isError().
-*
-*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize()
-*  output : ZBUFFv07_recommendedDOutSize== 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded.
-*  input  : ZBUFFv07_recommendedDInSize == 128KB + 3;
-*           just follow indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
-* *******************************************************************************/
-
-
-/* *************************************
-*  Tool functions
-***************************************/
-ZSTDLIBv07_API unsigned ZBUFFv07_isError(size_t errorCode);
-ZSTDLIBv07_API const char* ZBUFFv07_getErrorName(size_t errorCode);
-
-/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
-*   These sizes are just hints, they tend to offer better latency */
-ZSTDLIBv07_API size_t ZBUFFv07_recommendedDInSize(void);
-ZSTDLIBv07_API size_t ZBUFFv07_recommendedDOutSize(void);
-
-
-/*-*************************************
-*  Constants
-***************************************/
-#define ZSTDv07_MAGICNUMBER            0xFD2FB527   /* v0.7 */
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif  /* ZSTDv07_H_235446 */
diff --git a/vendor/github.com/DataDog/zstd/zstdmt_compress.c b/vendor/github.com/DataDog/zstd/zstdmt_compress.c
deleted file mode 100644
index 9e537b8..0000000
--- a/vendor/github.com/DataDog/zstd/zstdmt_compress.c
+++ /dev/null
@@ -1,2110 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-/* ======   Compiler specifics   ====== */
-#if defined(_MSC_VER)
-#  pragma warning(disable : 4204)   /* disable: C4204: non-constant aggregate initializer */
-#endif
-
-
-/* ======   Constants   ====== */
-#define ZSTDMT_OVERLAPLOG_DEFAULT 0
-
-
-/* ======   Dependencies   ====== */
-#include <string.h>      /* memcpy, memset */
-#include <limits.h>      /* INT_MAX, UINT_MAX */
-#include "mem.h"         /* MEM_STATIC */
-#include "pool.h"        /* threadpool */
-#include "threading.h"   /* mutex */
-#include "zstd_compress_internal.h"  /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
-#include "zstd_ldm.h"
-#include "zstdmt_compress.h"
-
-/* Guards code to support resizing the SeqPool.
- * We will want to resize the SeqPool to save memory in the future.
- * Until then, comment the code out since it is unused.
- */
-#define ZSTD_RESIZE_SEQPOOL 0
-
-/* ======   Debug   ====== */
-#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \
-    && !defined(_MSC_VER) \
-    && !defined(__MINGW32__)
-
-#  include <stdio.h>
-#  include <unistd.h>
-#  include <sys/times.h>
-
-#  define DEBUG_PRINTHEX(l,p,n) {            \
-    unsigned debug_u;                        \
-    for (debug_u=0; debug_u<(n); debug_u++)  \
-        RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
-    RAWLOG(l, " \n");                        \
-}
-
-static unsigned long long GetCurrentClockTimeMicroseconds(void)
-{
-   static clock_t _ticksPerSecond = 0;
-   if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);
-
-   {   struct tms junk; clock_t newTicks = (clock_t) times(&junk);
-       return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
-}  }
-
-#define MUTEX_WAIT_TIME_DLEVEL 6
-#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) {          \
-    if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) {   \
-        unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
-        ZSTD_pthread_mutex_lock(mutex);           \
-        {   unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
-            unsigned long long const elapsedTime = (afterTime-beforeTime); \
-            if (elapsedTime > 1000) {  /* or whatever threshold you like; I'm using 1 millisecond here */ \
-                DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
-                   elapsedTime, #mutex);          \
-        }   }                                     \
-    } else {                                      \
-        ZSTD_pthread_mutex_lock(mutex);           \
-    }                                             \
-}
-
-#else
-
-#  define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
-#  define DEBUG_PRINTHEX(l,p,n) {}
-
-#endif
-
-
-/* =====   Buffer Pool   ===== */
-/* a single Buffer Pool can be invoked from multiple threads in parallel */
-
-typedef struct buffer_s {
-    void* start;
-    size_t capacity;
-} buffer_t;
-
-static const buffer_t g_nullBuffer = { NULL, 0 };
-
-typedef struct ZSTDMT_bufferPool_s {
-    ZSTD_pthread_mutex_t poolMutex;
-    size_t bufferSize;
-    unsigned totalBuffers;
-    unsigned nbBuffers;
-    ZSTD_customMem cMem;
-    buffer_t bTable[1];   /* variable size */
-} ZSTDMT_bufferPool;
-
-static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem)
-{
-    unsigned const maxNbBuffers = 2*nbWorkers + 3;
-    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc(
-        sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
-    if (bufPool==NULL) return NULL;
-    if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
-        ZSTD_free(bufPool, cMem);
-        return NULL;
-    }
-    bufPool->bufferSize = 64 KB;
-    bufPool->totalBuffers = maxNbBuffers;
-    bufPool->nbBuffers = 0;
-    bufPool->cMem = cMem;
-    return bufPool;
-}
-
-static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
-{
-    unsigned u;
-    DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
-    if (!bufPool) return;   /* compatibility with free on NULL */
-    for (u=0; u<bufPool->totalBuffers; u++) {
-        DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
-        ZSTD_free(bufPool->bTable[u].start, bufPool->cMem);
-    }
-    ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
-    ZSTD_free(bufPool, bufPool->cMem);
-}
-
-/* only works at initialization, not during compression */
-static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
-{
-    size_t const poolSize = sizeof(*bufPool)
-                          + (bufPool->totalBuffers - 1) * sizeof(buffer_t);
-    unsigned u;
-    size_t totalBufferSize = 0;
-    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
-    for (u=0; u<bufPool->totalBuffers; u++)
-        totalBufferSize += bufPool->bTable[u].capacity;
-    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
-
-    return poolSize + totalBufferSize;
-}
-
-/* ZSTDMT_setBufferSize() :
- * all future buffers provided by this buffer pool will have _at least_ this size
- * note : it's better for all buffers to have same size,
- * as they become freely interchangeable, reducing malloc/free usages and memory fragmentation */
-static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize)
-{
-    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
-    DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize);
-    bufPool->bufferSize = bSize;
-    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
-}
-
-
-static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers)
-{
-    unsigned const maxNbBuffers = 2*nbWorkers + 3;
-    if (srcBufPool==NULL) return NULL;
-    if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */
-        return srcBufPool;
-    /* need a larger buffer pool */
-    {   ZSTD_customMem const cMem = srcBufPool->cMem;
-        size_t const bSize = srcBufPool->bufferSize;   /* forward parameters */
-        ZSTDMT_bufferPool* newBufPool;
-        ZSTDMT_freeBufferPool(srcBufPool);
-        newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
-        if (newBufPool==NULL) return newBufPool;
-        ZSTDMT_setBufferSize(newBufPool, bSize);
-        return newBufPool;
-    }
-}
-
-/** ZSTDMT_getBuffer() :
- *  assumption : bufPool must be valid
- * @return : a buffer, with start pointer and size
- *  note: allocation may fail, in this case, start==NULL and size==0 */
-static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
-{
-    size_t const bSize = bufPool->bufferSize;
-    DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize);
-    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
-    if (bufPool->nbBuffers) {   /* try to use an existing buffer */
-        buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
-        size_t const availBufferSize = buf.capacity;
-        bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer;
-        if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
-            /* large enough, but not too much */
-            DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u",
-                        bufPool->nbBuffers, (U32)buf.capacity);
-            ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
-            return buf;
-        }
-        /* size conditions not respected : scratch this buffer, create new one */
-        DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
-        ZSTD_free(buf.start, bufPool->cMem);
-    }
-    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
-    /* create new buffer */
-    DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
-    {   buffer_t buffer;
-        void* const start = ZSTD_malloc(bSize, bufPool->cMem);
-        buffer.start = start;   /* note : start can be NULL if malloc fails ! */
-        buffer.capacity = (start==NULL) ? 0 : bSize;
-        if (start==NULL) {
-            DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!");
-        } else {
-            DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
-        }
-        return buffer;
-    }
-}
-
-#if ZSTD_RESIZE_SEQPOOL
-/** ZSTDMT_resizeBuffer() :
- * assumption : bufPool must be valid
- * @return : a buffer that is at least the buffer pool buffer size.
- *           If a reallocation happens, the data in the input buffer is copied.
- */
-static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
-{
-    size_t const bSize = bufPool->bufferSize;
-    if (buffer.capacity < bSize) {
-        void* const start = ZSTD_malloc(bSize, bufPool->cMem);
-        buffer_t newBuffer;
-        newBuffer.start = start;
-        newBuffer.capacity = start == NULL ? 0 : bSize;
-        if (start != NULL) {
-            assert(newBuffer.capacity >= buffer.capacity);
-            memcpy(newBuffer.start, buffer.start, buffer.capacity);
-            DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
-            return newBuffer;
-        }
-        DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!");
-    }
-    return buffer;
-}
-#endif
-
-/* store buffer for later re-use, up to pool capacity */
-static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
-{
-    DEBUGLOG(5, "ZSTDMT_releaseBuffer");
-    if (buf.start == NULL) return;   /* compatible with release on NULL */
-    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
-    if (bufPool->nbBuffers < bufPool->totalBuffers) {
-        bufPool->bTable[bufPool->nbBuffers++] = buf;  /* stored for later use */
-        DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u",
-                    (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));
-        ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
-        return;
-    }
-    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
-    /* Reached bufferPool capacity (should not happen) */
-    DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
-    ZSTD_free(buf.start, bufPool->cMem);
-}
-
-
-/* =====   Seq Pool Wrapper   ====== */
-
-static rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0};
-
-typedef ZSTDMT_bufferPool ZSTDMT_seqPool;
-
-static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
-{
-    return ZSTDMT_sizeof_bufferPool(seqPool);
-}
-
-static rawSeqStore_t bufferToSeq(buffer_t buffer)
-{
-    rawSeqStore_t seq = {NULL, 0, 0, 0};
-    seq.seq = (rawSeq*)buffer.start;
-    seq.capacity = buffer.capacity / sizeof(rawSeq);
-    return seq;
-}
-
-static buffer_t seqToBuffer(rawSeqStore_t seq)
-{
-    buffer_t buffer;
-    buffer.start = seq.seq;
-    buffer.capacity = seq.capacity * sizeof(rawSeq);
-    return buffer;
-}
-
-static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
-{
-    if (seqPool->bufferSize == 0) {
-        return kNullRawSeqStore;
-    }
-    return bufferToSeq(ZSTDMT_getBuffer(seqPool));
-}
-
-#if ZSTD_RESIZE_SEQPOOL
-static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
-{
-  return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq)));
-}
-#endif
-
-static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
-{
-  ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq));
-}
-
-static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
-{
-  ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq));
-}
-
-static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
-{
-    ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
-    if (seqPool == NULL) return NULL;
-    ZSTDMT_setNbSeq(seqPool, 0);
-    return seqPool;
-}
-
-static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
-{
-    ZSTDMT_freeBufferPool(seqPool);
-}
-
-static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
-{
-    return ZSTDMT_expandBufferPool(pool, nbWorkers);
-}
-
-
-/* =====   CCtx Pool   ===== */
-/* a single CCtx Pool can be invoked from multiple threads in parallel */
-
-typedef struct {
-    ZSTD_pthread_mutex_t poolMutex;
-    int totalCCtx;
-    int availCCtx;
-    ZSTD_customMem cMem;
-    ZSTD_CCtx* cctx[1];   /* variable size */
-} ZSTDMT_CCtxPool;
-
-/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
-static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
-{
-    int cid;
-    for (cid=0; cid<pool->totalCCtx; cid++)
-        ZSTD_freeCCtx(pool->cctx[cid]);  /* note : compatible with free on NULL */
-    ZSTD_pthread_mutex_destroy(&pool->poolMutex);
-    ZSTD_free(pool, pool->cMem);
-}
-
-/* ZSTDMT_createCCtxPool() :
- * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */
-static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
-                                              ZSTD_customMem cMem)
-{
-    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(
-        sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);
-    assert(nbWorkers > 0);
-    if (!cctxPool) return NULL;
-    if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
-        ZSTD_free(cctxPool, cMem);
-        return NULL;
-    }
-    cctxPool->cMem = cMem;
-    cctxPool->totalCCtx = nbWorkers;
-    cctxPool->availCCtx = 1;   /* at least one cctx for single-thread mode */
-    cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem);
-    if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
-    DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers);
-    return cctxPool;
-}
-
-static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,
-                                              int nbWorkers)
-{
-    if (srcPool==NULL) return NULL;
-    if (nbWorkers <= srcPool->totalCCtx) return srcPool;   /* good enough */
-    /* need a larger cctx pool */
-    {   ZSTD_customMem const cMem = srcPool->cMem;
-        ZSTDMT_freeCCtxPool(srcPool);
-        return ZSTDMT_createCCtxPool(nbWorkers, cMem);
-    }
-}
-
-/* only works during initialization phase, not during compression */
-static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
-{
-    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
-    {   unsigned const nbWorkers = cctxPool->totalCCtx;
-        size_t const poolSize = sizeof(*cctxPool)
-                                + (nbWorkers-1) * sizeof(ZSTD_CCtx*);
-        unsigned u;
-        size_t totalCCtxSize = 0;
-        for (u=0; u<nbWorkers; u++) {
-            totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
-        }
-        ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
-        assert(nbWorkers > 0);
-        return poolSize + totalCCtxSize;
-    }
-}
-
-static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
-{
-    DEBUGLOG(5, "ZSTDMT_getCCtx");
-    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
-    if (cctxPool->availCCtx) {
-        cctxPool->availCCtx--;
-        {   ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];
-            ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
-            return cctx;
-    }   }
-    ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
-    DEBUGLOG(5, "create one more CCtx");
-    return ZSTD_createCCtx_advanced(cctxPool->cMem);   /* note : can be NULL, when creation fails ! */
-}
-
-static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
-{
-    if (cctx==NULL) return;   /* compatibility with release on NULL */
-    ZSTD_pthread_mutex_lock(&pool->poolMutex);
-    if (pool->availCCtx < pool->totalCCtx)
-        pool->cctx[pool->availCCtx++] = cctx;
-    else {
-        /* pool overflow : should not happen, since totalCCtx==nbWorkers */
-        DEBUGLOG(4, "CCtx pool overflow : free cctx");
-        ZSTD_freeCCtx(cctx);
-    }
-    ZSTD_pthread_mutex_unlock(&pool->poolMutex);
-}
-
-/* ====   Serial State   ==== */
-
-typedef struct {
-    void const* start;
-    size_t size;
-} range_t;
-
-typedef struct {
-    /* All variables in the struct are protected by mutex. */
-    ZSTD_pthread_mutex_t mutex;
-    ZSTD_pthread_cond_t cond;
-    ZSTD_CCtx_params params;
-    ldmState_t ldmState;
-    XXH64_state_t xxhState;
-    unsigned nextJobID;
-    /* Protects ldmWindow.
-     * Must be acquired after the main mutex when acquiring both.
-     */
-    ZSTD_pthread_mutex_t ldmWindowMutex;
-    ZSTD_pthread_cond_t ldmWindowCond;  /* Signaled when ldmWindow is updated */
-    ZSTD_window_t ldmWindow;  /* A thread-safe copy of ldmState.window */
-} serialState_t;
-
-static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params, size_t jobSize)
-{
-    /* Adjust parameters */
-    if (params.ldmParams.enableLdm) {
-        DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
-        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
-        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
-        assert(params.ldmParams.hashRateLog < 32);
-        serialState->ldmState.hashPower =
-                ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
-    } else {
-        memset(&params.ldmParams, 0, sizeof(params.ldmParams));
-    }
-    serialState->nextJobID = 0;
-    if (params.fParams.checksumFlag)
-        XXH64_reset(&serialState->xxhState, 0);
-    if (params.ldmParams.enableLdm) {
-        ZSTD_customMem cMem = params.customMem;
-        unsigned const hashLog = params.ldmParams.hashLog;
-        size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
-        unsigned const bucketLog =
-            params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
-        size_t const bucketSize = (size_t)1 << bucketLog;
-        unsigned const prevBucketLog =
-            serialState->params.ldmParams.hashLog -
-            serialState->params.ldmParams.bucketSizeLog;
-        /* Size the seq pool tables */
-        ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
-        /* Reset the window */
-        ZSTD_window_clear(&serialState->ldmState.window);
-        serialState->ldmWindow = serialState->ldmState.window;
-        /* Resize tables and output space if necessary. */
-        if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
-            ZSTD_free(serialState->ldmState.hashTable, cMem);
-            serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_malloc(hashSize, cMem);
-        }
-        if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
-            ZSTD_free(serialState->ldmState.bucketOffsets, cMem);
-            serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_malloc(bucketSize, cMem);
-        }
-        if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
-            return 1;
-        /* Zero the tables */
-        memset(serialState->ldmState.hashTable, 0, hashSize);
-        memset(serialState->ldmState.bucketOffsets, 0, bucketSize);
-    }
-    serialState->params = params;
-    serialState->params.jobSize = (U32)jobSize;
-    return 0;
-}
-
-static int ZSTDMT_serialState_init(serialState_t* serialState)
-{
-    int initError = 0;
-    memset(serialState, 0, sizeof(*serialState));
-    initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
-    initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
-    initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
-    initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL);
-    return initError;
-}
-
-static void ZSTDMT_serialState_free(serialState_t* serialState)
-{
-    ZSTD_customMem cMem = serialState->params.customMem;
-    ZSTD_pthread_mutex_destroy(&serialState->mutex);
-    ZSTD_pthread_cond_destroy(&serialState->cond);
-    ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
-    ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
-    ZSTD_free(serialState->ldmState.hashTable, cMem);
-    ZSTD_free(serialState->ldmState.bucketOffsets, cMem);
-}
-
-static void ZSTDMT_serialState_update(serialState_t* serialState,
-                                      ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore,
-                                      range_t src, unsigned jobID)
-{
-    /* Wait for our turn */
-    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
-    while (serialState->nextJobID < jobID) {
-        DEBUGLOG(5, "wait for serialState->cond");
-        ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
-    }
-    /* A future job may error and skip our job */
-    if (serialState->nextJobID == jobID) {
-        /* It is now our turn, do any processing necessary */
-        if (serialState->params.ldmParams.enableLdm) {
-            size_t error;
-            assert(seqStore.seq != NULL && seqStore.pos == 0 &&
-                   seqStore.size == 0 && seqStore.capacity > 0);
-            assert(src.size <= serialState->params.jobSize);
-            ZSTD_window_update(&serialState->ldmState.window, src.start, src.size);
-            error = ZSTD_ldm_generateSequences(
-                &serialState->ldmState, &seqStore,
-                &serialState->params.ldmParams, src.start, src.size);
-            /* We provide a large enough buffer to never fail. */
-            assert(!ZSTD_isError(error)); (void)error;
-            /* Update ldmWindow to match the ldmState.window and signal the main
-             * thread if it is waiting for a buffer.
-             */
-            ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
-            serialState->ldmWindow = serialState->ldmState.window;
-            ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
-            ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
-        }
-        if (serialState->params.fParams.checksumFlag && src.size > 0)
-            XXH64_update(&serialState->xxhState, src.start, src.size);
-    }
-    /* Now it is the next jobs turn */
-    serialState->nextJobID++;
-    ZSTD_pthread_cond_broadcast(&serialState->cond);
-    ZSTD_pthread_mutex_unlock(&serialState->mutex);
-
-    if (seqStore.size > 0) {
-        size_t const err = ZSTD_referenceExternalSequences(
-            jobCCtx, seqStore.seq, seqStore.size);
-        assert(serialState->params.ldmParams.enableLdm);
-        assert(!ZSTD_isError(err));
-        (void)err;
-    }
-}
-
-static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState,
-                                              unsigned jobID, size_t cSize)
-{
-    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
-    if (serialState->nextJobID <= jobID) {
-        assert(ZSTD_isError(cSize)); (void)cSize;
-        DEBUGLOG(5, "Skipping past job %u because of error", jobID);
-        serialState->nextJobID = jobID + 1;
-        ZSTD_pthread_cond_broadcast(&serialState->cond);
-
-        ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
-        ZSTD_window_clear(&serialState->ldmWindow);
-        ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
-        ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
-    }
-    ZSTD_pthread_mutex_unlock(&serialState->mutex);
-
-}
-
-
-/* ------------------------------------------ */
-/* =====          Worker thread         ===== */
-/* ------------------------------------------ */
-
-static const range_t kNullRange = { NULL, 0 };
-
-typedef struct {
-    size_t   consumed;                   /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */
-    size_t   cSize;                      /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */
-    ZSTD_pthread_mutex_t job_mutex;      /* Thread-safe - used by mtctx and worker */
-    ZSTD_pthread_cond_t job_cond;        /* Thread-safe - used by mtctx and worker */
-    ZSTDMT_CCtxPool* cctxPool;           /* Thread-safe - used by mtctx and (all) workers */
-    ZSTDMT_bufferPool* bufPool;          /* Thread-safe - used by mtctx and (all) workers */
-    ZSTDMT_seqPool* seqPool;             /* Thread-safe - used by mtctx and (all) workers */
-    serialState_t* serial;               /* Thread-safe - used by mtctx and (all) workers */
-    buffer_t dstBuff;                    /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
-    range_t prefix;                      /* set by mtctx, then read by worker & mtctx => no barrier */
-    range_t src;                         /* set by mtctx, then read by worker & mtctx => no barrier */
-    unsigned jobID;                      /* set by mtctx, then read by worker => no barrier */
-    unsigned firstJob;                   /* set by mtctx, then read by worker => no barrier */
-    unsigned lastJob;                    /* set by mtctx, then read by worker => no barrier */
-    ZSTD_CCtx_params params;             /* set by mtctx, then read by worker => no barrier */
-    const ZSTD_CDict* cdict;             /* set by mtctx, then read by worker => no barrier */
-    unsigned long long fullFrameSize;    /* set by mtctx, then read by worker => no barrier */
-    size_t   dstFlushed;                 /* used only by mtctx */
-    unsigned frameChecksumNeeded;        /* used only by mtctx */
-} ZSTDMT_jobDescription;
-
-#define JOB_ERROR(e) {                          \
-    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);   \
-    job->cSize = e;                             \
-    ZSTD_pthread_mutex_unlock(&job->job_mutex); \
-    goto _endJob;                               \
-}
-
-/* ZSTDMT_compressionJob() is a POOL_function type */
-static void ZSTDMT_compressionJob(void* jobDescription)
-{
-    ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
-    ZSTD_CCtx_params jobParams = job->params;   /* do not modify job->params ! copy it, modify the copy */
-    ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
-    rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
-    buffer_t dstBuff = job->dstBuff;
-    size_t lastCBlockSize = 0;
-
-    /* resources */
-    if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
-    if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
-        dstBuff = ZSTDMT_getBuffer(job->bufPool);
-        if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation));
-        job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
-    }
-    if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL)
-        JOB_ERROR(ERROR(memory_allocation));
-
-    /* Don't compute the checksum for chunks, since we compute it externally,
-     * but write it in the header.
-     */
-    if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
-    /* Don't run LDM for the chunks, since we handle it externally */
-    jobParams.ldmParams.enableLdm = 0;
-
-
-    /* init */
-    if (job->cdict) {
-        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, jobParams, job->fullFrameSize);
-        assert(job->firstJob);  /* only allowed for first job */
-        if (ZSTD_isError(initError)) JOB_ERROR(initError);
-    } else {  /* srcStart points at reloaded section */
-        U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
-        {   size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
-            if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
-        }
-        {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
-                                        job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
-                                        ZSTD_dtlm_fast,
-                                        NULL, /*cdict*/
-                                        jobParams, pledgedSrcSize);
-            if (ZSTD_isError(initError)) JOB_ERROR(initError);
-    }   }
-
-    /* Perform serial step as early as possible, but after CCtx initialization */
-    ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);
-
-    if (!job->firstJob) {  /* flush and overwrite frame header when it's not first job */
-        size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
-        if (ZSTD_isError(hSize)) JOB_ERROR(hSize);
-        DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
-        ZSTD_invalidateRepCodes(cctx);
-    }
-
-    /* compress */
-    {   size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX;
-        int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize);
-        const BYTE* ip = (const BYTE*) job->src.start;
-        BYTE* const ostart = (BYTE*)dstBuff.start;
-        BYTE* op = ostart;
-        BYTE* oend = op + dstBuff.capacity;
-        int chunkNb;
-        if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize);   /* check overflow */
-        DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks);
-        assert(job->cSize == 0);
-        for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
-            size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize);
-            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
-            ip += chunkSize;
-            op += cSize; assert(op < oend);
-            /* stats */
-            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
-            job->cSize += cSize;
-            job->consumed = chunkSize * chunkNb;
-            DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)",
-                        (U32)cSize, (U32)job->cSize);
-            ZSTD_pthread_cond_signal(&job->job_cond);   /* warns some more data is ready to be flushed */
-            ZSTD_pthread_mutex_unlock(&job->job_mutex);
-        }
-        /* last block */
-        assert(chunkSize > 0);
-        assert((chunkSize & (chunkSize - 1)) == 0);  /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
-        if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
-            size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
-            size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
-            size_t const cSize = (job->lastJob) ?
-                 ZSTD_compressEnd     (cctx, op, oend-op, ip, lastBlockSize) :
-                 ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);
-            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
-            lastCBlockSize = cSize;
-    }   }
-
-_endJob:
-    ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
-    if (job->prefix.size > 0)
-        DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start);
-    DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start);
-    /* release resources */
-    ZSTDMT_releaseSeq(job->seqPool, rawSeqStore);
-    ZSTDMT_releaseCCtx(job->cctxPool, cctx);
-    /* report */
-    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
-    if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0);
-    job->cSize += lastCBlockSize;
-    job->consumed = job->src.size;  /* when job->consumed == job->src.size , compression job is presumed completed */
-    ZSTD_pthread_cond_signal(&job->job_cond);
-    ZSTD_pthread_mutex_unlock(&job->job_mutex);
-}
-
-
-/* ------------------------------------------ */
-/* =====   Multi-threaded compression   ===== */
-/* ------------------------------------------ */
-
-typedef struct {
-    range_t prefix;         /* read-only non-owned prefix buffer */
-    buffer_t buffer;
-    size_t filled;
-} inBuff_t;
-
-typedef struct {
-  BYTE* buffer;     /* The round input buffer. All jobs get references
-                     * to pieces of the buffer. ZSTDMT_tryGetInputRange()
-                     * handles handing out job input buffers, and makes
-                     * sure it doesn't overlap with any pieces still in use.
-                     */
-  size_t capacity;  /* The capacity of buffer. */
-  size_t pos;       /* The position of the current inBuff in the round
-                     * buffer. Updated past the end if the inBuff once
-                     * the inBuff is sent to the worker thread.
-                     * pos <= capacity.
-                     */
-} roundBuff_t;
-
-static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};
-
-#define RSYNC_LENGTH 32
-
-typedef struct {
-  U64 hash;
-  U64 hitMask;
-  U64 primePower;
-} rsyncState_t;
-
-struct ZSTDMT_CCtx_s {
-    POOL_ctx* factory;
-    ZSTDMT_jobDescription* jobs;
-    ZSTDMT_bufferPool* bufPool;
-    ZSTDMT_CCtxPool* cctxPool;
-    ZSTDMT_seqPool* seqPool;
-    ZSTD_CCtx_params params;
-    size_t targetSectionSize;
-    size_t targetPrefixSize;
-    int jobReady;        /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */
-    inBuff_t inBuff;
-    roundBuff_t roundBuff;
-    serialState_t serial;
-    rsyncState_t rsync;
-    unsigned singleBlockingThread;
-    unsigned jobIDMask;
-    unsigned doneJobID;
-    unsigned nextJobID;
-    unsigned frameEnded;
-    unsigned allJobsCompleted;
-    unsigned long long frameContentSize;
-    unsigned long long consumed;
-    unsigned long long produced;
-    ZSTD_customMem cMem;
-    ZSTD_CDict* cdictLocal;
-    const ZSTD_CDict* cdict;
-};
-
-static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
-{
-    U32 jobNb;
-    if (jobTable == NULL) return;
-    for (jobNb=0; jobNb<nbJobs; jobNb++) {
-        ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
-        ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
-    }
-    ZSTD_free(jobTable, cMem);
-}
-
-/* ZSTDMT_allocJobsTable()
- * allocate and init a job table.
- * update *nbJobsPtr to next power of 2 value, as size of table */
-static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
-{
-    U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;
-    U32 const nbJobs = 1 << nbJobsLog2;
-    U32 jobNb;
-    ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
-                ZSTD_calloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
-    int initError = 0;
-    if (jobTable==NULL) return NULL;
-    *nbJobsPtr = nbJobs;
-    for (jobNb=0; jobNb<nbJobs; jobNb++) {
-        initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL);
-        initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL);
-    }
-    if (initError != 0) {
-        ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem);
-        return NULL;
-    }
-    return jobTable;
-}
-
-static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
-    U32 nbJobs = nbWorkers + 2;
-    if (nbJobs > mtctx->jobIDMask+1) {  /* need more job capacity */
-        ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
-        mtctx->jobIDMask = 0;
-        mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem);
-        if (mtctx->jobs==NULL) return ERROR(memory_allocation);
-        assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0));  /* ensure nbJobs is a power of 2 */
-        mtctx->jobIDMask = nbJobs - 1;
-    }
-    return 0;
-}
-
-
-/* ZSTDMT_CCtxParam_setNbWorkers():
- * Internal use only */
-size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
-{
-    return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
-}
-
-MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem)
-{
-    ZSTDMT_CCtx* mtctx;
-    U32 nbJobs = nbWorkers + 2;
-    int initError;
-    DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers);
-
-    if (nbWorkers < 1) return NULL;
-    nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX);
-    if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
-        /* invalid custom allocator */
-        return NULL;
-
-    mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem);
-    if (!mtctx) return NULL;
-    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
-    mtctx->cMem = cMem;
-    mtctx->allJobsCompleted = 1;
-    mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
-    mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
-    assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0);  /* ensure nbJobs is a power of 2 */
-    mtctx->jobIDMask = nbJobs - 1;
-    mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
-    mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);
-    mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);
-    initError = ZSTDMT_serialState_init(&mtctx->serial);
-    mtctx->roundBuff = kNullRoundBuff;
-    if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) {
-        ZSTDMT_freeCCtx(mtctx);
-        return NULL;
-    }
-    DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers);
-    return mtctx;
-}
-
-ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
-{
-#ifdef ZSTD_MULTITHREAD
-    return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem);
-#else
-    (void)nbWorkers;
-    (void)cMem;
-    return NULL;
-#endif
-}
-
-ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers)
-{
-    return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem);
-}
-
-
-/* ZSTDMT_releaseAllJobResources() :
- * note : ensure all workers are killed first ! */
-static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
-{
-    unsigned jobID;
-    DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
-    for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
-        DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
-        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
-        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
-        mtctx->jobs[jobID].cSize = 0;
-    }
-    memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
-    mtctx->inBuff.buffer = g_nullBuffer;
-    mtctx->inBuff.filled = 0;
-    mtctx->allJobsCompleted = 1;
-}
-
-static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
-{
-    DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
-    while (mtctx->doneJobID < mtctx->nextJobID) {
-        unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
-        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
-        while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
-            DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID);   /* we want to block when waiting for data to flush */
-            ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
-        }
-        ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
-        mtctx->doneJobID++;
-    }
-}
-
-size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
-{
-    if (mtctx==NULL) return 0;   /* compatible with free on NULL */
-    POOL_free(mtctx->factory);   /* stop and free worker threads */
-    ZSTDMT_releaseAllJobResources(mtctx);  /* release job resources into pools first */
-    ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
-    ZSTDMT_freeBufferPool(mtctx->bufPool);
-    ZSTDMT_freeCCtxPool(mtctx->cctxPool);
-    ZSTDMT_freeSeqPool(mtctx->seqPool);
-    ZSTDMT_serialState_free(&mtctx->serial);
-    ZSTD_freeCDict(mtctx->cdictLocal);
-    if (mtctx->roundBuff.buffer)
-        ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);
-    ZSTD_free(mtctx, mtctx->cMem);
-    return 0;
-}
-
-size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
-{
-    if (mtctx == NULL) return 0;   /* supports sizeof NULL */
-    return sizeof(*mtctx)
-            + POOL_sizeof(mtctx->factory)
-            + ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
-            + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
-            + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
-            + ZSTDMT_sizeof_seqPool(mtctx->seqPool)
-            + ZSTD_sizeof_CDict(mtctx->cdictLocal)
-            + mtctx->roundBuff.capacity;
-}
-
-/* Internal only */
-size_t
-ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
-                                   ZSTDMT_parameter parameter,
-                                   int value)
-{
-    DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter");
-    switch(parameter)
-    {
-    case ZSTDMT_p_jobSize :
-        DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value);
-        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value);
-    case ZSTDMT_p_overlapLog :
-        DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value);
-        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value);
-    case ZSTDMT_p_rsyncable :
-        DEBUGLOG(4, "ZSTD_p_rsyncable : %i", value);
-        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value);
-    default :
-        return ERROR(parameter_unsupported);
-    }
-}
-
-size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value)
-{
-    DEBUGLOG(4, "ZSTDMT_setMTCtxParameter");
-    return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
-}
-
-size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value)
-{
-    switch (parameter) {
-    case ZSTDMT_p_jobSize:
-        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value);
-    case ZSTDMT_p_overlapLog:
-        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_overlapLog, value);
-    case ZSTDMT_p_rsyncable:
-        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value);
-    default:
-        return ERROR(parameter_unsupported);
-    }
-}
-
-/* Sets parameters relevant to the compression job,
- * initializing others to default values. */
-static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
-{
-    ZSTD_CCtx_params jobParams = params;
-    /* Clear parameters related to multithreading */
-    jobParams.forceWindow = 0;
-    jobParams.nbWorkers = 0;
-    jobParams.jobSize = 0;
-    jobParams.overlapLog = 0;
-    jobParams.rsyncable = 0;
-    memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t));
-    memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem));
-    return jobParams;
-}
-
-
-/* ZSTDMT_resize() :
- * @return : error code if fails, 0 on success */
-static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
-{
-    if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
-    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
-    mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
-    if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
-    mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
-    if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);
-    mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers);
-    if (mtctx->seqPool == NULL) return ERROR(memory_allocation);
-    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
-    return 0;
-}
-
-
-/*! ZSTDMT_updateCParams_whileCompressing() :
- *  Updates a selected set of compression parameters, remaining compatible with currently active frame.
- *  New parameters will be applied to next compression job. */
-void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
-{
-    U32 const saved_wlog = mtctx->params.cParams.windowLog;   /* Do not modify windowLog while compressing */
-    int const compressionLevel = cctxParams->compressionLevel;
-    DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
-                compressionLevel);
-    mtctx->params.compressionLevel = compressionLevel;
-    {   ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, 0, 0);
-        cParams.windowLog = saved_wlog;
-        mtctx->params.cParams = cParams;
-    }
-}
-
-/* ZSTDMT_getFrameProgression():
- * tells how much data has been consumed (input) and produced (output) for current frame.
- * able to count progression inside worker threads.
- * Note : mutex will be acquired during statistics collection inside workers. */
-ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
-{
-    ZSTD_frameProgression fps;
-    DEBUGLOG(5, "ZSTDMT_getFrameProgression");
-    fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
-    fps.consumed = mtctx->consumed;
-    fps.produced = fps.flushed = mtctx->produced;
-    fps.currentJobID = mtctx->nextJobID;
-    fps.nbActiveWorkers = 0;
-    {   unsigned jobNb;
-        unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
-        DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
-                    mtctx->doneJobID, lastJobNb, mtctx->jobReady)
-        for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
-            unsigned const wJobID = jobNb & mtctx->jobIDMask;
-            ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID];
-            ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
-            {   size_t const cResult = jobPtr->cSize;
-                size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
-                size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
-                assert(flushed <= produced);
-                fps.ingested += jobPtr->src.size;
-                fps.consumed += jobPtr->consumed;
-                fps.produced += produced;
-                fps.flushed  += flushed;
-                fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size);
-            }
-            ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
-        }
-    }
-    return fps;
-}
-
-
-size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
-{
-    size_t toFlush;
-    unsigned const jobID = mtctx->doneJobID;
-    assert(jobID <= mtctx->nextJobID);
-    if (jobID == mtctx->nextJobID) return 0;   /* no active job => nothing to flush */
-
-    /* look into oldest non-fully-flushed job */
-    {   unsigned const wJobID = jobID & mtctx->jobIDMask;
-        ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID];
-        ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
-        {   size_t const cResult = jobPtr->cSize;
-            size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
-            size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
-            assert(flushed <= produced);
-            assert(jobPtr->consumed <= jobPtr->src.size);
-            toFlush = produced - flushed;
-            /* if toFlush==0, nothing is available to flush.
-             * However, jobID is expected to still be active:
-             * if jobID was already completed and fully flushed,
-             * ZSTDMT_flushProduced() should have already moved onto next job.
-             * Therefore, some input has not yet been consumed. */
-            if (toFlush==0) {
-                assert(jobPtr->consumed < jobPtr->src.size);
-            }
-        }
-        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
-    }
-
-    return toFlush;
-}
-
-
-/* ------------------------------------------ */
-/* =====   Multi-threaded compression   ===== */
-/* ------------------------------------------ */
-
-static unsigned ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params)
-{
-    unsigned jobLog;
-    if (params.ldmParams.enableLdm) {
-        /* In Long Range Mode, the windowLog is typically oversized.
-         * In which case, it's preferable to determine the jobSize
-         * based on chainLog instead. */
-        jobLog = MAX(21, params.cParams.chainLog + 4);
-    } else {
-        jobLog = MAX(20, params.cParams.windowLog + 2);
-    }
-    return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);
-}
-
-static int ZSTDMT_overlapLog_default(ZSTD_strategy strat)
-{
-    switch(strat)
-    {
-        case ZSTD_btultra2:
-            return 9;
-        case ZSTD_btultra:
-        case ZSTD_btopt:
-            return 8;
-        case ZSTD_btlazy2:
-        case ZSTD_lazy2:
-            return 7;
-        case ZSTD_lazy:
-        case ZSTD_greedy:
-        case ZSTD_dfast:
-        case ZSTD_fast:
-        default:;
-    }
-    return 6;
-}
-
-static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
-{
-    assert(0 <= ovlog && ovlog <= 9);
-    if (ovlog == 0) return ZSTDMT_overlapLog_default(strat);
-    return ovlog;
-}
-
-static size_t ZSTDMT_computeOverlapSize(ZSTD_CCtx_params const params)
-{
-    int const overlapRLog = 9 - ZSTDMT_overlapLog(params.overlapLog, params.cParams.strategy);
-    int ovLog = (overlapRLog >= 8) ? 0 : (params.cParams.windowLog - overlapRLog);
-    assert(0 <= overlapRLog && overlapRLog <= 8);
-    if (params.ldmParams.enableLdm) {
-        /* In Long Range Mode, the windowLog is typically oversized.
-         * In which case, it's preferable to determine the jobSize
-         * based on chainLog instead.
-         * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */
-        ovLog = MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
-                - overlapRLog;
-    }
-    assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX);
-    DEBUGLOG(4, "overlapLog : %i", params.overlapLog);
-    DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
-    return (ovLog==0) ? 0 : (size_t)1 << ovLog;
-}
-
-static unsigned
-ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers)
-{
-    assert(nbWorkers>0);
-    {   size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);
-        size_t const jobMaxSize = jobSizeTarget << 2;
-        size_t const passSizeMax = jobMaxSize * nbWorkers;
-        unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1;
-        unsigned const nbJobsLarge = multiplier * nbWorkers;
-        unsigned const nbJobsMax = (unsigned)(srcSize / jobSizeTarget) + 1;
-        unsigned const nbJobsSmall = MIN(nbJobsMax, nbWorkers);
-        return (multiplier>1) ? nbJobsLarge : nbJobsSmall;
-}   }
-
-/* ZSTDMT_compress_advanced_internal() :
- * This is a blocking function : it will only give back control to caller after finishing its compression job.
- */
-static size_t ZSTDMT_compress_advanced_internal(
-                ZSTDMT_CCtx* mtctx,
-                void* dst, size_t dstCapacity,
-          const void* src, size_t srcSize,
-          const ZSTD_CDict* cdict,
-                ZSTD_CCtx_params params)
-{
-    ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params);
-    size_t const overlapSize = ZSTDMT_computeOverlapSize(params);
-    unsigned const nbJobs = ZSTDMT_computeNbJobs(params, srcSize, params.nbWorkers);
-    size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;
-    size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize;   /* avoid too small last block */
-    const char* const srcStart = (const char*)src;
-    size_t remainingSrcSize = srcSize;
-    unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbJobs : (unsigned)(dstCapacity / ZSTD_compressBound(avgJobSize));  /* presumes avgJobSize >= 256 KB, which should be the case */
-    size_t frameStartPos = 0, dstBufferPos = 0;
-    assert(jobParams.nbWorkers == 0);
-    assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);
-
-    params.jobSize = (U32)avgJobSize;
-    DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: nbJobs=%2u (rawSize=%u bytes; fixedSize=%u) ",
-                nbJobs, (U32)proposedJobSize, (U32)avgJobSize);
-
-    if ((nbJobs==1) | (params.nbWorkers<=1)) {   /* fallback to single-thread mode : this is a blocking invocation anyway */
-        ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
-        DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode");
-        if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
-        return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams);
-    }
-
-    assert(avgJobSize >= 256 KB);  /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
-    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) );
-    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize))
-        return ERROR(memory_allocation);
-
-    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) );  /* only expands if necessary */
-
-    {   unsigned u;
-        for (u=0; u<nbJobs; u++) {
-            size_t const jobSize = MIN(remainingSrcSize, avgJobSize);
-            size_t const dstBufferCapacity = ZSTD_compressBound(jobSize);
-            buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };
-            buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer;
-            size_t dictSize = u ? overlapSize : 0;
-
-            mtctx->jobs[u].prefix.start = srcStart + frameStartPos - dictSize;
-            mtctx->jobs[u].prefix.size = dictSize;
-            mtctx->jobs[u].src.start = srcStart + frameStartPos;
-            mtctx->jobs[u].src.size = jobSize; assert(jobSize > 0);  /* avoid job.src.size == 0 */
-            mtctx->jobs[u].consumed = 0;
-            mtctx->jobs[u].cSize = 0;
-            mtctx->jobs[u].cdict = (u==0) ? cdict : NULL;
-            mtctx->jobs[u].fullFrameSize = srcSize;
-            mtctx->jobs[u].params = jobParams;
-            /* do not calculate checksum within sections, but write it in header for first section */
-            mtctx->jobs[u].dstBuff = dstBuffer;
-            mtctx->jobs[u].cctxPool = mtctx->cctxPool;
-            mtctx->jobs[u].bufPool = mtctx->bufPool;
-            mtctx->jobs[u].seqPool = mtctx->seqPool;
-            mtctx->jobs[u].serial = &mtctx->serial;
-            mtctx->jobs[u].jobID = u;
-            mtctx->jobs[u].firstJob = (u==0);
-            mtctx->jobs[u].lastJob = (u==nbJobs-1);
-
-            DEBUGLOG(5, "ZSTDMT_compress_advanced_internal: posting job %u  (%u bytes)", u, (U32)jobSize);
-            DEBUG_PRINTHEX(6, mtctx->jobs[u].prefix.start, 12);
-            POOL_add(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[u]);
-
-            frameStartPos += jobSize;
-            dstBufferPos += dstBufferCapacity;
-            remainingSrcSize -= jobSize;
-    }   }
-
-    /* collect result */
-    {   size_t error = 0, dstPos = 0;
-        unsigned jobID;
-        for (jobID=0; jobID<nbJobs; jobID++) {
-            DEBUGLOG(5, "waiting for job %u ", jobID);
-            ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
-            while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
-                DEBUGLOG(5, "waiting for jobCompleted signal from job %u", jobID);
-                ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
-            }
-            ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
-            DEBUGLOG(5, "ready to write job %u ", jobID);
-
-            {   size_t const cSize = mtctx->jobs[jobID].cSize;
-                if (ZSTD_isError(cSize)) error = cSize;
-                if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
-                if (jobID) {   /* note : job 0 is written directly at dst, which is correct position */
-                    if (!error)
-                        memmove((char*)dst + dstPos, mtctx->jobs[jobID].dstBuff.start, cSize);  /* may overlap when job compressed within dst */
-                    if (jobID >= compressWithinDst) {  /* job compressed into its own buffer, which must be released */
-                        DEBUGLOG(5, "releasing buffer %u>=%u", jobID, compressWithinDst);
-                        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
-                }   }
-                mtctx->jobs[jobID].dstBuff = g_nullBuffer;
-                mtctx->jobs[jobID].cSize = 0;
-                dstPos += cSize ;
-            }
-        }  /* for (jobID=0; jobID<nbJobs; jobID++) */
-
-        DEBUGLOG(4, "checksumFlag : %u ", params.fParams.checksumFlag);
-        if (params.fParams.checksumFlag) {
-            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
-            if (dstPos + 4 > dstCapacity) {
-                error = ERROR(dstSize_tooSmall);
-            } else {
-                DEBUGLOG(4, "writing checksum : %08X \n", checksum);
-                MEM_writeLE32((char*)dst + dstPos, checksum);
-                dstPos += 4;
-        }   }
-
-        if (!error) DEBUGLOG(4, "compressed size : %u  ", (U32)dstPos);
-        return error ? error : dstPos;
-    }
-}
-
-size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
-                                void* dst, size_t dstCapacity,
-                          const void* src, size_t srcSize,
-                          const ZSTD_CDict* cdict,
-                                ZSTD_parameters params,
-                                int overlapLog)
-{
-    ZSTD_CCtx_params cctxParams = mtctx->params;
-    cctxParams.cParams = params.cParams;
-    cctxParams.fParams = params.fParams;
-    assert(ZSTD_OVERLAPLOG_MIN <= overlapLog && overlapLog <= ZSTD_OVERLAPLOG_MAX);
-    cctxParams.overlapLog = overlapLog;
-    return ZSTDMT_compress_advanced_internal(mtctx,
-                                             dst, dstCapacity,
-                                             src, srcSize,
-                                             cdict, cctxParams);
-}
-
-
-size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
-                           void* dst, size_t dstCapacity,
-                     const void* src, size_t srcSize,
-                           int compressionLevel)
-{
-    ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
-    int const overlapLog = ZSTDMT_overlapLog_default(params.cParams.strategy);
-    params.fParams.contentSizeFlag = 1;
-    return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog);
-}
-
-
-/* ====================================== */
-/* =======      Streaming API     ======= */
-/* ====================================== */
-
-size_t ZSTDMT_initCStream_internal(
-        ZSTDMT_CCtx* mtctx,
-        const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
-        const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
-        unsigned long long pledgedSrcSize)
-{
-    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)",
-                (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);
-
-    /* params supposed partially fully validated at this point */
-    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
-    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
-
-    /* init */
-    if (params.nbWorkers != mtctx->params.nbWorkers)
-        FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) );
-
-    if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
-    if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;
-
-    mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);  /* do not trigger multi-threading when srcSize is too small */
-    if (mtctx->singleBlockingThread) {
-        ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(params);
-        DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode");
-        assert(singleThreadParams.nbWorkers == 0);
-        return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0],
-                                         dict, dictSize, cdict,
-                                         singleThreadParams, pledgedSrcSize);
-    }
-
-    DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);
-
-    if (mtctx->allJobsCompleted == 0) {   /* previous compression not correctly finished */
-        ZSTDMT_waitForAllJobsCompleted(mtctx);
-        ZSTDMT_releaseAllJobResources(mtctx);
-        mtctx->allJobsCompleted = 1;
-    }
-
-    mtctx->params = params;
-    mtctx->frameContentSize = pledgedSrcSize;
-    if (dict) {
-        ZSTD_freeCDict(mtctx->cdictLocal);
-        mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
-                                                    ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */
-                                                    params.cParams, mtctx->cMem);
-        mtctx->cdict = mtctx->cdictLocal;
-        if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
-    } else {
-        ZSTD_freeCDict(mtctx->cdictLocal);
-        mtctx->cdictLocal = NULL;
-        mtctx->cdict = cdict;
-    }
-
-    mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(params);
-    DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
-    mtctx->targetSectionSize = params.jobSize;
-    if (mtctx->targetSectionSize == 0) {
-        mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(params);
-    }
-    assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);
-
-    if (params.rsyncable) {
-        /* Aim for the targetsectionSize as the average job size. */
-        U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20);
-        U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20;
-        assert(jobSizeMB >= 1);
-        DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
-        mtctx->rsync.hash = 0;
-        mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
-        mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH);
-    }
-    if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize;  /* job size must be >= overlap size */
-    DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize);
-    DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
-    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
-    {
-        /* If ldm is enabled we need windowSize space. */
-        size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0;
-        /* Two buffers of slack, plus extra space for the overlap
-         * This is the minimum slack that LDM works with. One extra because
-         * flush might waste up to targetSectionSize-1 bytes. Another extra
-         * for the overlap (if > 0), then one to fill which doesn't overlap
-         * with the LDM window.
-         */
-        size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0);
-        size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers;
-        /* Compute the total size, and always have enough slack */
-        size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1);
-        size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers;
-        size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
-        if (mtctx->roundBuff.capacity < capacity) {
-            if (mtctx->roundBuff.buffer)
-                ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);
-            mtctx->roundBuff.buffer = (BYTE*)ZSTD_malloc(capacity, mtctx->cMem);
-            if (mtctx->roundBuff.buffer == NULL) {
-                mtctx->roundBuff.capacity = 0;
-                return ERROR(memory_allocation);
-            }
-            mtctx->roundBuff.capacity = capacity;
-        }
-    }
-    DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10));
-    mtctx->roundBuff.pos = 0;
-    mtctx->inBuff.buffer = g_nullBuffer;
-    mtctx->inBuff.filled = 0;
-    mtctx->inBuff.prefix = kNullRange;
-    mtctx->doneJobID = 0;
-    mtctx->nextJobID = 0;
-    mtctx->frameEnded = 0;
-    mtctx->allJobsCompleted = 0;
-    mtctx->consumed = 0;
-    mtctx->produced = 0;
-    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize))
-        return ERROR(memory_allocation);
-    return 0;
-}
-
-size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
-                             const void* dict, size_t dictSize,
-                                   ZSTD_parameters params,
-                                   unsigned long long pledgedSrcSize)
-{
-    ZSTD_CCtx_params cctxParams = mtctx->params;  /* retrieve sticky params */
-    DEBUGLOG(4, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
-    cctxParams.cParams = params.cParams;
-    cctxParams.fParams = params.fParams;
-    return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dct_auto, NULL,
-                                       cctxParams, pledgedSrcSize);
-}
-
-size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
-                               const ZSTD_CDict* cdict,
-                                     ZSTD_frameParameters fParams,
-                                     unsigned long long pledgedSrcSize)
-{
-    ZSTD_CCtx_params cctxParams = mtctx->params;
-    if (cdict==NULL) return ERROR(dictionary_wrong);   /* method incompatible with NULL cdict */
-    cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict);
-    cctxParams.fParams = fParams;
-    return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dct_auto, cdict,
-                                       cctxParams, pledgedSrcSize);
-}
-
-
-/* ZSTDMT_resetCStream() :
- * pledgedSrcSize can be zero == unknown (for the time being)
- * prefer using ZSTD_CONTENTSIZE_UNKNOWN,
- * as `0` might mean "empty" in the future */
-size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize)
-{
-    if (!pledgedSrcSize) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
-    return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, 0, mtctx->params,
-                                       pledgedSrcSize);
-}
-
-size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) {
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);
-    ZSTD_CCtx_params cctxParams = mtctx->params;   /* retrieve sticky params */
-    DEBUGLOG(4, "ZSTDMT_initCStream (cLevel=%i)", compressionLevel);
-    cctxParams.cParams = params.cParams;
-    cctxParams.fParams = params.fParams;
-    return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
-}
-
-
-/* ZSTDMT_writeLastEmptyBlock()
- * Write a single empty block with an end-of-frame to finish a frame.
- * Job must be created from streaming variant.
- * This function is always successful if expected conditions are fulfilled.
- */
-static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
-{
-    assert(job->lastJob == 1);
-    assert(job->src.size == 0);   /* last job is empty -> will be simplified into a last empty block */
-    assert(job->firstJob == 0);   /* cannot be first job, as it also needs to create frame header */
-    assert(job->dstBuff.start == NULL);   /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */
-    job->dstBuff = ZSTDMT_getBuffer(job->bufPool);
-    if (job->dstBuff.start == NULL) {
-      job->cSize = ERROR(memory_allocation);
-      return;
-    }
-    assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize);   /* no buffer should ever be that small */
-    job->src = kNullRange;
-    job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity);
-    assert(!ZSTD_isError(job->cSize));
-    assert(job->consumed == 0);
-}
-
-static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp)
-{
-    unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask;
-    int const endFrame = (endOp == ZSTD_e_end);
-
-    if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) {
-        DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full");
-        assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask));
-        return 0;
-    }
-
-    if (!mtctx->jobReady) {
-        BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start;
-        DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ",
-                    mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size);
-        mtctx->jobs[jobID].src.start = src;
-        mtctx->jobs[jobID].src.size = srcSize;
-        assert(mtctx->inBuff.filled >= srcSize);
-        mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix;
-        mtctx->jobs[jobID].consumed = 0;
-        mtctx->jobs[jobID].cSize = 0;
-        mtctx->jobs[jobID].params = mtctx->params;
-        mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL;
-        mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize;
-        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
-        mtctx->jobs[jobID].cctxPool = mtctx->cctxPool;
-        mtctx->jobs[jobID].bufPool = mtctx->bufPool;
-        mtctx->jobs[jobID].seqPool = mtctx->seqPool;
-        mtctx->jobs[jobID].serial = &mtctx->serial;
-        mtctx->jobs[jobID].jobID = mtctx->nextJobID;
-        mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
-        mtctx->jobs[jobID].lastJob = endFrame;
-        mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0);
-        mtctx->jobs[jobID].dstFlushed = 0;
-
-        /* Update the round buffer pos and clear the input buffer to be reset */
-        mtctx->roundBuff.pos += srcSize;
-        mtctx->inBuff.buffer = g_nullBuffer;
-        mtctx->inBuff.filled = 0;
-        /* Set the prefix */
-        if (!endFrame) {
-            size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize);
-            mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize;
-            mtctx->inBuff.prefix.size = newPrefixSize;
-        } else {   /* endFrame==1 => no need for another input buffer */
-            mtctx->inBuff.prefix = kNullRange;
-            mtctx->frameEnded = endFrame;
-            if (mtctx->nextJobID == 0) {
-                /* single job exception : checksum is already calculated directly within worker thread */
-                mtctx->params.fParams.checksumFlag = 0;
-        }   }
-
-        if ( (srcSize == 0)
-          && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) {
-            DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame");
-            assert(endOp == ZSTD_e_end);  /* only possible case : need to end the frame with an empty last block */
-            ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID);
-            mtctx->nextJobID++;
-            return 0;
-        }
-    }
-
-    DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes  (end:%u, jobNb == %u (mod:%u))",
-                mtctx->nextJobID,
-                (U32)mtctx->jobs[jobID].src.size,
-                mtctx->jobs[jobID].lastJob,
-                mtctx->nextJobID,
-                jobID);
-    if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) {
-        mtctx->nextJobID++;
-        mtctx->jobReady = 0;
-    } else {
-        DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID);
-        mtctx->jobReady = 1;
-    }
-    return 0;
-}
-
-
-/*! ZSTDMT_flushProduced() :
- *  flush whatever data has been produced but not yet flushed in current job.
- *  move to next job if current one is fully flushed.
- * `output` : `pos` will be updated with amount of data flushed .
- * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush .
- * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
-static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)
-{
-    unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask;
-    DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)",
-                blockToFlush, mtctx->doneJobID, mtctx->nextJobID);
-    assert(output->size >= output->pos);
-
-    ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
-    if (  blockToFlush
-      && (mtctx->doneJobID < mtctx->nextJobID) ) {
-        assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize);
-        while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) {  /* nothing to flush */
-            if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) {
-                DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none",
-                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size);
-                break;
-            }
-            DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)",
-                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
-            ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex);  /* block when nothing to flush but some to come */
-    }   }
-
-    /* try to flush something */
-    {   size_t cSize = mtctx->jobs[wJobID].cSize;                  /* shared */
-        size_t const srcConsumed = mtctx->jobs[wJobID].consumed;   /* shared */
-        size_t const srcSize = mtctx->jobs[wJobID].src.size;       /* read-only, could be done after mutex lock, but no-declaration-after-statement */
-        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
-        if (ZSTD_isError(cSize)) {
-            DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
-                        mtctx->doneJobID, ZSTD_getErrorName(cSize));
-            ZSTDMT_waitForAllJobsCompleted(mtctx);
-            ZSTDMT_releaseAllJobResources(mtctx);
-            return cSize;
-        }
-        /* add frame checksum if necessary (can only happen once) */
-        assert(srcConsumed <= srcSize);
-        if ( (srcConsumed == srcSize)   /* job completed -> worker no longer active */
-          && mtctx->jobs[wJobID].frameChecksumNeeded ) {
-            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
-            DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum);
-            MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum);
-            cSize += 4;
-            mtctx->jobs[wJobID].cSize += 4;  /* can write this shared value, as worker is no longer active */
-            mtctx->jobs[wJobID].frameChecksumNeeded = 0;
-        }
-
-        if (cSize > 0) {   /* compression is ongoing or completed */
-            size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
-            DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
-                        (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize);
-            assert(mtctx->doneJobID < mtctx->nextJobID);
-            assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
-            assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
-            memcpy((char*)output->dst + output->pos,
-                   (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
-                   toFlush);
-            output->pos += toFlush;
-            mtctx->jobs[wJobID].dstFlushed += toFlush;  /* can write : this value is only used by mtctx */
-
-            if ( (srcConsumed == srcSize)    /* job is completed */
-              && (mtctx->jobs[wJobID].dstFlushed == cSize) ) {   /* output buffer fully flushed => free this job position */
-                DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
-                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
-                ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
-                DEBUGLOG(5, "dstBuffer released");
-                mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
-                mtctx->jobs[wJobID].cSize = 0;   /* ensure this job slot is considered "not started" in future check */
-                mtctx->consumed += srcSize;
-                mtctx->produced += cSize;
-                mtctx->doneJobID++;
-        }   }
-
-        /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */
-        if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed);
-        if (srcSize > srcConsumed) return 1;   /* current job not completely compressed */
-    }
-    if (mtctx->doneJobID < mtctx->nextJobID) return 1;   /* some more jobs ongoing */
-    if (mtctx->jobReady) return 1;      /* one job is ready to push, just not yet in the list */
-    if (mtctx->inBuff.filled > 0) return 1;   /* input is not empty, and still needs to be converted into a job */
-    mtctx->allJobsCompleted = mtctx->frameEnded;   /* all jobs are entirely flushed => if this one is last one, frame is completed */
-    if (end == ZSTD_e_end) return !mtctx->frameEnded;  /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */
-    return 0;   /* internal buffers fully flushed */
-}
-
-/**
- * Returns the range of data used by the earliest job that is not yet complete.
- * If the data of the first job is broken up into two segments, we cover both
- * sections.
- */
-static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
-{
-    unsigned const firstJobID = mtctx->doneJobID;
-    unsigned const lastJobID = mtctx->nextJobID;
-    unsigned jobID;
-
-    for (jobID = firstJobID; jobID < lastJobID; ++jobID) {
-        unsigned const wJobID = jobID & mtctx->jobIDMask;
-        size_t consumed;
-
-        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
-        consumed = mtctx->jobs[wJobID].consumed;
-        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
-
-        if (consumed < mtctx->jobs[wJobID].src.size) {
-            range_t range = mtctx->jobs[wJobID].prefix;
-            if (range.size == 0) {
-                /* Empty prefix */
-                range = mtctx->jobs[wJobID].src;
-            }
-            /* Job source in multiple segments not supported yet */
-            assert(range.start <= mtctx->jobs[wJobID].src.start);
-            return range;
-        }
-    }
-    return kNullRange;
-}
-
-/**
- * Returns non-zero iff buffer and range overlap.
- */
-static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
-{
-    BYTE const* const bufferStart = (BYTE const*)buffer.start;
-    BYTE const* const bufferEnd = bufferStart + buffer.capacity;
-    BYTE const* const rangeStart = (BYTE const*)range.start;
-    BYTE const* const rangeEnd = rangeStart + range.size;
-
-    if (rangeStart == NULL || bufferStart == NULL)
-        return 0;
-    /* Empty ranges cannot overlap */
-    if (bufferStart == bufferEnd || rangeStart == rangeEnd)
-        return 0;
-
-    return bufferStart < rangeEnd && rangeStart < bufferEnd;
-}
-
-static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
-{
-    range_t extDict;
-    range_t prefix;
-
-    DEBUGLOG(5, "ZSTDMT_doesOverlapWindow");
-    extDict.start = window.dictBase + window.lowLimit;
-    extDict.size = window.dictLimit - window.lowLimit;
-
-    prefix.start = window.base + window.dictLimit;
-    prefix.size = window.nextSrc - (window.base + window.dictLimit);
-    DEBUGLOG(5, "extDict [0x%zx, 0x%zx)",
-                (size_t)extDict.start,
-                (size_t)extDict.start + extDict.size);
-    DEBUGLOG(5, "prefix  [0x%zx, 0x%zx)",
-                (size_t)prefix.start,
-                (size_t)prefix.start + prefix.size);
-
-    return ZSTDMT_isOverlapped(buffer, extDict)
-        || ZSTDMT_isOverlapped(buffer, prefix);
-}
-
-static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
-{
-    if (mtctx->params.ldmParams.enableLdm) {
-        ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
-        DEBUGLOG(5, "ZSTDMT_waitForLdmComplete");
-        DEBUGLOG(5, "source  [0x%zx, 0x%zx)",
-                    (size_t)buffer.start,
-                    (size_t)buffer.start + buffer.capacity);
-        ZSTD_PTHREAD_MUTEX_LOCK(mutex);
-        while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
-            DEBUGLOG(5, "Waiting for LDM to finish...");
-            ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
-        }
-        DEBUGLOG(6, "Done waiting for LDM to finish");
-        ZSTD_pthread_mutex_unlock(mutex);
-    }
-}
-
-/**
- * Attempts to set the inBuff to the next section to fill.
- * If any part of the new section is still in use we give up.
- * Returns non-zero if the buffer is filled.
- */
-static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
-{
-    range_t const inUse = ZSTDMT_getInputDataInUse(mtctx);
-    size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos;
-    size_t const target = mtctx->targetSectionSize;
-    buffer_t buffer;
-
-    DEBUGLOG(5, "ZSTDMT_tryGetInputRange");
-    assert(mtctx->inBuff.buffer.start == NULL);
-    assert(mtctx->roundBuff.capacity >= target);
-
-    if (spaceLeft < target) {
-        /* ZSTD_invalidateRepCodes() doesn't work for extDict variants.
-         * Simply copy the prefix to the beginning in that case.
-         */
-        BYTE* const start = (BYTE*)mtctx->roundBuff.buffer;
-        size_t const prefixSize = mtctx->inBuff.prefix.size;
-
-        buffer.start = start;
-        buffer.capacity = prefixSize;
-        if (ZSTDMT_isOverlapped(buffer, inUse)) {
-            DEBUGLOG(5, "Waiting for buffer...");
-            return 0;
-        }
-        ZSTDMT_waitForLdmComplete(mtctx, buffer);
-        memmove(start, mtctx->inBuff.prefix.start, prefixSize);
-        mtctx->inBuff.prefix.start = start;
-        mtctx->roundBuff.pos = prefixSize;
-    }
-    buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos;
-    buffer.capacity = target;
-
-    if (ZSTDMT_isOverlapped(buffer, inUse)) {
-        DEBUGLOG(5, "Waiting for buffer...");
-        return 0;
-    }
-    assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));
-
-    ZSTDMT_waitForLdmComplete(mtctx, buffer);
-
-    DEBUGLOG(5, "Using prefix range [%zx, %zx)",
-                (size_t)mtctx->inBuff.prefix.start,
-                (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size);
-    DEBUGLOG(5, "Using source range [%zx, %zx)",
-                (size_t)buffer.start,
-                (size_t)buffer.start + buffer.capacity);
-
-
-    mtctx->inBuff.buffer = buffer;
-    mtctx->inBuff.filled = 0;
-    assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity);
-    return 1;
-}
-
-typedef struct {
-  size_t toLoad;  /* The number of bytes to load from the input. */
-  int flush;      /* Boolean declaring if we must flush because we found a synchronization point. */
-} syncPoint_t;
-
-/**
- * Searches through the input for a synchronization point. If one is found, we
- * will instruct the caller to flush, and return the number of bytes to load.
- * Otherwise, we will load as many bytes as possible and instruct the caller
- * to continue as normal.
- */
-static syncPoint_t
-findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
-{
-    BYTE const* const istart = (BYTE const*)input.src + input.pos;
-    U64 const primePower = mtctx->rsync.primePower;
-    U64 const hitMask = mtctx->rsync.hitMask;
-
-    syncPoint_t syncPoint;
-    U64 hash;
-    BYTE const* prev;
-    size_t pos;
-
-    syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
-    syncPoint.flush = 0;
-    if (!mtctx->params.rsyncable)
-        /* Rsync is disabled. */
-        return syncPoint;
-    if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH)
-        /* Not enough to compute the hash.
-         * We will miss any synchronization points in this RSYNC_LENGTH byte
-         * window. However, since it depends only in the internal buffers, if the
-         * state is already synchronized, we will remain synchronized.
-         * Additionally, the probability that we miss a synchronization point is
-         * low: RSYNC_LENGTH / targetSectionSize.
-         */
-        return syncPoint;
-    /* Initialize the loop variables. */
-    if (mtctx->inBuff.filled >= RSYNC_LENGTH) {
-        /* We have enough bytes buffered to initialize the hash.
-         * Start scanning at the beginning of the input.
-         */
-        pos = 0;
-        prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
-        hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
-    } else {
-        /* We don't have enough bytes buffered to initialize the hash, but
-         * we know we have at least RSYNC_LENGTH bytes total.
-         * Start scanning after the first RSYNC_LENGTH bytes less the bytes
-         * already buffered.
-         */
-        pos = RSYNC_LENGTH - mtctx->inBuff.filled;
-        prev = (BYTE const*)mtctx->inBuff.buffer.start - pos;
-        hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled);
-        hash = ZSTD_rollingHash_append(hash, istart, pos);
-    }
-    /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll
-     * through the input. If we hit a synchronization point, then cut the
-     * job off, and tell the compressor to flush the job. Otherwise, load
-     * all the bytes and continue as normal.
-     * If we go too long without a synchronization point (targetSectionSize)
-     * then a block will be emitted anyways, but this is okay, since if we
-     * are already synchronized we will remain synchronized.
-     */
-    for (; pos < syncPoint.toLoad; ++pos) {
-        BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH];
-        /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */
-        hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower);
-        if ((hash & hitMask) == hitMask) {
-            syncPoint.toLoad = pos + 1;
-            syncPoint.flush = 1;
-            break;
-        }
-    }
-    return syncPoint;
-}
-
-size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx)
-{
-    size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled;
-    if (hintInSize==0) hintInSize = mtctx->targetSectionSize;
-    return hintInSize;
-}
-
-/** ZSTDMT_compressStream_generic() :
- *  internal use only - exposed to be invoked from zstd_compress.c
- *  assumption : output and input are valid (pos <= size)
- * @return : minimum amount of data remaining to flush, 0 if none */
-size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
-                                     ZSTD_outBuffer* output,
-                                     ZSTD_inBuffer* input,
-                                     ZSTD_EndDirective endOp)
-{
-    unsigned forwardInputProgress = 0;
-    DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)",
-                (U32)endOp, (U32)(input->size - input->pos));
-    assert(output->pos <= output->size);
-    assert(input->pos  <= input->size);
-
-    if (mtctx->singleBlockingThread) {  /* delegate to single-thread (synchronous) */
-        return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp);
-    }
-
-    if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
-        /* current frame being ended. Only flush/end are allowed */
-        return ERROR(stage_wrong);
-    }
-
-    /* single-pass shortcut (note : synchronous-mode) */
-    if ( (!mtctx->params.rsyncable)   /* rsyncable mode is disabled */
-      && (mtctx->nextJobID == 0)      /* just started */
-      && (mtctx->inBuff.filled == 0)  /* nothing buffered */
-      && (!mtctx->jobReady)           /* no job already created */
-      && (endOp == ZSTD_e_end)        /* end order */
-      && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) { /* enough space in dst */
-        size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx,
-                (char*)output->dst + output->pos, output->size - output->pos,
-                (const char*)input->src + input->pos, input->size - input->pos,
-                mtctx->cdict, mtctx->params);
-        if (ZSTD_isError(cSize)) return cSize;
-        input->pos = input->size;
-        output->pos += cSize;
-        mtctx->allJobsCompleted = 1;
-        mtctx->frameEnded = 1;
-        return 0;
-    }
-
-    /* fill input buffer */
-    if ( (!mtctx->jobReady)
-      && (input->size > input->pos) ) {   /* support NULL input */
-        if (mtctx->inBuff.buffer.start == NULL) {
-            assert(mtctx->inBuff.filled == 0); /* Can't fill an empty buffer */
-            if (!ZSTDMT_tryGetInputRange(mtctx)) {
-                /* It is only possible for this operation to fail if there are
-                 * still compression jobs ongoing.
-                 */
-                DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed");
-                assert(mtctx->doneJobID != mtctx->nextJobID);
-            } else
-                DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start);
-        }
-        if (mtctx->inBuff.buffer.start != NULL) {
-            syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input);
-            if (syncPoint.flush && endOp == ZSTD_e_continue) {
-                endOp = ZSTD_e_flush;
-            }
-            assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
-            DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
-                        (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
-            memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
-            input->pos += syncPoint.toLoad;
-            mtctx->inBuff.filled += syncPoint.toLoad;
-            forwardInputProgress = syncPoint.toLoad>0;
-        }
-        if ((input->pos < input->size) && (endOp == ZSTD_e_end))
-            endOp = ZSTD_e_flush;   /* can't end now : not all input consumed */
-    }
-
-    if ( (mtctx->jobReady)
-      || (mtctx->inBuff.filled >= mtctx->targetSectionSize)  /* filled enough : let's compress */
-      || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0))  /* something to flush : let's go */
-      || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {   /* must finish the frame with a zero-size block */
-        size_t const jobSize = mtctx->inBuff.filled;
-        assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
-        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
-    }
-
-    /* check for potential compressed data ready to be flushed */
-    {   size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */
-        if (input->pos < input->size) return MAX(remainingToFlush, 1);  /* input not consumed : do not end flush yet */
-        DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush);
-        return remainingToFlush;
-    }
-}
-
-
-size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
-{
-    FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
-
-    /* recommended next input size : fill current input buffer */
-    return mtctx->targetSectionSize - mtctx->inBuff.filled;   /* note : could be zero when input buffer is fully filled and no more availability to create new job */
-}
-
-
-static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_EndDirective endFrame)
-{
-    size_t const srcSize = mtctx->inBuff.filled;
-    DEBUGLOG(5, "ZSTDMT_flushStream_internal");
-
-    if ( mtctx->jobReady     /* one job ready for a worker to pick up */
-      || (srcSize > 0)       /* still some data within input buffer */
-      || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) {  /* need a last 0-size block to end frame */
-           DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
-                        (U32)srcSize, (U32)endFrame);
-        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
-    }
-
-    /* check if there is any data available to flush */
-    return ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, endFrame);
-}
-
-
-size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
-{
-    DEBUGLOG(5, "ZSTDMT_flushStream");
-    if (mtctx->singleBlockingThread)
-        return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output);
-    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_flush);
-}
-
-size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
-{
-    DEBUGLOG(4, "ZSTDMT_endStream");
-    if (mtctx->singleBlockingThread)
-        return ZSTD_endStream(mtctx->cctxPool->cctx[0], output);
-    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_end);
-}
diff --git a/vendor/github.com/DataDog/zstd/zstdmt_compress.h b/vendor/github.com/DataDog/zstd/zstdmt_compress.h
deleted file mode 100644
index 12a5260..0000000
--- a/vendor/github.com/DataDog/zstd/zstdmt_compress.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
- #ifndef ZSTDMT_COMPRESS_H
- #define ZSTDMT_COMPRESS_H
-
- #if defined (__cplusplus)
- extern "C" {
- #endif
-
-
-/* Note : This is an internal API.
- *        These APIs used to be exposed with ZSTDLIB_API,
- *        because it used to be the only way to invoke MT compression.
- *        Now, it's recommended to use ZSTD_compress2 and ZSTD_compressStream2()
- *        instead.
- *
- *        If you depend on these APIs and can't switch, then define
- *        ZSTD_LEGACY_MULTITHREADED_API when making the dynamic library.
- *        However, we may completely remove these functions in a future
- *        release, so please switch soon.
- *
- *        This API requires ZSTD_MULTITHREAD to be defined during compilation,
- *        otherwise ZSTDMT_createCCtx*() will fail.
- */
-
-#ifdef ZSTD_LEGACY_MULTITHREADED_API
-#  define ZSTDMT_API ZSTDLIB_API
-#else
-#  define ZSTDMT_API
-#endif
-
-/* ===   Dependencies   === */
-#include <stddef.h>                /* size_t */
-#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters */
-#include "zstd.h"            /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
-
-
-/* ===   Constants   === */
-#ifndef ZSTDMT_NBWORKERS_MAX
-#  define ZSTDMT_NBWORKERS_MAX 200
-#endif
-#ifndef ZSTDMT_JOBSIZE_MIN
-#  define ZSTDMT_JOBSIZE_MIN (1 MB)
-#endif
-#define ZSTDMT_JOBLOG_MAX   (MEM_32bits() ? 29 : 30)
-#define ZSTDMT_JOBSIZE_MAX  (MEM_32bits() ? (512 MB) : (1024 MB))
-
-
-/* ===   Memory management   === */
-typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
-/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
-ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
-/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
-ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
-                                                    ZSTD_customMem cMem);
-ZSTDMT_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
-
-ZSTDMT_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
-
-
-/* ===   Simple one-pass compression function   === */
-
-ZSTDMT_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
-                                       void* dst, size_t dstCapacity,
-                                 const void* src, size_t srcSize,
-                                       int compressionLevel);
-
-
-
-/* ===   Streaming functions   === */
-
-ZSTDMT_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
-ZSTDMT_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);  /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */
-
-ZSTDMT_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
-ZSTDMT_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
-
-ZSTDMT_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);   /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
-ZSTDMT_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);     /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
-
-
-/* ===   Advanced functions and parameters  === */
-
-ZSTDMT_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
-                                          void* dst, size_t dstCapacity,
-                                    const void* src, size_t srcSize,
-                                    const ZSTD_CDict* cdict,
-                                          ZSTD_parameters params,
-                                          int overlapLog);
-
-ZSTDMT_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
-                                        const void* dict, size_t dictSize,   /* dict can be released after init, a local copy is preserved within zcs */
-                                        ZSTD_parameters params,
-                                        unsigned long long pledgedSrcSize);  /* pledgedSrcSize is optional and can be zero == unknown */
-
-ZSTDMT_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
-                                        const ZSTD_CDict* cdict,
-                                        ZSTD_frameParameters fparams,
-                                        unsigned long long pledgedSrcSize);  /* note : zero means empty */
-
-/* ZSTDMT_parameter :
- * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
-typedef enum {
-    ZSTDMT_p_jobSize,     /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
-    ZSTDMT_p_overlapLog,  /* Each job may reload a part of previous job to enhance compression ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
-    ZSTDMT_p_rsyncable    /* Enables rsyncable mode. */
-} ZSTDMT_parameter;
-
-/* ZSTDMT_setMTCtxParameter() :
- * allow setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter.
- * The function must be called typically after ZSTD_createCCtx() but __before ZSTDMT_init*() !__
- * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
- * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-ZSTDMT_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value);
-
-/* ZSTDMT_getMTCtxParameter() :
- * Query the ZSTDMT_CCtx for a parameter value.
- * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-ZSTDMT_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value);
-
-
-/*! ZSTDMT_compressStream_generic() :
- *  Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream()
- *  depending on flush directive.
- * @return : minimum amount of data still to be flushed
- *           0 if fully flushed
- *           or an error code
- *  note : needs to be init using any ZSTD_initCStream*() variant */
-ZSTDMT_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
-                                                ZSTD_outBuffer* output,
-                                                ZSTD_inBuffer* input,
-                                                ZSTD_EndDirective endOp);
-
-
-/* ========================================================
- * ===  Private interface, for use by ZSTD_compress.c   ===
- * ===  Not exposed in libzstd. Never invoke directly   ===
- * ======================================================== */
-
- /*! ZSTDMT_toFlushNow()
-  *  Tell how many bytes are ready to be flushed immediately.
-  *  Probe the oldest active job (not yet entirely flushed) and check its output buffer.
-  *  If return 0, it means there is no active job,
-  *  or, it means oldest job is still active, but everything produced has been flushed so far,
-  *  therefore flushing is limited by speed of oldest job. */
-size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx);
-
-/*! ZSTDMT_CCtxParam_setMTCtxParameter()
- *  like ZSTDMT_setMTCtxParameter(), but into a ZSTD_CCtx_Params */
-size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, int value);
-
-/*! ZSTDMT_CCtxParam_setNbWorkers()
- *  Set nbWorkers, and clamp it.
- *  Also reset jobSize and overlapLog */
-size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers);
-
-/*! ZSTDMT_updateCParams_whileCompressing() :
- *  Updates only a selected set of compression parameters, to remain compatible with current frame.
- *  New parameters will be applied to next compression job. */
-void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams);
-
-/*! ZSTDMT_getFrameProgression():
- *  tells how much data has been consumed (input) and produced (output) for current frame.
- *  able to count progression inside worker threads.
- */
-ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);
-
-
-/*! ZSTDMT_initCStream_internal() :
- *  Private use only. Init streaming operation.
- *  expects params to be valid.
- *  must receive dict, or cdict, or none, but not both.
- *  @return : 0, or an error code */
-size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
-                    const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
-                    const ZSTD_CDict* cdict,
-                    ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif   /* ZSTDMT_COMPRESS_H */
diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore
index 6e362e4..2c9adc2 100644
--- a/vendor/github.com/Shopify/sarama/.gitignore
+++ b/vendor/github.com/Shopify/sarama/.gitignore
@@ -25,3 +25,5 @@
 
 coverage.txt
 profile.out
+
+simplest-uncommitted-msg-0.1-jar-with-dependencies.jar
diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/Shopify/sarama/.golangci.yml
new file mode 100644
index 0000000..09e5c46
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.golangci.yml
@@ -0,0 +1,74 @@
+run:
+  timeout: 5m
+  deadline: 10m
+
+linters-settings:
+  govet:
+    check-shadowing: false
+  golint:
+    min-confidence: 0
+  gocyclo:
+    min-complexity: 99
+  maligned:
+    suggest-new: true
+  dupl:
+    threshold: 100
+  goconst:
+    min-len: 2
+    min-occurrences: 3
+  misspell:
+    locale: US
+  goimports:
+    local-prefixes: github.com/Shopify/sarama
+  gocritic:
+    enabled-tags:
+      - diagnostic
+      - experimental
+      - opinionated
+      - performance
+      - style
+    disabled-checks:
+      - wrapperFunc
+      - ifElseChain
+  funlen:
+    lines: 300
+    statements: 300
+
+linters:
+  disable-all: true
+  enable:
+    - bodyclose
+    - deadcode
+    - depguard
+    - dogsled
+    # - dupl
+    - errcheck
+    - funlen
+    - gochecknoinits
+    # - goconst
+    # - gocritic
+    - gocyclo
+    - gofmt
+    - goimports
+    # - golint
+    - gosec
+    # - gosimple
+    - govet
+    # - ineffassign
+    # - misspell
+    # - nakedret
+    # - scopelint
+    - staticcheck
+    - structcheck
+    # - stylecheck
+    - typecheck
+    - unconvert
+    - unused
+    - varcheck
+    - whitespace
+
+issues:
+  exclude:
+    - "G404: Use of weak random number generator"
+  # maximum count of issues with the same text. set to 0 for unlimited. default is 3.
+  max-same-issues: 0
diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml
deleted file mode 100644
index 4331fa1..0000000
--- a/vendor/github.com/Shopify/sarama/.travis.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-dist: xenial
-language: go
-go:
-- 1.11.x
-- 1.12.x
-
-env:
-  global:
-  - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
-  - TOXIPROXY_ADDR=http://localhost:8474
-  - KAFKA_INSTALL_ROOT=/home/travis/kafka
-  - KAFKA_HOSTNAME=localhost
-  - DEBUG=true
-  matrix:
-  - KAFKA_VERSION=2.1.1 KAFKA_SCALA_VERSION=2.12
-  - KAFKA_VERSION=2.2.1 KAFKA_SCALA_VERSION=2.12
-  - KAFKA_VERSION=2.3.0 KAFKA_SCALA_VERSION=2.12
-
-before_install:
-- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
-- vagrant/install_cluster.sh
-- vagrant/boot_cluster.sh
-- vagrant/create_topics.sh
-- vagrant/run_java_producer.sh
-
-install: make install_dependencies
-
-script:
-- make test
-- make vet
-- make errcheck
-- if [[ "$TRAVIS_GO_VERSION" == 1.12* ]]; then make fmt; fi
-
-after_success:
-- bash <(curl -s https://codecov.io/bash)
-
-after_script: vagrant/halt_cluster.sh
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
index 02bd0ff..59ccd1d 100644
--- a/vendor/github.com/Shopify/sarama/CHANGELOG.md
+++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md
@@ -1,5 +1,237 @@
 # Changelog
 
+#### Unreleased
+
+#### Version 1.28.0 (2021-02-15)
+
+**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.**
+
+- #1870 - @kvch - Update Kerberos library to latest major
+- #1876 - @bai - Update docs, reference pkg.go.dev
+- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close
+- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages
+- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies
+- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy
+- #1862 - @bai - Fix CI setenv permissions issues
+- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev
+- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica
+
+#### Version 1.27.2 (2020-10-21)
+
+# Improvements
+
+#1750 - @krantideep95 Adds missing mock responses for mocking consumer group
+
+# Fixes
+
+#1817 - reverts #1785 - Add private method to Client interface to prevent implementation
+
+#### Version 1.27.1 (2020-10-07)
+
+# Improvements
+
+#1775 - @d1egoaz - Adds a Producer Interceptor example
+#1781 - @justin-chen - Refresh brokers given list of seed brokers
+#1784 - @justin-chen - Add randomize seed broker method
+#1790 - @d1egoaz - remove example binary
+#1798 - @bai - Test against Go 1.15
+#1785 - @justin-chen - Add private method to Client interface to prevent implementation
+#1802 - @uvw - Support Go 1.13 error unwrapping
+
+# Fixes
+
+#1791 - @stanislavkozlovski - bump default version to 1.0.0
+
+#### Version 1.27.0 (2020-08-11)
+
+# Improvements
+
+#1466 - @rubenvp8510  - Expose kerberos fast negotiation configuration
+#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests
+#1699 - @wclaeys  - Consumer group support for manually committing offsets
+#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0
+#1726 - @d1egoaz - Include zstd on the functional tests
+#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors
+#1738 - @varun06 - fixed variable names that are named same as some std lib package names
+#1741 - @varun06 - updated zstd dependency to latest v1.10.10
+#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base
+#1763 - @alrs - remove deprecated tls options from test
+#1769 - @bai - Add support for Kafka 2.6.0
+
+# Fixes
+
+#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
+#1744 - @alrs  - Fix isBalanced Function Signature
+
+#### Version 1.26.4 (2020-05-19)
+
+# Fixes
+
+- #1701 - @d1egoaz - Set server name only for the current broker
+- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka
+
+#### Version 1.26.3 (2020-05-07)
+
+# Fixes
+
+- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config
+
+#### Version 1.26.2 (2020-05-06)
+
+# ⚠️ Known Issues
+
+This release has been marked as not ready for production and may be unstable, please use v1.26.4.
+
+# Improvements
+
+- #1560 - @iyacontrol - add sync pool for gzip 1-9
+- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID
+- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignments APIs
+- #1632 - @bai - Add support for Go 1.14
+- #1640 - @random-dwi - Feature/fix list partition reassignments
+- #1646 - @mimaison - Add DescribeLogDirs to admin client
+- #1667 - @bai - Add support for kafka 2.5.0
+
+# Fixes
+
+- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0
+- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine
+- #1602 - @d1egoaz - adds a note about consumer groups Consume method
+- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly
+- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented
+- #1614 - @alrs - produce_response.go: Remove Unused Functions
+- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables
+- #1639 - @agriffaut - Handle errors with no message but error code
+- #1643 - @kzinglzy - fix `config.net.keepalive`
+- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs
+- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata
+- #1650 - @lavoiesl - Return the response error in heartbeatLoop
+- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die
+- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy.
+
+#### Version 1.26.1 (2020-02-04)
+
+Improvements:
+- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539))
+- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595))
+- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573))
+- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592))
+
+Bug Fixes:
+- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590))
+- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589))
+
+#### Version 1.26.0 (2020-01-24)
+
+New Features:
+- Enable zstd compression
+  ([1574](https://github.com/Shopify/sarama/pull/1574),
+  [1582](https://github.com/Shopify/sarama/pull/1582))
+- Support headers in tools kafka-console-producer
+  ([1549](https://github.com/Shopify/sarama/pull/1549))
+
+Improvements:
+- Add SASL AuthIdentity to SASL frames (authzid)
+  ([1585](https://github.com/Shopify/sarama/pull/1585)).
+
+Bug Fixes:
+- Sending messages with ZStd compression enabled fails in multiple ways
+  ([1252](https://github.com/Shopify/sarama/issues/1252)).
+- Use the broker for any admin on BrokerConfig
+  ([1571](https://github.com/Shopify/sarama/pull/1571)).
+- Set DescribeConfigRequest Version field
+  ([1576](https://github.com/Shopify/sarama/pull/1576)).
+- ConsumerGroup flooding logs with client/metadata update req
+  ([1578](https://github.com/Shopify/sarama/pull/1578)).
+- MetadataRequest version in DescribeCluster
+  ([1580](https://github.com/Shopify/sarama/pull/1580)).
+- Fix deadlock in consumer group handleError
+  ([1581](https://github.com/Shopify/sarama/pull/1581))
+- Fill in the Fetch{Request,Response} protocol
+  ([1582](https://github.com/Shopify/sarama/pull/1582)).
+- Retry topic request on ControllerNotAvailable
+  ([1586](https://github.com/Shopify/sarama/pull/1586)).
+
+#### Version 1.25.0 (2020-01-13)
+
+New Features:
+- Support TLS protocol in kafka-producer-performance
+  ([1538](https://github.com/Shopify/sarama/pull/1538)).
+- Add support for kafka 2.4.0
+  ([1552](https://github.com/Shopify/sarama/pull/1552)).
+
+Improvements:
+- Allow the Consumer to disable auto-commit offsets
+  ([1164](https://github.com/Shopify/sarama/pull/1164)).
+- Produce records with consistent timestamps
+  ([1455](https://github.com/Shopify/sarama/pull/1455)).
+
+Bug Fixes:
+- Fix incorrect SetTopicMetadata name mentions
+  ([1534](https://github.com/Shopify/sarama/pull/1534)).
+- Fix client.tryRefreshMetadata Println
+  ([1535](https://github.com/Shopify/sarama/pull/1535)).
+- Fix panic on calling updateMetadata on closed client
+  ([1531](https://github.com/Shopify/sarama/pull/1531)).
+- Fix possible faulty metrics in TestFuncProducing
+  ([1545](https://github.com/Shopify/sarama/pull/1545)).
+
+#### Version 1.24.1 (2019-10-31)
+
+New Features:
+- Add DescribeLogDirs Request/Response pair
+  ([1520](https://github.com/Shopify/sarama/pull/1520)).
+
+Bug Fixes:
+- Fix ClusterAdmin returning invalid controller ID on DescribeCluster
+  ([1518](https://github.com/Shopify/sarama/pull/1518)).
+- Fix issue with consumergroup not rebalancing when new partition is added
+  ([1525](https://github.com/Shopify/sarama/pull/1525)).
+- Ensure consistent use of read/write deadlines
+  ([1529](https://github.com/Shopify/sarama/pull/1529)).
+
+#### Version 1.24.0 (2019-10-09)
+
+New Features:
+- Add sticky partition assignor
+  ([1416](https://github.com/Shopify/sarama/pull/1416)).
+- Switch from cgo zstd package to pure Go implementation
+  ([1477](https://github.com/Shopify/sarama/pull/1477)).
+
+Improvements:
+- Allow creating ClusterAdmin from client
+  ([1415](https://github.com/Shopify/sarama/pull/1415)).
+- Set KafkaVersion in ListAcls method
+  ([1452](https://github.com/Shopify/sarama/pull/1452)).
+- Set request version in CreateACL ClusterAdmin method
+  ([1458](https://github.com/Shopify/sarama/pull/1458)).
+- Set request version in DeleteACL ClusterAdmin method
+  ([1461](https://github.com/Shopify/sarama/pull/1461)).
+- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest
+  ([1464](https://github.com/Shopify/sarama/pull/1464)).
+- Remove direct usage of gofork
+  ([1465](https://github.com/Shopify/sarama/pull/1465)).
+- Add support for Go 1.13
+  ([1478](https://github.com/Shopify/sarama/pull/1478)).
+- Improve behavior of NewMockListAclsResponse
+  ([1481](https://github.com/Shopify/sarama/pull/1481)).
+
+Bug Fixes:
+- Fix race condition in consumergroup example
+  ([1434](https://github.com/Shopify/sarama/pull/1434)).
+- Fix brokerProducer goroutine leak
+  ([1442](https://github.com/Shopify/sarama/pull/1442)).
+- Use released version of lz4 library
+  ([1469](https://github.com/Shopify/sarama/pull/1469)).
+- Set correct version in MockDeleteTopicsResponse
+  ([1484](https://github.com/Shopify/sarama/pull/1484)).
+- Fix CLI help message typo
+  ([1494](https://github.com/Shopify/sarama/pull/1494)).
+
+Known Issues:
+- Please **don't** use Zstd, as it doesn't work right now.
+  See https://github.com/Shopify/sarama/issues/1252
+
 #### Version 1.23.1 (2019-07-22)
 
 Bug Fixes:
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
index 360b220..4714d77 100644
--- a/vendor/github.com/Shopify/sarama/Makefile
+++ b/vendor/github.com/Shopify/sarama/Makefile
@@ -1,52 +1,31 @@
-export GO111MODULE=on
+default: fmt get update test lint
 
-default: fmt vet errcheck test lint
+GO       := go
+GOBUILD  := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG)
+GOTEST   := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic
 
-# Taken from https://github.com/codecov/example-go#caveat-multiple-files
-.PHONY: test
-test:
-	echo "" > coverage.txt
-	for d in `go list ./...`; do \
-		go test -p 1 -v -timeout 240s -race -coverprofile=profile.out -covermode=atomic $$d || exit 1; \
-		if [ -f profile.out ]; then \
-			cat profile.out >> coverage.txt; \
-			rm profile.out; \
-		fi \
-	done
+FILES    := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go')
+TESTS    := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go')
 
-GOLINT := $(shell command -v golint)
-
-.PHONY: lint
-lint:
-ifndef GOLINT
-	go get golang.org/x/lint/golint
-endif
-	go list ./... | xargs golint
-
-.PHONY: vet
-vet:
-	go vet ./...
-
-ERRCHECK := $(shell command -v errcheck)
-# See https://github.com/kisielk/errcheck/pull/141 for details on ignorepkg
-.PHONY: errcheck
-errcheck:
-ifndef ERRCHECK
-	go get github.com/kisielk/errcheck
-endif
-	errcheck -ignorepkg fmt github.com/Shopify/sarama/...
-
-.PHONY: fmt
-fmt:
-	@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
-
-.PHONY : install_dependencies
-install_dependencies: get
-
-.PHONY: get
 get:
-	go get -t -v ./...
+	$(GO) get ./...
+	$(GO) mod verify
+	$(GO) mod tidy
 
-.PHONY: clean
-clean:
-	go clean ./...
+update:
+	$(GO) get -u -v ./...
+	$(GO) mod verify
+	$(GO) mod tidy
+
+fmt:
+	gofmt -s -l -w $(FILES) $(TESTS)
+
+lint:
+	GOFLAGS="-tags=functional" golangci-lint run
+
+test:
+	$(GOTEST) ./...
+
+.PHONY: test_functional
+test_functional:
+	$(GOTEST) -tags=functional ./...
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
index 4cd736b..f2beb73 100644
--- a/vendor/github.com/Shopify/sarama/README.md
+++ b/vendor/github.com/Shopify/sarama/README.md
@@ -1,39 +1,36 @@
-sarama
-======
+# sarama
 
-[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.svg)](https://godoc.org/github.com/Shopify/sarama)
+[![Go Reference](https://pkg.go.dev/badge/github.com/Shopify/sarama.svg)](https://pkg.go.dev/github.com/Shopify/sarama)
 [![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
 [![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)
 
 Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
 
-### Getting started
+## Getting started
 
-- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
+- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/Shopify/sarama).
 - Mocks for testing are available in the [mocks](./mocks) subpackage.
 - The [examples](./examples) directory contains more elaborate example applications.
 - The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
 
 You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
 
-### Compatibility and API stability
+## Compatibility and API stability
 
 Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
 the two latest stable releases of Kafka and Go, and we provide a two month
 grace period for older releases. This means we currently officially support
-Go 1.11 through 1.12, and Kafka 2.0 through 2.3, although older releases are
+Go 1.15 through 1.16, and Kafka 2.6 through 2.8, although older releases are
 still likely to work.
 
 Sarama follows semantic versioning and provides API stability via the gopkg.in service.
 You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
 A changelog is available [here](CHANGELOG.md).
 
-### Contributing
+## Contributing
 
-* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
-* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
-  technical and design details.
-* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
-  contains a wealth of useful information.
-* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
-* If you have any questions, just ask!
+- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
+- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details.
+- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information.
+- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
+- If you have any questions, just ask!
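For reviewers unfamiliar with the library being re-vendored here, a minimal sketch of the SyncProducer API the README points to may help; it is not part of this change, and the broker address `localhost:9092` and topic `example-topic` are placeholders.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // the sync producer requires success reporting

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("delivered to partition %d at offset %d", partition, offset)
}
```

Without `Producer.Return.Successes = true` the sync producer constructor returns a configuration error, which is why the sketch sets it explicitly.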
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile
index f4b848a..07d7ffb 100644
--- a/vendor/github.com/Shopify/sarama/Vagrantfile
+++ b/vendor/github.com/Shopify/sarama/Vagrantfile
@@ -1,14 +1,8 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
 # We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
 MEMORY = 3072
 
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
-  config.vm.box = "ubuntu/trusty64"
+Vagrant.configure("2") do |config|
+  config.vm.box = "ubuntu/bionic64"
 
   config.vm.provision :shell, path: "vagrant/provision.sh"
 
diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go
index 50b689d..13440be 100644
--- a/vendor/github.com/Shopify/sarama/acl_bindings.go
+++ b/vendor/github.com/Shopify/sarama/acl_bindings.go
@@ -1,6 +1,6 @@
 package sarama
 
-//Resource holds information about acl resource type
+// Resource holds information about acl resource type
 type Resource struct {
 	ResourceType        AclResourceType
 	ResourceName        string
@@ -46,7 +46,7 @@
 	return nil
 }
 
-//Acl holds information about acl type
+// Acl holds information about acl type
 type Acl struct {
 	Principal      string
 	Host           string
@@ -93,7 +93,7 @@
 	return nil
 }
 
-//ResourceAcls is an acl resource type
+// ResourceAcls is an acl resource type
 type ResourceAcls struct {
 	Resource
 	Acls []*Acl
diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go
index da1cdef..449102f 100644
--- a/vendor/github.com/Shopify/sarama/acl_create_request.go
+++ b/vendor/github.com/Shopify/sarama/acl_create_request.go
@@ -1,6 +1,6 @@
 package sarama
 
-//CreateAclsRequest is an acl creation request
+// CreateAclsRequest is an acl creation request
 type CreateAclsRequest struct {
 	Version      int16
 	AclCreations []*AclCreation
@@ -47,6 +47,10 @@
 	return c.Version
 }
 
+func (c *CreateAclsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
 	switch c.Version {
 	case 1:
@@ -56,7 +60,7 @@
 	}
 }
 
-//AclCreation is a wrapper around Resource and Acl type
+// AclCreation is a wrapper around Resource and Acl type
 type AclCreation struct {
 	Resource
 	Acl
diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go
index f5a5e9a..21d6c34 100644
--- a/vendor/github.com/Shopify/sarama/acl_create_response.go
+++ b/vendor/github.com/Shopify/sarama/acl_create_response.go
@@ -2,7 +2,7 @@
 
 import "time"
 
-//CreateAclsResponse is a an acl reponse creation type
+// CreateAclsResponse is an acl creation response type
 type CreateAclsResponse struct {
 	ThrottleTime         time.Duration
 	AclCreationResponses []*AclCreationResponse
@@ -55,11 +55,15 @@
 	return 0
 }
 
+func (c *CreateAclsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
 
-//AclCreationResponse is an acl creation response type
+// AclCreationResponse is an acl creation response type
 type AclCreationResponse struct {
 	Err    KError
 	ErrMsg *string
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go
index 15908ea..5e5c03b 100644
--- a/vendor/github.com/Shopify/sarama/acl_delete_request.go
+++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go
@@ -1,6 +1,6 @@
 package sarama
 
-//DeleteAclsRequest is a delete acl request
+// DeleteAclsRequest is a delete acl request
 type DeleteAclsRequest struct {
 	Version int
 	Filters []*AclFilter
@@ -48,6 +48,10 @@
 	return int16(d.Version)
 }
 
+func (d *DeleteAclsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
 	switch d.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go
index 6529565..cd33749 100644
--- a/vendor/github.com/Shopify/sarama/acl_delete_response.go
+++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go
@@ -2,7 +2,7 @@
 
 import "time"
 
-//DeleteAclsResponse is a delete acl response
+// DeleteAclsResponse is a delete acl response
 type DeleteAclsResponse struct {
 	Version         int16
 	ThrottleTime    time.Duration
@@ -53,14 +53,18 @@
 }
 
 func (d *DeleteAclsResponse) version() int16 {
-	return int16(d.Version)
+	return d.Version
+}
+
+func (d *DeleteAclsResponse) headerVersion() int16 {
+	return 0
 }
 
 func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
 
-//FilterResponse is a filter response type
+// FilterResponse is a filter response type
 type FilterResponse struct {
 	Err          KError
 	ErrMsg       *string
@@ -111,7 +115,7 @@
 	return nil
 }
 
-//MatchingAcl is a matching acl type
+// MatchingAcl is a matching acl type
 type MatchingAcl struct {
 	Err    KError
 	ErrMsg *string
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go
index 5222d46..e0fe902 100644
--- a/vendor/github.com/Shopify/sarama/acl_describe_request.go
+++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go
@@ -1,6 +1,6 @@
 package sarama
 
-//DescribeAclsRequest is a secribe acl request type
+// DescribeAclsRequest is a describe acl request type
 type DescribeAclsRequest struct {
 	Version int
 	AclFilter
@@ -25,6 +25,10 @@
 	return int16(d.Version)
 }
 
+func (d *DescribeAclsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
 	switch d.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go
index 12126e5..3255fd4 100644
--- a/vendor/github.com/Shopify/sarama/acl_describe_response.go
+++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go
@@ -2,7 +2,7 @@
 
 import "time"
 
-//DescribeAclsResponse is a describe acl response type
+// DescribeAclsResponse is a describe acl response type
 type DescribeAclsResponse struct {
 	Version      int16
 	ThrottleTime time.Duration
@@ -74,7 +74,11 @@
 }
 
 func (d *DescribeAclsResponse) version() int16 {
-	return int16(d.Version)
+	return d.Version
+}
+
+func (d *DescribeAclsResponse) headerVersion() int16 {
+	return 0
 }
 
 func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go
index fad5558..b380161 100644
--- a/vendor/github.com/Shopify/sarama/acl_filter.go
+++ b/vendor/github.com/Shopify/sarama/acl_filter.go
@@ -46,7 +46,6 @@
 
 	if a.Version == 1 {
 		pattern, err := pd.getInt8()
-
 		if err != nil {
 			return err
 		}
diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go
index c10ad7b..c3ba8dd 100644
--- a/vendor/github.com/Shopify/sarama/acl_types.go
+++ b/vendor/github.com/Shopify/sarama/acl_types.go
@@ -1,5 +1,10 @@
 package sarama
 
+import (
+	"fmt"
+	"strings"
+)
+
 type (
 	AclOperation int
 
@@ -27,6 +32,61 @@
 	AclOperationIdempotentWrite
 )
 
+func (a *AclOperation) String() string {
+	mapping := map[AclOperation]string{
+		AclOperationUnknown:         "Unknown",
+		AclOperationAny:             "Any",
+		AclOperationAll:             "All",
+		AclOperationRead:            "Read",
+		AclOperationWrite:           "Write",
+		AclOperationCreate:          "Create",
+		AclOperationDelete:          "Delete",
+		AclOperationAlter:           "Alter",
+		AclOperationDescribe:        "Describe",
+		AclOperationClusterAction:   "ClusterAction",
+		AclOperationDescribeConfigs: "DescribeConfigs",
+		AclOperationAlterConfigs:    "AlterConfigs",
+		AclOperationIdempotentWrite: "IdempotentWrite",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclOperationUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclOperation (name without prefix)
+func (a *AclOperation) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the operation and converts it to an AclOperation
+func (a *AclOperation) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclOperation{
+		"unknown":         AclOperationUnknown,
+		"any":             AclOperationAny,
+		"all":             AclOperationAll,
+		"read":            AclOperationRead,
+		"write":           AclOperationWrite,
+		"create":          AclOperationCreate,
+		"delete":          AclOperationDelete,
+		"alter":           AclOperationAlter,
+		"describe":        AclOperationDescribe,
+		"clusteraction":   AclOperationClusterAction,
+		"describeconfigs": AclOperationDescribeConfigs,
+		"alterconfigs":    AclOperationAlterConfigs,
+		"idempotentwrite": AclOperationIdempotentWrite,
+	}
+	ao, ok := mapping[normalized]
+	if !ok {
+		*a = AclOperationUnknown
+		return fmt.Errorf("no acl operation with name %s", normalized)
+	}
+	*a = ao
+	return nil
+}
+
 // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
 const (
 	AclPermissionUnknown AclPermissionType = iota
@@ -35,6 +95,44 @@
 	AclPermissionAllow
 )
 
+func (a *AclPermissionType) String() string {
+	mapping := map[AclPermissionType]string{
+		AclPermissionUnknown: "Unknown",
+		AclPermissionAny:     "Any",
+		AclPermissionDeny:    "Deny",
+		AclPermissionAllow:   "Allow",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclPermissionUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclPermissionType (name without prefix)
+func (a *AclPermissionType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the permission type and converts it to an AclPermissionType
+func (a *AclPermissionType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclPermissionType{
+		"unknown": AclPermissionUnknown,
+		"any":     AclPermissionAny,
+		"deny":    AclPermissionDeny,
+		"allow":   AclPermissionAllow,
+	}
+
+	apt, ok := mapping[normalized]
+	if !ok {
+		*a = AclPermissionUnknown
+		return fmt.Errorf("no acl permission with name %s", normalized)
+	}
+	*a = apt
+	return nil
+}
+
 // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
 const (
 	AclResourceUnknown AclResourceType = iota
@@ -43,8 +141,53 @@
 	AclResourceGroup
 	AclResourceCluster
 	AclResourceTransactionalID
+	AclResourceDelegationToken
 )
 
+func (a *AclResourceType) String() string {
+	mapping := map[AclResourceType]string{
+		AclResourceUnknown:         "Unknown",
+		AclResourceAny:             "Any",
+		AclResourceTopic:           "Topic",
+		AclResourceGroup:           "Group",
+		AclResourceCluster:         "Cluster",
+		AclResourceTransactionalID: "TransactionalID",
+		AclResourceDelegationToken: "DelegationToken",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclResourceUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclResourceType (name without prefix)
+func (a *AclResourceType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the resource type and converts it to an AclResourceType
+func (a *AclResourceType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclResourceType{
+		"unknown":         AclResourceUnknown,
+		"any":             AclResourceAny,
+		"topic":           AclResourceTopic,
+		"group":           AclResourceGroup,
+		"cluster":         AclResourceCluster,
+		"transactionalid": AclResourceTransactionalID,
+		"delegationtoken": AclResourceDelegationToken,
+	}
+
+	art, ok := mapping[normalized]
+	if !ok {
+		*a = AclResourceUnknown
+		return fmt.Errorf("no acl resource with name %s", normalized)
+	}
+	*a = art
+	return nil
+}
+
 // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
 const (
 	AclPatternUnknown AclResourcePatternType = iota
@@ -53,3 +196,43 @@
 	AclPatternLiteral
 	AclPatternPrefixed
 )
+
+func (a *AclResourcePatternType) String() string {
+	mapping := map[AclResourcePatternType]string{
+		AclPatternUnknown:  "Unknown",
+		AclPatternAny:      "Any",
+		AclPatternMatch:    "Match",
+		AclPatternLiteral:  "Literal",
+		AclPatternPrefixed: "Prefixed",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclPatternUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclResourcePatternType (name without prefix)
+func (a *AclResourcePatternType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType
+func (a *AclResourcePatternType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclResourcePatternType{
+		"unknown":  AclPatternUnknown,
+		"any":      AclPatternAny,
+		"match":    AclPatternMatch,
+		"literal":  AclPatternLiteral,
+		"prefixed": AclPatternPrefixed,
+	}
+
+	arpt, ok := mapping[normalized]
+	if !ok {
+		*a = AclPatternUnknown
+		return fmt.Errorf("no acl resource pattern with name %s", normalized)
+	}
+	*a = arpt
+	return nil
+}
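The `MarshalText`/`UnmarshalText` methods introduced above make the ACL enums usable with `encoding/json` and other text-based encoders. A small sketch of the round trip, assuming the vendored package is imported as `github.com/Shopify/sarama`:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	op := sarama.AclOperationRead

	// MarshalText has a pointer receiver, so marshal the address of the value.
	out, err := json.Marshal(&op)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "Read"

	// UnmarshalText is case-insensitive; unknown names decode to AclOperationUnknown with an error.
	var parsed sarama.AclOperation
	if err := json.Unmarshal([]byte(`"write"`), &parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed == sarama.AclOperationWrite) // true
}
```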
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
index fc227ab..a96af93 100644
--- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
@@ -1,6 +1,6 @@
 package sarama
 
-//AddOffsetsToTxnRequest adds offsets to a transaction request
+// AddOffsetsToTxnRequest is a request type for adding offsets to a transaction
 type AddOffsetsToTxnRequest struct {
 	TransactionalID string
 	ProducerID      int64
@@ -48,6 +48,10 @@
 	return 0
 }
 
+func (a *AddOffsetsToTxnRequest) headerVersion() int16 {
+	return 1
+}
+
 func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
index c88c1f8..bb61973 100644
--- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
@@ -4,7 +4,7 @@
 	"time"
 )
 
-//AddOffsetsToTxnResponse is a response type for adding offsets to txns
+// AddOffsetsToTxnResponse is a response type for adding offsets to a transaction
 type AddOffsetsToTxnResponse struct {
 	ThrottleTime time.Duration
 	Err          KError
@@ -40,6 +40,10 @@
 	return 0
 }
 
+func (a *AddOffsetsToTxnResponse) headerVersion() int16 {
+	return 0
+}
+
 func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
index 8d4b42e..57ecf64 100644
--- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
@@ -1,6 +1,6 @@
 package sarama
 
-//AddPartitionsToTxnRequest is a add paartition request
+// AddPartitionsToTxnRequest is a request type for adding partitions to a transaction
 type AddPartitionsToTxnRequest struct {
 	TransactionalID string
 	ProducerID      int64
@@ -72,6 +72,10 @@
 	return 0
 }
 
+func (a *AddPartitionsToTxnRequest) headerVersion() int16 {
+	return 1
+}
+
 func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
index eb4f23e..0989565 100644
--- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
@@ -4,7 +4,7 @@
 	"time"
 )
 
-//AddPartitionsToTxnResponse is a partition errors to transaction type
+// AddPartitionsToTxnResponse is a response type for adding partitions to a transaction
 type AddPartitionsToTxnResponse struct {
 	ThrottleTime time.Duration
 	Errors       map[string][]*PartitionError
@@ -79,11 +79,15 @@
 	return 0
 }
 
+func (a *AddPartitionsToTxnResponse) headerVersion() int16 {
+	return 0
+}
+
 func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
 
-//PartitionError is a partition error type
+// PartitionError is a partition error type
 type PartitionError struct {
 	Partition int32
 	Err       KError
diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go
index 1db6a0e..abe18b1 100644
--- a/vendor/github.com/Shopify/sarama/admin.go
+++ b/vendor/github.com/Shopify/sarama/admin.go
@@ -2,8 +2,11 @@
 
 import (
 	"errors"
+	"fmt"
 	"math/rand"
+	"strconv"
 	"sync"
+	"time"
 )
 
 // ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
@@ -39,6 +42,14 @@
 	// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
 	CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
 
+	// Alter the replica assignment for partitions.
+	// This operation is supported by brokers with version 2.4.0.0 or higher.
+	AlterPartitionReassignments(topic string, assignment [][]int32) error
+
+	// Provides info on ongoing partition replica reassignments.
+	// This operation is supported by brokers with version 2.4.0.0 or higher.
+	ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error)
+
 	// Delete records whose offset is smaller than the given offset of the corresponding partition.
 	// This operation is supported by brokers with version 0.11.0.0 or higher.
 	DeleteRecords(topic string, partitionOffsets map[int32]int64) error
@@ -90,6 +101,18 @@
 	// Get information about the nodes in the cluster
 	DescribeCluster() (brokers []*Broker, controllerID int32, err error)
 
+	// Get information about all log directories on the given set of brokers
+	DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error)
+
+	// Get information about SCRAM users
+	DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error)
+
+	// Delete SCRAM users
+	DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error)
+
+	// Upsert SCRAM users
+	UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error)
+
 	// Close shuts down the admin and closes underlying client.
 	Close() error
 }
@@ -105,9 +128,14 @@
 	if err != nil {
 		return nil, err
 	}
+	return NewClusterAdminFromClient(client)
+}
 
-	//make sure we can retrieve the controller
-	_, err = client.Controller()
+// NewClusterAdminFromClient creates a new ClusterAdmin using the given client.
+// Note that the underlying client will also be closed on admin's Close() call.
+func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) {
+	// make sure we can retrieve the controller
+	_, err := client.Controller()
 	if err != nil {
 		return nil, err
 	}
@@ -127,8 +155,45 @@
 	return ca.client.Controller()
 }
 
-func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
+func (ca *clusterAdmin) refreshController() (*Broker, error) {
+	return ca.client.RefreshController()
+}
 
+// isErrNoController returns `true` if the given error type unwraps to an
+// `ErrNotController` response from Kafka
+func isErrNoController(err error) bool {
+	switch e := err.(type) {
+	case *TopicError:
+		return e.Err == ErrNotController
+	case *TopicPartitionError:
+		return e.Err == ErrNotController
+	case KError:
+		return e == ErrNotController
+	}
+	return false
+}
+
+// retryOnError will repeatedly call the given (error-returning) func in the
+// case that its response is non-nil and retryable (as determined by the
+// provided retryable func) up to the maximum number of tries permitted by
+// the admin client configuration
+func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error {
+	var err error
+	for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ {
+		err = fn()
+		if err == nil || !retryable(err) {
+			return err
+		}
+		Logger.Printf(
+			"admin/request retrying after %dms... (%d attempts remaining)\n",
+			ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt)
+		time.Sleep(ca.conf.Admin.Retry.Backoff)
+		continue
+	}
+	return err
+}
+
+func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
 	if topic == "" {
 		return ErrInvalidTopic
 	}
@@ -153,26 +218,31 @@
 		request.Version = 2
 	}
 
-	b, err := ca.Controller()
-	if err != nil {
-		return err
-	}
+	return ca.retryOnError(isErrNoController, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
 
-	rsp, err := b.CreateTopics(request)
-	if err != nil {
-		return err
-	}
+		rsp, err := b.CreateTopics(request)
+		if err != nil {
+			return err
+		}
 
-	topicErr, ok := rsp.TopicErrors[topic]
-	if !ok {
-		return ErrIncompleteResponse
-	}
+		topicErr, ok := rsp.TopicErrors[topic]
+		if !ok {
+			return ErrIncompleteResponse
+		}
 
-	if topicErr.Err != ErrNoError {
-		return topicErr
-	}
+		if topicErr.Err != ErrNoError {
+			if topicErr.Err == ErrNotController {
+				_, _ = ca.refreshController()
+			}
+			return topicErr
+		}
 
-	return nil
+		return nil
+	})
 }
 
 func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
@@ -209,6 +279,10 @@
 		Topics: []string{},
 	}
 
+	if ca.conf.Version.IsAtLeast(V0_10_0_0) {
+		request.Version = 1
+	}
+
 	response, err := controller.GetMetadata(request)
 	if err != nil {
 		return nil, int32(0), err
@@ -217,6 +291,16 @@
 	return response.Brokers, response.ControllerID, nil
 }
 
+func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) {
+	brokers := ca.client.Brokers()
+	for _, b := range brokers {
+		if b.ID() == id {
+			return b, nil
+		}
+	}
+	return nil, fmt.Errorf("could not find broker id %d", id)
+}
+
 func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
 	brokers := ca.client.Brokers()
 	if len(brokers) > 0 {
@@ -274,6 +358,15 @@
 	describeConfigsReq := &DescribeConfigsRequest{
 		Resources: describeConfigsResources,
 	}
+
+	if ca.conf.Version.IsAtLeast(V1_1_0_0) {
+		describeConfigsReq.Version = 1
+	}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		describeConfigsReq.Version = 2
+	}
+
 	describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
 	if err != nil {
 		return nil, err
@@ -299,7 +392,6 @@
 }
 
 func (ca *clusterAdmin) DeleteTopic(topic string) error {
-
 	if topic == "" {
 		return ErrInvalidTopic
 	}
@@ -313,25 +405,31 @@
 		request.Version = 1
 	}
 
-	b, err := ca.Controller()
-	if err != nil {
-		return err
-	}
+	return ca.retryOnError(isErrNoController, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
 
-	rsp, err := b.DeleteTopics(request)
-	if err != nil {
-		return err
-	}
+		rsp, err := b.DeleteTopics(request)
+		if err != nil {
+			return err
+		}
 
-	topicErr, ok := rsp.TopicErrorCodes[topic]
-	if !ok {
-		return ErrIncompleteResponse
-	}
+		topicErr, ok := rsp.TopicErrorCodes[topic]
+		if !ok {
+			return ErrIncompleteResponse
+		}
 
-	if topicErr != ErrNoError {
-		return topicErr
-	}
-	return nil
+		if topicErr != ErrNoError {
+			if topicErr == ErrNotController {
+				_, _ = ca.refreshController()
+			}
+			return topicErr
+		}
+
+		return nil
+	})
 }
 
 func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
@@ -347,30 +445,110 @@
 		Timeout:         ca.conf.Admin.Timeout,
 	}
 
+	return ca.retryOnError(isErrNoController, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		rsp, err := b.CreatePartitions(request)
+		if err != nil {
+			return err
+		}
+
+		topicErr, ok := rsp.TopicPartitionErrors[topic]
+		if !ok {
+			return ErrIncompleteResponse
+		}
+
+		if topicErr.Err != ErrNoError {
+			if topicErr.Err == ErrNotController {
+				_, _ = ca.refreshController()
+			}
+			return topicErr
+		}
+
+		return nil
+	})
+}
+
+func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error {
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	request := &AlterPartitionReassignmentsRequest{
+		TimeoutMs: int32(60000),
+		Version:   int16(0),
+	}
+
+	for i := 0; i < len(assignment); i++ {
+		request.AddBlock(topic, int32(i), assignment[i])
+	}
+
+	return ca.retryOnError(isErrNoController, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		errs := make([]error, 0)
+
+		rsp, err := b.AlterPartitionReassignments(request)
+
+		if err != nil {
+			errs = append(errs, err)
+		} else {
+			if rsp.ErrorCode > 0 {
+				errs = append(errs, errors.New(rsp.ErrorCode.Error()))
+			}
+
+			for topic, topicErrors := range rsp.Errors {
+				for partition, partitionError := range topicErrors {
+					if partitionError.errorCode != ErrNoError {
+						errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error())
+						errs = append(errs, errors.New(errStr))
+					}
+				}
+			}
+		}
+
+		if len(errs) > 0 {
+			return ErrReassignPartitions{MultiError{&errs}}
+		}
+
+		return nil
+	})
+}
+
+func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) {
+	if topic == "" {
+		return nil, ErrInvalidTopic
+	}
+
+	request := &ListPartitionReassignmentsRequest{
+		TimeoutMs: int32(60000),
+		Version:   int16(0),
+	}
+
+	request.AddBlock(topic, partitions)
+
 	b, err := ca.Controller()
 	if err != nil {
-		return err
+		return nil, err
 	}
+	_ = b.Open(ca.client.Config())
 
-	rsp, err := b.CreatePartitions(request)
-	if err != nil {
-		return err
+	rsp, err := b.ListPartitionReassignments(request)
+
+	if err == nil && rsp != nil {
+		return rsp.TopicStatus, nil
+	} else {
+		return nil, err
 	}
-
-	topicErr, ok := rsp.TopicPartitionErrors[topic]
-	if !ok {
-		return ErrIncompleteResponse
-	}
-
-	if topicErr.Err != ErrNoError {
-		return topicErr
-	}
-
-	return nil
 }
 
 func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
-
 	if topic == "" {
 		return ErrInvalidTopic
 	}
@@ -380,11 +558,7 @@
 		if err != nil {
 			return err
 		}
-		if _, ok := partitionPerBroker[broker]; ok {
-			partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
-		} else {
-			partitionPerBroker[broker] = []int32{partition}
-		}
+		partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
 	}
 	errs := make([]error, 0)
 	for broker, partitions := range partitionPerBroker {
@@ -418,13 +592,19 @@
 	if len(errs) > 0 {
 		return ErrDeleteRecords{MultiError{&errs}}
 	}
-	//todo since we are dealing with couple of partitions it would be good if we return slice of errors
-	//for each partition instead of one error
+	// TODO: since we are dealing with multiple partitions it would be good to return
+	// a slice of errors, one per partition, instead of a single error
 	return nil
 }
 
-func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
+// dependsOnSpecificNode returns true when the config request must be sent to the
+// specific broker it refers to, rather than to any broker
+func dependsOnSpecificNode(resource ConfigResource) bool {
+	return (resource.Type == BrokerResource && resource.Name != "") ||
+		resource.Type == BrokerLoggerResource
+}
 
+func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
 	var entries []ConfigEntry
 	var resources []*ConfigResource
 	resources = append(resources, &resource)
@@ -433,11 +613,35 @@
 		Resources: resources,
 	}
 
-	b, err := ca.Controller()
+	if ca.conf.Version.IsAtLeast(V1_1_0_0) {
+		request.Version = 1
+	}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 2
+	}
+
+	var (
+		b   *Broker
+		err error
+	)
+
+	// DescribeConfig of broker/broker logger must be sent to the broker in question
+	if dependsOnSpecificNode(resource) {
+		var id int64
+		id, err = strconv.ParseInt(resource.Name, 10, 32)
+		if err != nil {
+			return nil, err
+		}
+		b, err = ca.findBroker(int32(id))
+	} else {
+		b, err = ca.findAnyBroker()
+	}
 	if err != nil {
 		return nil, err
 	}
 
+	_ = b.Open(ca.client.Config())
 	rsp, err := b.DescribeConfigs(request)
 	if err != nil {
 		return nil, err
@@ -448,6 +652,9 @@
 			if rspResource.ErrorMsg != "" {
 				return nil, errors.New(rspResource.ErrorMsg)
 			}
+			if rspResource.ErrorCode != 0 {
+				return nil, KError(rspResource.ErrorCode)
+			}
 			for _, cfgEntry := range rspResource.Configs {
 				entries = append(entries, *cfgEntry)
 			}
@@ -457,7 +664,6 @@
 }
 
 func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
-
 	var resources []*AlterConfigsResource
 	resources = append(resources, &AlterConfigsResource{
 		Type:          resourceType,
@@ -470,11 +676,27 @@
 		ValidateOnly: validateOnly,
 	}
 
-	b, err := ca.Controller()
+	var (
+		b   *Broker
+		err error
+	)
+
+	// AlterConfig of broker/broker logger must be sent to the broker in question
+	if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
+		var id int64
+		id, err = strconv.ParseInt(name, 10, 32)
+		if err != nil {
+			return err
+		}
+		b, err = ca.findBroker(int32(id))
+	} else {
+		b, err = ca.findAnyBroker()
+	}
 	if err != nil {
 		return err
 	}
 
+	_ = b.Open(ca.client.Config())
 	rsp, err := b.AlterConfigs(request)
 	if err != nil {
 		return err
@@ -485,6 +707,9 @@
 			if rspResource.ErrorMsg != "" {
 				return errors.New(rspResource.ErrorMsg)
 			}
+			if rspResource.ErrorCode != 0 {
+				return KError(rspResource.ErrorCode)
+			}
 		}
 	}
 	return nil
@@ -495,6 +720,10 @@
 	acls = append(acls, &AclCreation{resource, acl})
 	request := &CreateAclsRequest{AclCreations: acls}
 
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
 	b, err := ca.Controller()
 	if err != nil {
 		return err
@@ -505,9 +734,12 @@
 }
 
 func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
-
 	request := &DescribeAclsRequest{AclFilter: filter}
 
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
 	b, err := ca.Controller()
 	if err != nil {
 		return nil, err
@@ -530,6 +762,10 @@
 	filters = append(filters, &filter)
 	request := &DeleteAclsRequest{Filters: filters}
 
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
 	b, err := ca.Controller()
 	if err != nil {
 		return nil, err
@@ -545,7 +781,6 @@
 		for _, mACL := range fr.MatchingAcls {
 			mAcls = append(mAcls, *mACL)
 		}
-
 	}
 	return mAcls, nil
 }
@@ -559,7 +794,6 @@
 			return nil, err
 		}
 		groupsPerBroker[controller] = append(groupsPerBroker[controller], group)
-
 	}
 
 	for broker, brokerGroups := range groupsPerBroker {
@@ -581,7 +815,7 @@
 	// Query brokers in parallel, since we have to query *all* brokers
 	brokers := ca.client.Brokers()
 	groupMaps := make(chan map[string]string, len(brokers))
-	errors := make(chan error, len(brokers))
+	errChan := make(chan error, len(brokers))
 	wg := sync.WaitGroup{}
 
 	for _, b := range brokers {
@@ -592,7 +826,7 @@
 
 			response, err := b.ListGroups(&ListGroupsRequest{})
 			if err != nil {
-				errors <- err
+				errChan <- err
 				return
 			}
 
@@ -602,13 +836,12 @@
 			}
 
 			groupMaps <- groups
-
 		}(b, ca.conf)
 	}
 
 	wg.Wait()
 	close(groupMaps)
-	close(errors)
+	close(errChan)
 
 	for groupMap := range groupMaps {
 		for group, protocolType := range groupMap {
@@ -617,7 +850,7 @@
 	}
 
 	// Intentionally return only the first error for simplicity
-	err = <-errors
+	err = <-errChan
 	return
 }
 
@@ -667,3 +900,106 @@
 
 	return nil
 }
+
+func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) {
+	allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata)
+
+	// Query brokers in parallel, since we may have to query multiple brokers
+	logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds))
+	errChan := make(chan error, len(brokerIds))
+	wg := sync.WaitGroup{}
+
+	for _, b := range brokerIds {
+		wg.Add(1)
+		broker, err := ca.findBroker(b)
+		if err != nil {
+			Logger.Printf("Unable to find broker with ID = %v\n", b)
+			continue
+		}
+		go func(b *Broker, conf *Config) {
+			defer wg.Done()
+			_ = b.Open(conf) // Ensure that broker is opened
+
+			response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{})
+			if err != nil {
+				errChan <- err
+				return
+			}
+			logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata)
+			logDirs[b.ID()] = response.LogDirs
+			logDirsMaps <- logDirs
+		}(broker, ca.conf)
+	}
+
+	wg.Wait()
+	close(logDirsMaps)
+	close(errChan)
+
+	for logDirsMap := range logDirsMaps {
+		for id, logDirs := range logDirsMap {
+			allLogDirs[id] = logDirs
+		}
+	}
+
+	// Intentionally return only the first error for simplicity
+	err = <-errChan
+	return
+}
+
+func (ca *clusterAdmin) DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error) {
+	req := &DescribeUserScramCredentialsRequest{}
+	for _, u := range users {
+		req.DescribeUsers = append(req.DescribeUsers, DescribeUserScramCredentialsRequestUser{
+			Name: u,
+		})
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DescribeUserScramCredentials(req)
+	if err != nil {
+		return nil, err
+	}
+
+	return rsp.Results, nil
+}
+
+func (ca *clusterAdmin) UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error) {
+	res, err := ca.AlterUserScramCredentials(upsert, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+func (ca *clusterAdmin) DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
+	res, err := ca.AlterUserScramCredentials(nil, delete)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsUpsert, d []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
+	req := &AlterUserScramCredentialsRequest{
+		Deletions:  d,
+		Upsertions: u,
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.AlterUserScramCredentials(req)
+	if err != nil {
+		return nil, err
+	}
+
+	return rsp.Results, nil
+}
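The admin changes above add partition-reassignment, log-dir and SCRAM-credential APIs to `ClusterAdmin`, plus controller-retry handling. A usage sketch of the two additions most relevant to this bump follows; it is illustrative only, and the broker address, broker IDs and topic name are placeholders.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_4_0_0 // the reassignment APIs require brokers >= 2.4.0

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatalln(err)
	}
	defer admin.Close()

	// Move partition 0 of "example-topic" onto brokers 1,2,3 and partition 1 onto 2,3,1.
	assignment := [][]int32{{1, 2, 3}, {2, 3, 1}}
	if err := admin.AlterPartitionReassignments("example-topic", assignment); err != nil {
		log.Fatalln(err)
	}

	// Inspect log directories on brokers 1 and 2.
	dirs, err := admin.DescribeLogDirs([]int32{1, 2})
	if err != nil {
		log.Fatalln(err)
	}
	for broker, logDirs := range dirs {
		log.Printf("broker %d reports %d log dir(s)", broker, len(logDirs))
	}
}
```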
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go
index 26c275b..8b94b1f 100644
--- a/vendor/github.com/Shopify/sarama/alter_configs_request.go
+++ b/vendor/github.com/Shopify/sarama/alter_configs_request.go
@@ -1,12 +1,12 @@
 package sarama
 
-//AlterConfigsRequest is an alter config request type
+// AlterConfigsRequest is an alter config request type
 type AlterConfigsRequest struct {
 	Resources    []*AlterConfigsResource
 	ValidateOnly bool
 }
 
-//AlterConfigsResource is an alter config resource type
+// AlterConfigsResource is an alter config resource type
 type AlterConfigsResource struct {
 	Type          ConfigResourceType
 	Name          string
@@ -117,6 +117,10 @@
 	return 0
 }
 
+func (a *AlterConfigsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go
index 3893663..cfb6369 100644
--- a/vendor/github.com/Shopify/sarama/alter_configs_response.go
+++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go
@@ -2,13 +2,13 @@
 
 import "time"
 
-//AlterConfigsResponse is a reponse type for alter config
+// AlterConfigsResponse is a response type for alter config
 type AlterConfigsResponse struct {
 	ThrottleTime time.Duration
 	Resources    []*AlterConfigsResourceResponse
 }
 
-//AlterConfigsResourceResponse is a reponse type for alter config resource
+// AlterConfigsResourceResponse is a response type for alter config resource
 type AlterConfigsResourceResponse struct {
 	ErrorCode int16
 	ErrorMsg  string
@@ -23,16 +23,9 @@
 		return err
 	}
 
-	for i := range a.Resources {
-		pe.putInt16(a.Resources[i].ErrorCode)
-		err := pe.putString(a.Resources[i].ErrorMsg)
-		if err != nil {
-			return nil
-		}
-		pe.putInt8(int8(a.Resources[i].Type))
-		err = pe.putString(a.Resources[i].Name)
-		if err != nil {
-			return nil
+	for _, v := range a.Resources {
+		if err := v.encode(pe); err != nil {
+			return err
 		}
 	}
 
@@ -56,34 +49,56 @@
 	for i := range a.Resources {
 		a.Resources[i] = new(AlterConfigsResourceResponse)
 
-		errCode, err := pd.getInt16()
-		if err != nil {
+		if err := a.Resources[i].decode(pd, version); err != nil {
 			return err
 		}
-		a.Resources[i].ErrorCode = errCode
-
-		e, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		a.Resources[i].ErrorMsg = e
-
-		t, err := pd.getInt8()
-		if err != nil {
-			return err
-		}
-		a.Resources[i].Type = ConfigResourceType(t)
-
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		a.Resources[i].Name = name
 	}
 
 	return nil
 }
 
+func (a *AlterConfigsResourceResponse) encode(pe packetEncoder) error {
+	pe.putInt16(a.ErrorCode)
+	err := pe.putString(a.ErrorMsg)
+	if err != nil {
+		return nil
+	}
+	pe.putInt8(int8(a.Type))
+	err = pe.putString(a.Name)
+	if err != nil {
+		return nil
+	}
+	return nil
+}
+
+func (a *AlterConfigsResourceResponse) decode(pd packetDecoder, version int16) error {
+	errCode, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.ErrorCode = errCode
+
+	e, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	a.ErrorMsg = e
+
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	a.Name = name
+
+	return nil
+}
+
 func (a *AlterConfigsResponse) key() int16 {
 	return 32
 }
@@ -92,6 +107,10 @@
 	return 0
 }
 
+func (a *AlterConfigsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go
new file mode 100644
index 0000000..f0a2f9d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go
@@ -0,0 +1,130 @@
+package sarama
+
+type alterPartitionReassignmentsBlock struct {
+	replicas []int32
+}
+
+func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error {
+	if err := pe.putNullableCompactInt32Array(b.replicas); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) {
+	if b.replicas, err = pd.getCompactInt32Array(); err != nil {
+		return err
+	}
+	return nil
+}
+
+type AlterPartitionReassignmentsRequest struct {
+	TimeoutMs int32
+	blocks    map[string]map[int32]*alterPartitionReassignmentsBlock
+	Version   int16
+}
+
+func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error {
+	pe.putInt32(r.TimeoutMs)
+
+	pe.putCompactArrayLength(len(r.blocks))
+
+	for topic, partitions := range r.blocks {
+		if err := pe.putCompactString(topic); err != nil {
+			return err
+		}
+		pe.putCompactArrayLength(len(partitions))
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.TimeoutMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	topicCount, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount > 0 {
+		r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
+		for i := 0; i < topicCount; i++ {
+			topic, err := pd.getCompactString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getCompactArrayLength()
+			if err != nil {
+				return err
+			}
+			r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
+			for j := 0; j < partitionCount; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				block := &alterPartitionReassignmentsBlock{}
+				if err := block.decode(pd); err != nil {
+					return err
+				}
+				r.blocks[topic][partition] = block
+
+				if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+					return err
+				}
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+
+	return
+}
+
+func (r *AlterPartitionReassignmentsRequest) key() int16 {
+	return 45
+}
+
+func (r *AlterPartitionReassignmentsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
+	}
+
+	r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas}
+}
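The same reassignment can also be issued at the protocol level. The sketch below builds the request much as the `ClusterAdmin` helper does and sends it through an already-opened controller `Broker`; the topic name and broker IDs are placeholders.

```go
package example

import "github.com/Shopify/sarama"

// reassignPartition asks the (already-opened) controller broker b to move
// partition 0 of "example-topic" onto brokers 1, 2 and 3.
func reassignPartition(b *sarama.Broker) error {
	req := &sarama.AlterPartitionReassignmentsRequest{
		TimeoutMs: 60000, // same timeout the ClusterAdmin helper uses
	}
	req.AddBlock("example-topic", 0, []int32{1, 2, 3})

	rsp, err := b.AlterPartitionReassignments(req)
	if err != nil {
		return err
	}
	if rsp.ErrorCode != sarama.ErrNoError {
		return rsp.ErrorCode
	}
	return nil
}
```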
diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go
new file mode 100644
index 0000000..b3f9a15
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go
@@ -0,0 +1,157 @@
+package sarama
+
+type alterPartitionReassignmentsErrorBlock struct {
+	errorCode    KError
+	errorMessage *string
+}
+
+func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error {
+	pe.putInt16(int16(b.errorCode))
+	if err := pe.putNullableCompactString(b.errorMessage); err != nil {
+		return err
+	}
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) {
+	errorCode, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.errorCode = KError(errorCode)
+	b.errorMessage, err = pd.getCompactNullableString()
+
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return err
+}
+
+type AlterPartitionReassignmentsResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	ErrorCode      KError
+	ErrorMessage   *string
+	Errors         map[string]map[int32]*alterPartitionReassignmentsErrorBlock
+}
+
+func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) {
+	if r.Errors == nil {
+		r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock)
+	}
+	partitions := r.Errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock)
+		r.Errors[topic] = partitions
+	}
+
+	partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message}
+}
+
+func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(r.ThrottleTimeMs)
+	pe.putInt16(int16(r.ErrorCode))
+	if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
+		return err
+	}
+
+	pe.putCompactArrayLength(len(r.Errors))
+	for topic, partitions := range r.Errors {
+		if err := pe.putCompactString(topic); err != nil {
+			return err
+		}
+		pe.putCompactArrayLength(len(partitions))
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+
+			if err := block.encode(pe); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.ErrorCode = KError(kerr)
+
+	if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
+		return err
+	}
+
+	numTopics, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numTopics > 0 {
+		r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics)
+		for i := 0; i < numTopics; i++ {
+			topic, err := pd.getCompactString()
+			if err != nil {
+				return err
+			}
+
+			ongoingPartitionReassignments, err := pd.getCompactArrayLength()
+			if err != nil {
+				return err
+			}
+
+			r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments)
+
+			for j := 0; j < ongoingPartitionReassignments; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				block := &alterPartitionReassignmentsErrorBlock{}
+				if err := block.decode(pd); err != nil {
+					return err
+				}
+
+				r.Errors[topic][partition] = block
+			}
+			if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *AlterPartitionReassignmentsResponse) key() int16 {
+	return 45
+}
+
+func (r *AlterPartitionReassignmentsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 {
+	return 1
+}
+
+func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
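The response nests per-partition errors under each topic in addition to the top-level ErrorCode. A hedged sketch of how code in the sarama package might count those failures (the helper name is invented; ErrNoError is the library's existing "no error" KError value):

```go
// countReassignmentErrors walks the nested Errors map of a decoded response.
// Illustrative only; it relies on package-level access to the unexported
// errorCode field.
func countReassignmentErrors(resp *AlterPartitionReassignmentsResponse) int {
	n := 0
	for _, partitions := range resp.Errors {
		for _, block := range partitions {
			if block.errorCode != ErrNoError {
				n++
			}
		}
	}
	return n
}
```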
diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go
new file mode 100644
index 0000000..0530d89
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go
@@ -0,0 +1,142 @@
+package sarama
+
+type AlterUserScramCredentialsRequest struct {
+	Version int16
+
+	// Deletions is the list of SCRAM credentials to remove
+	Deletions []AlterUserScramCredentialsDelete
+
+	// Upsertions is the list of SCRAM credentials to update or insert
+	Upsertions []AlterUserScramCredentialsUpsert
+}
+
+type AlterUserScramCredentialsDelete struct {
+	Name      string
+	Mechanism ScramMechanismType
+}
+
+type AlterUserScramCredentialsUpsert struct {
+	Name           string
+	Mechanism      ScramMechanismType
+	Iterations     int32
+	Salt           []byte
+	saltedPassword []byte
+
+	// This field is never transmitted over the wire
+	// @see: https://tools.ietf.org/html/rfc5802
+	Password []byte
+}
+
+func (r *AlterUserScramCredentialsRequest) encode(pe packetEncoder) error {
+	pe.putCompactArrayLength(len(r.Deletions))
+	for _, d := range r.Deletions {
+		if err := pe.putCompactString(d.Name); err != nil {
+			return err
+		}
+		pe.putInt8(int8(d.Mechanism))
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putCompactArrayLength(len(r.Upsertions))
+	for _, u := range r.Upsertions {
+		if err := pe.putCompactString(u.Name); err != nil {
+			return err
+		}
+		pe.putInt8(int8(u.Mechanism))
+		pe.putInt32(u.Iterations)
+
+		if err := pe.putCompactBytes(u.Salt); err != nil {
+			return err
+		}
+
+		// do not transmit the password over the wire
+		formatter := scramFormatter{mechanism: u.Mechanism}
+		salted, err := formatter.saltedPassword(u.Password, u.Salt, int(u.Iterations))
+		if err != nil {
+			return err
+		}
+
+		if err := pe.putCompactBytes(salted); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *AlterUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
+	numDeletions, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Deletions = make([]AlterUserScramCredentialsDelete, numDeletions)
+	for i := 0; i < numDeletions; i++ {
+		r.Deletions[i] = AlterUserScramCredentialsDelete{}
+		if r.Deletions[i].Name, err = pd.getCompactString(); err != nil {
+			return err
+		}
+		mechanism, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Deletions[i].Mechanism = ScramMechanismType(mechanism)
+		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	numUpsertions, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Upsertions = make([]AlterUserScramCredentialsUpsert, numUpsertions)
+	for i := 0; i < numUpsertions; i++ {
+		r.Upsertions[i] = AlterUserScramCredentialsUpsert{}
+		if r.Upsertions[i].Name, err = pd.getCompactString(); err != nil {
+			return err
+		}
+		mechanism, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+
+		r.Upsertions[i].Mechanism = ScramMechanismType(mechanism)
+		if r.Upsertions[i].Iterations, err = pd.getInt32(); err != nil {
+			return err
+		}
+		if r.Upsertions[i].Salt, err = pd.getCompactBytes(); err != nil {
+			return err
+		}
+		if r.Upsertions[i].saltedPassword, err = pd.getCompactBytes(); err != nil {
+			return err
+		}
+		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *AlterUserScramCredentialsRequest) key() int16 {
+	return 51
+}
+
+func (r *AlterUserScramCredentialsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *AlterUserScramCredentialsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *AlterUserScramCredentialsRequest) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
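Because encode() salts the plaintext password locally and only ever sends the salted form, a caller supplies the plaintext, a salt and an iteration count. A sketch of how Deletions and Upsertions might be populated (the SCRAM mechanism constant names are assumed from the surrounding package; user names and values are placeholders):

```go
// buildScramRequest is an illustrative helper that removes one credential and
// upserts another; the salt would normally come from crypto/rand.
func buildScramRequest(salt []byte) *AlterUserScramCredentialsRequest {
	return &AlterUserScramCredentialsRequest{
		Deletions: []AlterUserScramCredentialsDelete{
			{Name: "old-user", Mechanism: SCRAM_MECHANISM_SHA_256},
		},
		Upsertions: []AlterUserScramCredentialsUpsert{{
			Name:       "new-user",
			Mechanism:  SCRAM_MECHANISM_SHA_512,
			Iterations: 8192,
			Salt:       salt,
			// Password never crosses the wire; encode() sends the salted form.
			Password: []byte("changeit"),
		}},
	}
}
```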
diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go
new file mode 100644
index 0000000..31e167b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go
@@ -0,0 +1,94 @@
+package sarama
+
+import "time"
+
+type AlterUserScramCredentialsResponse struct {
+	Version int16
+
+	ThrottleTime time.Duration
+
+	Results []*AlterUserScramCredentialsResult
+}
+
+type AlterUserScramCredentialsResult struct {
+	User string
+
+	ErrorCode    KError
+	ErrorMessage *string
+}
+
+func (r *AlterUserScramCredentialsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+	pe.putCompactArrayLength(len(r.Results))
+
+	for _, u := range r.Results {
+		if err := pe.putCompactString(u.User); err != nil {
+			return err
+		}
+		pe.putInt16(int16(u.ErrorCode))
+		if err := pe.putNullableCompactString(u.ErrorMessage); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *AlterUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	numResults, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numResults > 0 {
+		r.Results = make([]*AlterUserScramCredentialsResult, numResults)
+		for i := 0; i < numResults; i++ {
+			r.Results[i] = &AlterUserScramCredentialsResult{}
+			if r.Results[i].User, err = pd.getCompactString(); err != nil {
+				return err
+			}
+
+			kerr, err := pd.getInt16()
+			if err != nil {
+				return err
+			}
+
+			r.Results[i].ErrorCode = KError(kerr)
+			if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil {
+				return err
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *AlterUserScramCredentialsResponse) key() int16 {
+	return 51
+}
+
+func (r *AlterUserScramCredentialsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *AlterUserScramCredentialsResponse) headerVersion() int16 {
+	return 2
+}
+
+func (r *AlterUserScramCredentialsResponse) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
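Each result carries its own error code and optional message, so callers can see exactly which user updates failed. A small sketch (helper name invented; ErrNoError is the library's existing KError constant):

```go
// failedScramUsers collects the user names whose credential update failed.
func failedScramUsers(resp *AlterUserScramCredentialsResponse) []string {
	var users []string
	for _, res := range resp.Results {
		if res.ErrorCode != ErrNoError {
			users = append(users, res.User)
		}
	}
	return users
}
```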
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
index b33167c..bee92c0 100644
--- a/vendor/github.com/Shopify/sarama/api_versions_request.go
+++ b/vendor/github.com/Shopify/sarama/api_versions_request.go
@@ -1,8 +1,7 @@
 package sarama
 
-//ApiVersionsRequest ...
-type ApiVersionsRequest struct {
-}
+// ApiVersionsRequest ...
+type ApiVersionsRequest struct{}
 
 func (a *ApiVersionsRequest) encode(pe packetEncoder) error {
 	return nil
@@ -20,6 +19,10 @@
 	return 0
 }
 
+func (a *ApiVersionsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (a *ApiVersionsRequest) requiredVersion() KafkaVersion {
 	return V0_10_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
index bb1f0b3..0e72e39 100644
--- a/vendor/github.com/Shopify/sarama/api_versions_response.go
+++ b/vendor/github.com/Shopify/sarama/api_versions_response.go
@@ -1,6 +1,6 @@
 package sarama
 
-//ApiVersionsResponseBlock is an api version reponse block type
+// ApiVersionsResponseBlock is an api version response block type
 type ApiVersionsResponseBlock struct {
 	ApiKey     int16
 	MinVersion int16
@@ -32,7 +32,7 @@
 	return nil
 }
 
-//ApiVersionsResponse is an api version response type
+// ApiVersionsResponse is an api version response type
 type ApiVersionsResponse struct {
 	Err         KError
 	ApiVersions []*ApiVersionsResponseBlock
@@ -84,6 +84,10 @@
 	return 0
 }
 
+func (a *ApiVersionsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
 	return V0_10_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
index 11e0849..5911f7b 100644
--- a/vendor/github.com/Shopify/sarama/async_producer.go
+++ b/vendor/github.com/Shopify/sarama/async_producer.go
@@ -60,13 +60,28 @@
 	noProducerEpoch = -1
 )
 
-func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) int32 {
+func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) {
 	key := fmt.Sprintf("%s-%d", topic, partition)
 	t.mutex.Lock()
 	defer t.mutex.Unlock()
 	sequence := t.sequenceNumbers[key]
 	t.sequenceNumbers[key] = sequence + 1
-	return sequence
+	return sequence, t.producerEpoch
+}
+
+func (t *transactionManager) bumpEpoch() {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.producerEpoch++
+	for k := range t.sequenceNumbers {
+		t.sequenceNumbers[k] = 0
+	}
+}
+
+func (t *transactionManager) getProducerID() (int64, int16) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	return t.producerID, t.producerEpoch
 }
 
 func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
@@ -208,6 +223,8 @@
 	flags          flagSet
 	expectation    chan *ProducerError
 	sequenceNumber int32
+	producerEpoch  int16
+	hasSequence    bool
 }
 
 const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
@@ -234,6 +251,9 @@
 func (m *ProducerMessage) clear() {
 	m.flags = 0
 	m.retries = 0
+	m.sequenceNumber = 0
+	m.producerEpoch = 0
+	m.hasSequence = false
 }
 
 // ProducerError is the type of error generated when the producer fails to deliver a message.
@@ -247,6 +267,10 @@
 	return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
 }
 
+func (pe ProducerError) Unwrap() error {
+	return pe.Err
+}
+
 // ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
 // It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
 // when closing a producer.
@@ -328,6 +352,10 @@
 			p.inFlight.Add(1)
 		}
 
+		for _, interceptor := range p.conf.Producer.Interceptors {
+			msg.safelyApplyInterceptor(interceptor)
+		}
+
 		version := 1
 		if p.conf.Version.IsAtLeast(V0_11_0_0) {
 			version = 2
@@ -388,10 +416,6 @@
 				continue
 			}
 		}
-		// All messages being retried (sent or not) have already had their retry count updated
-		if tp.parent.conf.Producer.Idempotent && msg.retries == 0 {
-			msg.sequenceNumber = tp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
-		}
 
 		handler := tp.handlers[msg.Partition]
 		if handler == nil {
@@ -411,7 +435,7 @@
 	var partitions []int32
 
 	err := tp.breaker.Run(func() (err error) {
-		var requiresConsistency = false
+		requiresConsistency := false
 		if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
 			requiresConsistency = ep.MessageRequiresConsistency(msg)
 		} else {
@@ -425,7 +449,6 @@
 		}
 		return
 	})
-
 	if err != nil {
 		return err
 	}
@@ -520,7 +543,6 @@
 	}()
 
 	for msg := range pp.input {
-
 		if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil {
 			select {
 			case <-pp.brokerProducer.abandoned:
@@ -571,6 +593,15 @@
 			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
 		}
 
+		// Now that we know we have a broker to actually try and send this message to, generate the sequence
+		// number for it.
+		// All messages being retried (sent or not) have already had their retry count updated
+		// Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer.
+		if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 {
+			msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
+			msg.hasSequence = true
+		}
+
 		pp.brokerProducer.input <- msg
 	}
 }
@@ -652,6 +683,7 @@
 		input:          input,
 		output:         bridge,
 		responses:      responses,
+		stopchan:       make(chan struct{}),
 		buffer:         newProduceSet(p),
 		currentRetries: make(map[string]map[int32]error),
 	}
@@ -696,6 +728,7 @@
 	output    chan<- *produceSet
 	responses <-chan *brokerProducerResponse
 	abandoned chan struct{}
+	stopchan  chan struct{}
 
 	buffer     *produceSet
 	timer      <-chan time.Time
@@ -711,12 +744,17 @@
 
 	for {
 		select {
-		case msg := <-bp.input:
-			if msg == nil {
+		case msg, ok := <-bp.input:
+			if !ok {
+				Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID())
 				bp.shutdown()
 				return
 			}
 
+			if msg == nil {
+				continue
+			}
+
 			if msg.flags&syn == syn {
 				Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
 					bp.broker.ID(), msg.Topic, msg.Partition)
@@ -742,12 +780,21 @@
 			}
 
 			if bp.buffer.wouldOverflow(msg) {
-				if err := bp.waitForSpace(msg); err != nil {
+				Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+				if err := bp.waitForSpace(msg, false); err != nil {
 					bp.parent.retryMessage(msg, err)
 					continue
 				}
 			}
 
+			if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch {
+				// The epoch was reset, need to roll the buffer over
+				Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID())
+				if err := bp.waitForSpace(msg, true); err != nil {
+					bp.parent.retryMessage(msg, err)
+					continue
+				}
+			}
 			if err := bp.buffer.add(msg); err != nil {
 				bp.parent.returnError(msg, err)
 				continue
@@ -760,8 +807,14 @@
 			bp.timerFired = true
 		case output <- bp.buffer:
 			bp.rollOver()
-		case response := <-bp.responses:
-			bp.handleResponse(response)
+		case response, ok := <-bp.responses:
+			if ok {
+				bp.handleResponse(response)
+			}
+		case <-bp.stopchan:
+			Logger.Printf(
+				"producer/broker/%d run loop asked to stop\n", bp.broker.ID())
+			return
 		}
 
 		if bp.timerFired || bp.buffer.readyToFlush() {
@@ -785,7 +838,7 @@
 	for response := range bp.responses {
 		bp.handleResponse(response)
 	}
-
+	close(bp.stopchan)
 	Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
 }
 
@@ -797,9 +850,7 @@
 	return bp.currentRetries[msg.Topic][msg.Partition]
 }
 
-func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
-	Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
-
+func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error {
 	for {
 		select {
 		case response := <-bp.responses:
@@ -807,7 +858,7 @@
 			// handling a response can change our state, so re-check some things
 			if reason := bp.needsRetry(msg); reason != nil {
 				return reason
-			} else if !bp.buffer.wouldOverflow(msg) {
+			} else if !bp.buffer.wouldOverflow(msg) && !forceRollover {
 				return nil
 			}
 		case bp.output <- bp.buffer:
@@ -1018,6 +1069,12 @@
 }
 
 func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+	// We need to reset the producer ID epoch if we set a sequence number on it, because the broker
+	// will never see a message with this number, so we can never continue the sequence.
+	if msg.hasSequence {
+		Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition)
+		p.txnmgr.bumpEpoch()
+	}
 	msg.clear()
 	pErr := &ProducerError{Msg: msg, Err: err}
 	if p.conf.Producer.Return.Errors {
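The new ProducerError.Unwrap makes delivery failures work with the standard errors package, so callers can match on the wrapped KError instead of comparing strings. A hedged sketch of an error-draining loop (it assumes the errors import, an AsyncProducer with Producer.Return.Errors enabled, and uses ErrMessageSizeTooLarge purely as an example):

```go
// drainProducerErrors shows how errors.Is reaches the wrapped KError through
// ProducerError.Unwrap. Illustrative only.
func drainProducerErrors(p AsyncProducer) {
	for pErr := range p.Errors() {
		if errors.Is(pErr, ErrMessageSizeTooLarge) {
			Logger.Printf("dropping oversized message for topic %s", pErr.Msg.Topic)
			continue
		}
		Logger.Printf("delivery failed: %v", pErr)
	}
}
```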
diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go
index 2fce17f..9855bf4 100644
--- a/vendor/github.com/Shopify/sarama/balance_strategy.go
+++ b/vendor/github.com/Shopify/sarama/balance_strategy.go
@@ -1,8 +1,25 @@
 package sarama
 
 import (
+	"container/heap"
+	"errors"
+	"fmt"
 	"math"
 	"sort"
+	"strings"
+)
+
+const (
+	// RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy
+	RangeBalanceStrategyName = "range"
+
+	// RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy
+	RoundRobinBalanceStrategyName = "roundrobin"
+
+	// StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy
+	StickyBalanceStrategyName = "sticky"
+
+	defaultGeneration = -1
 )
 
 // BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt.
@@ -32,6 +49,10 @@
 	// Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
 	// and returns a distribution plan.
 	Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
+
+	// AssignmentData returns the serialized assignment data for the specified
+	// memberID
+	AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error)
 }
 
 // --------------------------------------------------------------------
@@ -41,7 +62,7 @@
 //   M1: {T: [0, 1, 2]}
 //   M2: {T: [3, 4, 5]}
 var BalanceStrategyRange = &balanceStrategy{
-	name: "range",
+	name: RangeBalanceStrategyName,
 	coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
 		step := float64(len(partitions)) / float64(len(memberIDs))
 
@@ -54,19 +75,18 @@
 	},
 }
 
-// BalanceStrategyRoundRobin assigns partitions to members in alternating order.
+// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments
+// while maintaining a balanced partition distribution.
 // Example with topic T with six partitions (0..5) and two members (M1, M2):
 //   M1: {T: [0, 2, 4]}
 //   M2: {T: [1, 3, 5]}
-var BalanceStrategyRoundRobin = &balanceStrategy{
-	name: "roundrobin",
-	coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
-		for i, part := range partitions {
-			memberID := memberIDs[i%len(memberIDs)]
-			plan.Add(memberID, topic, part)
-		}
-	},
-}
+//
+// On reassignment with an additional consumer, you might get an assignment plan like:
+//   M1: {T: [0, 2]}
+//   M2: {T: [1, 3]}
+//   M3: {T: [4, 5]}
+//
+var BalanceStrategySticky = &stickyBalanceStrategy{}
 
 // --------------------------------------------------------------------
 
@@ -104,6 +124,11 @@
 	return plan, nil
 }
 
+// AssignmentData returns nil; simple strategies do not require any shared assignment data
+func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return nil, nil
+}
+
 type balanceStrategySortable struct {
 	topic     string
 	memberIDs []string
@@ -113,6 +138,7 @@
 func (p balanceStrategySortable) Swap(i, j int) {
 	p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i]
 }
+
 func (p balanceStrategySortable) Less(i, j int) bool {
 	return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j])
 }
@@ -127,3 +153,984 @@
 	}
 	return h
 }
+
+type stickyBalanceStrategy struct {
+	movements partitionMovements
+}
+
+// Name implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName }
+
+// Plan implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	// track partition movements during generation of the partition assignment plan
+	s.movements = partitionMovements{
+		Movements:                 make(map[topicPartitionAssignment]consumerPair),
+		PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool),
+	}
+
+	// prepopulate the current assignment state from userdata on the consumer group members
+	currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members)
+	if err != nil {
+		return nil, err
+	}
+
+	// determine if we're dealing with a completely fresh assignment, or if there's existing assignment state
+	isFreshAssignment := false
+	if len(currentAssignment) == 0 {
+		isFreshAssignment = true
+	}
+
+	// create a mapping of all current topic partitions and the consumers that can be assigned to them
+	partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string)
+	for topic, partitions := range topics {
+		for _, partition := range partitions {
+			partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{}
+		}
+	}
+
+	// create a mapping of all consumers to all potential topic partitions that can be assigned to them
+	// also, populate the mapping of partitions to potential consumers
+	consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members))
+	for memberID, meta := range members {
+		consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0)
+		for _, topicSubscription := range meta.Topics {
+			// only evaluate topic subscriptions that are present in the supplied topics map
+			if _, found := topics[topicSubscription]; found {
+				for _, partition := range topics[topicSubscription] {
+					topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition}
+					consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition)
+					partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID)
+				}
+			}
+		}
+
+		// add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist
+		if _, exists := currentAssignment[memberID]; !exists {
+			currentAssignment[memberID] = make([]topicPartitionAssignment, 0)
+		}
+	}
+
+	// create a mapping of each partition to its current consumer, where possible
+	currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment))
+	unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+	for partition := range partition2AllPotentialConsumers {
+		unvisitedPartitions[partition] = true
+	}
+	var unassignedPartitions []topicPartitionAssignment
+	for memberID, partitions := range currentAssignment {
+		var keepPartitions []topicPartitionAssignment
+		for _, partition := range partitions {
+			// If this partition no longer exists at all, likely due to the
+			// topic being deleted, we remove the partition from the member.
+			if _, exists := partition2AllPotentialConsumers[partition]; !exists {
+				continue
+			}
+			delete(unvisitedPartitions, partition)
+			currentPartitionConsumers[partition] = memberID
+
+			if !strsContains(members[memberID].Topics, partition.Topic) {
+				unassignedPartitions = append(unassignedPartitions, partition)
+				continue
+			}
+			keepPartitions = append(keepPartitions, partition)
+		}
+		currentAssignment[memberID] = keepPartitions
+	}
+	for unvisited := range unvisitedPartitions {
+		unassignedPartitions = append(unassignedPartitions, unvisited)
+	}
+
+	// sort the topic partitions in order of priority for reassignment
+	sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions)
+
+	// at this point we have preserved all valid topic partition to consumer assignments and removed
+	// all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions
+	// to consumers so that the topic partition assignments are as balanced as possible.
+
+	// an ascending sorted set of consumers based on how many topic partitions are already assigned to them
+	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+	s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers)
+
+	// Assemble plan
+	plan := make(BalanceStrategyPlan, len(currentAssignment))
+	for memberID, assignments := range currentAssignment {
+		if len(assignments) == 0 {
+			plan[memberID] = make(map[string][]int32)
+		} else {
+			for _, assignment := range assignments {
+				plan.Add(memberID, assignment.Topic, assignment.Partition)
+			}
+		}
+	}
+	return plan, nil
+}
+
+// AssignmentData serializes the set of topics currently assigned to the
+// specified member as part of the supplied balance plan
+func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return encode(&StickyAssignorUserDataV1{
+		Topics:     topics,
+		Generation: generationID,
+	}, nil)
+}
+
+func strsContains(s []string, value string) bool {
+	for _, entry := range s {
+		if entry == value {
+			return true
+		}
+	}
+	return false
+}
+
+// Balance assignments across consumers for maximum fairness and stickiness.
+func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) {
+	initializing := false
+	if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 {
+		initializing = true
+	}
+
+	// assign all unassigned partitions
+	for _, partition := range unassignedPartitions {
+		// skip if there is no potential consumer for the partition
+		if len(partition2AllPotentialConsumers[partition]) == 0 {
+			continue
+		}
+		sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer)
+	}
+
+	// narrow down the reassignment scope to only those partitions that can actually be reassigned
+	for partition := range partition2AllPotentialConsumers {
+		if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+			sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition)
+		}
+	}
+
+	// narrow down the reassignment scope to only those consumers that are subject to reassignment
+	fixedAssignments := make(map[string][]topicPartitionAssignment)
+	for memberID := range consumer2AllPotentialPartitions {
+		if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) {
+			fixedAssignments[memberID] = currentAssignment[memberID]
+			delete(currentAssignment, memberID)
+			sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment)
+		}
+	}
+
+	// create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later
+	preBalanceAssignment := deepCopyAssignment(currentAssignment)
+	preBalancePartitionConsumers := make(map[topicPartitionAssignment]string, len(currentPartitionConsumer))
+	for k, v := range currentPartitionConsumer {
+		preBalancePartitionConsumers[k] = v
+	}
+
+	reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer)
+
+	// if we are not preserving existing assignments and we have made changes to the current assignment
+	// make sure we are getting a more balanced assignment; otherwise, revert to previous assignment
+	if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) {
+		currentAssignment = deepCopyAssignment(preBalanceAssignment)
+		currentPartitionConsumer = make(map[topicPartitionAssignment]string, len(preBalancePartitionConsumers))
+		for k, v := range preBalancePartitionConsumers {
+			currentPartitionConsumer[k] = v
+		}
+	}
+
+	// add the fixed assignments (those that could not change) back
+	for consumer, assignments := range fixedAssignments {
+		currentAssignment[consumer] = assignments
+	}
+}
+
+// BalanceStrategyRoundRobin assigns partitions to members in alternating order.
+// For example, there are two topics (t0, t1) and two consumers (m0, m1), and each topic has three partitions (p0, p1, p2):
+// M0: [t0p0, t0p2, t1p1]
+// M1: [t0p1, t1p0, t1p2]
+var BalanceStrategyRoundRobin = new(roundRobinBalancer)
+
+type roundRobinBalancer struct{}
+
+func (b *roundRobinBalancer) Name() string {
+	return RoundRobinBalanceStrategyName
+}
+
+func (b *roundRobinBalancer) Plan(memberAndMetadata map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	if len(memberAndMetadata) == 0 || len(topics) == 0 {
+		return nil, errors.New("members and topics are not provided")
+	}
+	// sort partitions
+	var topicPartitions []topicAndPartition
+	for topic, partitions := range topics {
+		for _, partition := range partitions {
+			topicPartitions = append(topicPartitions, topicAndPartition{topic: topic, partition: partition})
+		}
+	}
+	sort.SliceStable(topicPartitions, func(i, j int) bool {
+		pi := topicPartitions[i]
+		pj := topicPartitions[j]
+		return pi.comparedValue() < pj.comparedValue()
+	})
+
+	// sort members
+	var members []memberAndTopic
+	for memberID, meta := range memberAndMetadata {
+		m := memberAndTopic{
+			memberID: memberID,
+			topics:   make(map[string]struct{}),
+		}
+		for _, t := range meta.Topics {
+			m.topics[t] = struct{}{}
+		}
+		members = append(members, m)
+	}
+	sort.SliceStable(members, func(i, j int) bool {
+		mi := members[i]
+		mj := members[j]
+		return mi.memberID < mj.memberID
+	})
+
+	// assign partitions
+	plan := make(BalanceStrategyPlan, len(members))
+	i := 0
+	n := len(members)
+	for _, tp := range topicPartitions {
+		m := members[i%n]
+		for !m.hasTopic(tp.topic) {
+			i++
+			m = members[i%n]
+		}
+		plan.Add(m.memberID, tp.topic, tp.partition)
+		i++
+	}
+	return plan, nil
+}
+
+func (b *roundRobinBalancer) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return nil, nil // do nothing for now
+}
+
+type topicAndPartition struct {
+	topic     string
+	partition int32
+}
+
+func (tp *topicAndPartition) comparedValue() string {
+	return fmt.Sprintf("%s-%d", tp.topic, tp.partition)
+}
+
+type memberAndTopic struct {
+	memberID string
+	topics   map[string]struct{}
+}
+
+func (m *memberAndTopic) hasTopic(topic string) bool {
+	_, isExist := m.topics[topic]
+	return isExist
+}
+
+// Calculate the balance score of the given assignment, as the sum of assigned-partition count differences across all consumer pairs.
+// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0.
+// A lower balance score indicates a more balanced assignment.
+func getBalanceScore(assignment map[string][]topicPartitionAssignment) int {
+	consumer2AssignmentSize := make(map[string]int, len(assignment))
+	for memberID, partitions := range assignment {
+		consumer2AssignmentSize[memberID] = len(partitions)
+	}
+
+	var score float64
+	for memberID, consumerAssignmentSize := range consumer2AssignmentSize {
+		delete(consumer2AssignmentSize, memberID)
+		for _, otherConsumerAssignmentSize := range consumer2AssignmentSize {
+			score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize))
+		}
+	}
+	return int(score)
+}
+
+// Determine whether the current assignment plan is balanced.
+func isBalanced(currentAssignment map[string][]topicPartitionAssignment, allSubscriptions map[string][]topicPartitionAssignment) bool {
+	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+	min := len(currentAssignment[sortedCurrentSubscriptions[0]])
+	max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]])
+	if min >= max-1 {
+		// if minimum and maximum numbers of partitions assigned to consumers differ by at most one return true
+		return true
+	}
+
+	// create a mapping from partitions to the consumer assigned to them
+	allPartitions := make(map[topicPartitionAssignment]string)
+	for memberID, partitions := range currentAssignment {
+		for _, partition := range partitions {
+			if _, exists := allPartitions[partition]; exists {
+				Logger.Printf("Topic %s Partition %d is assigned to more than one consumer", partition.Topic, partition.Partition)
+			}
+			allPartitions[partition] = memberID
+		}
+	}
+
+	// for each consumer that does not have all the topic partitions it can get, make sure none of the topic partitions it
+	// could have (but did not get) can be moved to it (because that would break the balance)
+	for _, memberID := range sortedCurrentSubscriptions {
+		consumerPartitions := currentAssignment[memberID]
+		consumerPartitionCount := len(consumerPartitions)
+
+		// skip if this consumer already has all the topic partitions it can get
+		if consumerPartitionCount == len(allSubscriptions[memberID]) {
+			continue
+		}
+
+		// otherwise make sure it cannot get any more
+		potentialTopicPartitions := allSubscriptions[memberID]
+		for _, partition := range potentialTopicPartitions {
+			if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) {
+				otherConsumer := allPartitions[partition]
+				otherConsumerPartitionCount := len(currentAssignment[otherConsumer])
+				if consumerPartitionCount < otherConsumerPartitionCount {
+					return false
+				}
+			}
+		}
+	}
+	return true
+}
+
+// Reassign all topic partitions that need reassignment until balanced.
+func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool {
+	reassignmentPerformed := false
+	modified := false
+
+	// repeat reassignment until no partition can be moved to improve the balance
+	for {
+		modified = false
+		// reassign all reassignable partitions (starting from the partition with the fewest potential consumers), if needed,
+		// until the full list is processed or a balance is achieved
+		for _, partition := range reassignablePartitions {
+			if isBalanced(currentAssignment, consumer2AllPotentialPartitions) {
+				break
+			}
+
+			// the partition must have at least two consumers
+			if len(partition2AllPotentialConsumers[partition]) <= 1 {
+				Logger.Printf("Expected more than one potential consumer for topic %s partition %d", partition.Topic, partition.Partition)
+			}
+
+			// the partition must have a consumer
+			consumer := currentPartitionConsumer[partition]
+			if consumer == "" {
+				Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition)
+			}
+
+			if _, exists := prevAssignment[partition]; exists {
+				if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) {
+					sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID)
+					reassignmentPerformed = true
+					modified = true
+					continue
+				}
+			}
+
+			// check if a better-suited consumer exists for the partition; if so, reassign it
+			for _, otherConsumer := range partition2AllPotentialConsumers[partition] {
+				if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) {
+					sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions)
+					reassignmentPerformed = true
+					modified = true
+					break
+				}
+			}
+		}
+		if !modified {
+			return reassignmentPerformed
+		}
+	}
+}
+
+// Identify a new consumer for a topic partition and reassign it.
+func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string {
+	for _, anotherConsumer := range sortedCurrentSubscriptions {
+		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) {
+			return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer)
+		}
+	}
+	return sortedCurrentSubscriptions
+}
+
+// Reassign a specific partition to a new consumer
+func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string {
+	consumer := currentPartitionConsumer[partition]
+	// find the correct partition movement considering the stickiness requirement
+	partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer)
+	return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer)
+}
+
+// Track the movement of a topic partition after assignment
+func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+	oldConsumer := currentPartitionConsumer[partition]
+	s.movements.movePartition(partition, oldConsumer, newConsumer)
+
+	currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition)
+	currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition)
+	currentPartitionConsumer[partition] = newConsumer
+	return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Determine whether a specific consumer should be considered for topic partition assignment.
+func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+	currentPartitions := currentAssignment[memberID]
+	currentAssignmentSize := len(currentPartitions)
+	maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID])
+	if currentAssignmentSize > maxAssignmentSize {
+		Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID)
+	}
+	if currentAssignmentSize < maxAssignmentSize {
+		// if a consumer is not assigned all its potential partitions it is subject to reassignment
+		return true
+	}
+	for _, partition := range currentPartitions {
+		if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+			return true
+		}
+	}
+	return false
+}
+
+// Only consider reassigning those topic partitions that have two or more potential consumers.
+func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+	return len(partition2AllPotentialConsumers[partition]) >= 2
+}
+
+// Assign the partition to the least-loaded consumer that can take it, so that the assignment improves the overall balance of the partition assignments to consumers.
+func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+	for _, memberID := range sortedCurrentSubscriptions {
+		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) {
+			currentAssignment[memberID] = append(currentAssignment[memberID], partition)
+			currentPartitionConsumer[partition] = memberID
+			break
+		}
+	}
+	return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Deserialize topic partition assignment data to aid with creation of a sticky assignment.
+func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) {
+	userDataV1 := &StickyAssignorUserDataV1{}
+	if err := decode(userDataBytes, userDataV1); err != nil {
+		userDataV0 := &StickyAssignorUserDataV0{}
+		if err := decode(userDataBytes, userDataV0); err != nil {
+			return nil, err
+		}
+		return userDataV0, nil
+	}
+	return userDataV1, nil
+}
+
+// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited
+// to those topic partitions currently reported by the Kafka cluster.
+func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment {
+	assignments := deepCopyAssignment(currentAssignment)
+	for memberID, partitions := range assignments {
+		// perform in-place filtering
+		i := 0
+		for _, partition := range partitions {
+			if _, exists := partition2AllPotentialConsumers[partition]; exists {
+				partitions[i] = partition
+				i++
+			}
+		}
+		assignments[memberID] = partitions[:i]
+	}
+	return assignments
+}
+
+func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment {
+	for i, assignment := range assignments {
+		if assignment == topic {
+			return append(assignments[:i], assignments[i+1:]...)
+		}
+	}
+	return assignments
+}
+
+func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool {
+	for _, assignment := range assignments {
+		if assignment == topic {
+			return true
+		}
+	}
+	return false
+}
+
+func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment {
+	unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+	for partition := range partition2AllPotentialConsumers {
+		unassignedPartitions[partition] = true
+	}
+
+	sortedPartitions := make([]topicPartitionAssignment, 0)
+	if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) {
+		// if this is a reassignment and the subscriptions are identical (all consumers can consume from all topics)
+		// then we simply list the partitions in a round-robin fashion (from consumers with the
+		// most assigned partitions to those with the least)
+		assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers)
+
+		// use priority-queue to evaluate consumer group members in descending-order based on
+		// the number of topic partition assignments (i.e. consumers with most assignments first)
+		pq := make(assignmentPriorityQueue, len(assignments))
+		i := 0
+		for consumerID, consumerAssignments := range assignments {
+			pq[i] = &consumerGroupMember{
+				id:          consumerID,
+				assignments: consumerAssignments,
+			}
+			i++
+		}
+		heap.Init(&pq)
+
+		for {
+			// loop until no consumer-group members remain
+			if pq.Len() == 0 {
+				break
+			}
+			member := pq[0]
+
+			// partitions that were assigned to a different consumer last time
+			var prevPartitionIndex int
+			for i, partition := range member.assignments {
+				if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists {
+					prevPartitionIndex = i
+					break
+				}
+			}
+
+			if len(member.assignments) > 0 {
+				partition := member.assignments[prevPartitionIndex]
+				sortedPartitions = append(sortedPartitions, partition)
+				delete(unassignedPartitions, partition)
+				if prevPartitionIndex == 0 {
+					member.assignments = member.assignments[1:]
+				} else {
+					member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...)
+				}
+				heap.Fix(&pq, 0)
+			} else {
+				heap.Pop(&pq)
+			}
+		}
+
+		for partition := range unassignedPartitions {
+			sortedPartitions = append(sortedPartitions, partition)
+		}
+	} else {
+		// an ascending sorted set of topic partitions based on how many consumers can potentially use them
+		sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers)
+	}
+	return sortedPartitions
+}
+
+func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string {
+	// sort the members by the number of partition assignments in ascending order
+	sortedMemberIDs := make([]string, 0, len(assignments))
+	for memberID := range assignments {
+		sortedMemberIDs = append(sortedMemberIDs, memberID)
+	}
+	sort.SliceStable(sortedMemberIDs, func(i, j int) bool {
+		ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]])
+		if ret == 0 {
+			return sortedMemberIDs[i] < sortedMemberIDs[j]
+		}
+		return ret < 0
+	})
+	return sortedMemberIDs
+}
+
+func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment {
+	// sort the partitions by the number of potential consumers in ascending order
+	sortedPartionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers))
+	i := 0
+	for partition := range partition2AllPotentialConsumers {
+		sortedPartionIDs[i] = partition
+		i++
+	}
+	sort.Slice(sortedPartionIDs, func(i, j int) bool {
+		if len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) {
+			ret := strings.Compare(sortedPartionIDs[i].Topic, sortedPartionIDs[j].Topic)
+			if ret == 0 {
+				return sortedPartionIDs[i].Partition < sortedPartionIDs[j].Partition
+			}
+			return ret < 0
+		}
+		return len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartionIDs[j]])
+	})
+	return sortedPartionIDs
+}
+
+func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment {
+	m := make(map[string][]topicPartitionAssignment, len(assignment))
+	for memberID, subscriptions := range assignment {
+		m[memberID] = append(subscriptions[:0:0], subscriptions...)
+	}
+	return m
+}
+
+func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool {
+	curMembers := make(map[string]int)
+	for _, cur := range partition2AllPotentialConsumers {
+		if len(curMembers) == 0 {
+			for _, curMembersElem := range cur {
+				curMembers[curMembersElem]++
+			}
+			continue
+		}
+
+		if len(curMembers) != len(cur) {
+			return false
+		}
+
+		yMap := make(map[string]int)
+		for _, yElem := range cur {
+			yMap[yElem]++
+		}
+
+		for curMembersMapKey, curMembersMapVal := range curMembers {
+			if yMap[curMembersMapKey] != curMembersMapVal {
+				return false
+			}
+		}
+	}
+
+	curPartitions := make(map[topicPartitionAssignment]int)
+	for _, cur := range consumer2AllPotentialPartitions {
+		if len(curPartitions) == 0 {
+			for _, curPartitionElem := range cur {
+				curPartitions[curPartitionElem]++
+			}
+			continue
+		}
+
+		if len(curPartitions) != len(cur) {
+			return false
+		}
+
+		yMap := make(map[topicPartitionAssignment]int)
+		for _, yElem := range cur {
+			yMap[yElem]++
+		}
+
+		for curMembersMapKey, curMembersMapVal := range curPartitions {
+			if yMap[curMembersMapKey] != curMembersMapVal {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// We need to process subscriptions' user data with each consumer's reported generation in mind:
+// higher generations overwrite lower generations in case of a conflict.
+// Note that a conflict could exist only if user data is for different generations.
+func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) {
+	currentAssignment := make(map[string][]topicPartitionAssignment)
+	prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair)
+
+	// for each partition we create a sorted map of its consumers by generation
+	sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string)
+	for memberID, meta := range members {
+		consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData)
+		if err != nil {
+			return nil, nil, err
+		}
+		for _, partition := range consumerUserData.partitions() {
+			if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists {
+				if consumerUserData.hasGeneration() {
+					if _, generationExists := consumers[consumerUserData.generation()]; generationExists {
+						// same partition is assigned to two consumers during the same rebalance.
+						// log a warning and skip this record
+						Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation())
+						continue
+					} else {
+						consumers[consumerUserData.generation()] = memberID
+					}
+				} else {
+					consumers[defaultGeneration] = memberID
+				}
+			} else {
+				generation := defaultGeneration
+				if consumerUserData.hasGeneration() {
+					generation = consumerUserData.generation()
+				}
+				sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID}
+			}
+		}
+	}
+
+	// prevAssignment holds the prior ConsumerGenerationPair (before current) of each partition
+	// current and previous consumers are the last two consumers of each partition in the above sorted map
+	for partition, consumers := range sortedPartitionConsumersByGeneration {
+		// sort consumers by generation in decreasing order
+		var generations []int
+		for generation := range consumers {
+			generations = append(generations, generation)
+		}
+		sort.Sort(sort.Reverse(sort.IntSlice(generations)))
+
+		consumer := consumers[generations[0]]
+		if _, exists := currentAssignment[consumer]; !exists {
+			currentAssignment[consumer] = []topicPartitionAssignment{partition}
+		} else {
+			currentAssignment[consumer] = append(currentAssignment[consumer], partition)
+		}
+
+		// check for previous assignment, if any
+		if len(generations) > 1 {
+			prevAssignment[partition] = consumerGenerationPair{
+				MemberID:   consumers[generations[1]],
+				Generation: generations[1],
+			}
+		}
+	}
+	return currentAssignment, prevAssignment, nil
+}
+
+type consumerGenerationPair struct {
+	MemberID   string
+	Generation int
+}
+
+// consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment.
+type consumerPair struct {
+	SrcMemberID string
+	DstMemberID string
+}
+
+// partitionMovements maintains some data structures to simplify lookup of partition movements among consumers.
+type partitionMovements struct {
+	PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool
+	Movements                 map[topicPartitionAssignment]consumerPair
+}
+
+func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair {
+	pair := p.Movements[partition]
+	delete(p.Movements, partition)
+
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	delete(partitionMovementsForThisTopic[pair], partition)
+	if len(partitionMovementsForThisTopic[pair]) == 0 {
+		delete(partitionMovementsForThisTopic, pair)
+	}
+	if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 {
+		delete(p.PartitionMovementsByTopic, partition.Topic)
+	}
+	return pair
+}
+
+func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) {
+	p.Movements[partition] = pair
+	if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
+		p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool)
+	}
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	if _, exists := partitionMovementsForThisTopic[pair]; !exists {
+		partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool)
+	}
+	partitionMovementsForThisTopic[pair][partition] = true
+}
+
+func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) {
+	pair := consumerPair{
+		SrcMemberID: oldConsumer,
+		DstMemberID: newConsumer,
+	}
+	if _, exists := p.Movements[partition]; exists {
+		// this partition has previously moved
+		existingPair := p.removeMovementRecordOfPartition(partition)
+		if existingPair.DstMemberID != oldConsumer {
+			Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer)
+		}
+		if existingPair.SrcMemberID != newConsumer {
+			// the partition is not moving back to its previous consumer
+			p.addPartitionMovementRecord(partition, consumerPair{
+				SrcMemberID: existingPair.SrcMemberID,
+				DstMemberID: newConsumer,
+			})
+		}
+	} else {
+		p.addPartitionMovementRecord(partition, pair)
+	}
+}
+
+func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment {
+	if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
+		return partition
+	}
+	if _, exists := p.Movements[partition]; exists {
+		// this partition has previously moved
+		if oldConsumer != p.Movements[partition].DstMemberID {
+			Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer)
+		}
+		oldConsumer = p.Movements[partition].SrcMemberID
+	}
+
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	reversePair := consumerPair{
+		SrcMemberID: newConsumer,
+		DstMemberID: oldConsumer,
+	}
+	if _, exists := partitionMovementsForThisTopic[reversePair]; !exists {
+		return partition
+	}
+	var reversePairPartition topicPartitionAssignment
+	for otherPartition := range partitionMovementsForThisTopic[reversePair] {
+		reversePairPartition = otherPartition
+	}
+	return reversePairPartition
+}
+
+func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) {
+	if src == dst {
+		return currentPath, false
+	}
+	if len(pairs) == 0 {
+		return currentPath, false
+	}
+	for _, pair := range pairs {
+		if src == pair.SrcMemberID && dst == pair.DstMemberID {
+			currentPath = append(currentPath, src, dst)
+			return currentPath, true
+		}
+	}
+
+	for _, pair := range pairs {
+		if pair.SrcMemberID == src {
+			// create a deep copy of the pairs, excluding the current pair
+			reducedSet := make([]consumerPair, len(pairs)-1)
+			i := 0
+			for _, p := range pairs {
+				if p != pair {
+					reducedSet[i] = p
+					i++
+				}
+			}
+
+			currentPath = append(currentPath, pair.SrcMemberID)
+			return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath)
+		}
+	}
+	return currentPath, false
+}
+
+func (p *partitionMovements) in(cycle []string, cycles [][]string) bool {
+	superCycle := make([]string, len(cycle)-1)
+	for i := 0; i < len(cycle)-1; i++ {
+		superCycle[i] = cycle[i]
+	}
+	superCycle = append(superCycle, cycle...)
+	for _, foundCycle := range cycles {
+		if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *partitionMovements) hasCycles(pairs []consumerPair) bool {
+	cycles := make([][]string, 0)
+	for _, pair := range pairs {
+		// create a deep copy of the pairs, excluding the current pair
+		reducedPairs := make([]consumerPair, len(pairs)-1)
+		i := 0
+		for _, p := range pairs {
+			if p != pair {
+				reducedPairs[i] = p
+				i++
+			}
+		}
+		if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked {
+			if !p.in(path, cycles) {
+				cycles = append(cycles, path)
+				Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path)
+			}
+		}
+	}
+
+	// for now we want to make sure there are no partition movements of the same topic between a pair of consumers.
+	// the odds of finding a cycle among more than two consumers are so low (according to various randomized
+	// tests with the given sticky algorithm) that it is not worth the added complexity of handling those cases.
+	for _, cycle := range cycles {
+		if len(cycle) == 3 {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *partitionMovements) isSticky() bool {
+	for topic, movements := range p.PartitionMovementsByTopic {
+		movementPairs := make([]consumerPair, len(movements))
+		i := 0
+		for pair := range movements {
+			movementPairs[i] = pair
+			i++
+		}
+		if p.hasCycles(movementPairs) {
+			Logger.Printf("Stickiness is violated for topic %s", topic)
+			Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements)
+			return false
+		}
+	}
+	return true
+}
+
+func indexOfSubList(source []string, target []string) int {
+	targetSize := len(target)
+	maxCandidate := len(source) - targetSize
+nextCand:
+	for candidate := 0; candidate <= maxCandidate; candidate++ {
+		j := candidate
+		for i := 0; i < targetSize; i++ {
+			if target[i] != source[j] {
+				// Element mismatch, try next cand
+				continue nextCand
+			}
+			j++
+		}
+		// All elements of candidate matched target
+		return candidate
+	}
+	return -1
+}
+
+type consumerGroupMember struct {
+	id          string
+	assignments []topicPartitionAssignment
+}
+
+// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted
+// in descending order (most assignments to least assignments).
+type assignmentPriorityQueue []*consumerGroupMember
+
+func (pq assignmentPriorityQueue) Len() int { return len(pq) }
+
+func (pq assignmentPriorityQueue) Less(i, j int) bool {
+	// order the assignment priority queue in descending order by assignment count, breaking ties by member id
+	if len(pq[i].assignments) == len(pq[j].assignments) {
+		return strings.Compare(pq[i].id, pq[j].id) > 0
+	}
+	return len(pq[i].assignments) > len(pq[j].assignments)
+}
+
+func (pq assignmentPriorityQueue) Swap(i, j int) {
+	pq[i], pq[j] = pq[j], pq[i]
+}
+
+func (pq *assignmentPriorityQueue) Push(x interface{}) {
+	member := x.(*consumerGroupMember)
+	*pq = append(*pq, member)
+}
+
+func (pq *assignmentPriorityQueue) Pop() interface{} {
+	old := *pq
+	n := len(old)
+	member := old[n-1]
+	*pq = old[0 : n-1]
+	return member
+}
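
The assignmentPriorityQueue above implements heap.Interface so the assignor can always take the member with the most assignments first, ties broken by member id. A standalone sketch, using hypothetical local types in place of the unexported Sarama ones, of how such a queue is driven with container/heap:

```go
// Standalone sketch (not Sarama code): a descending priority queue of group
// members keyed on assignment count, mirroring assignmentPriorityQueue above.
package main

import (
	"container/heap"
	"fmt"
	"strings"
)

type member struct {
	id          string
	assignments []string // stands in for []topicPartitionAssignment
}

type pq []*member

func (q pq) Len() int { return len(q) }
func (q pq) Less(i, j int) bool {
	if len(q[i].assignments) == len(q[j].assignments) {
		return strings.Compare(q[i].id, q[j].id) > 0
	}
	return len(q[i].assignments) > len(q[j].assignments)
}
func (q pq) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
func (q *pq) Push(x interface{}) { *q = append(*q, x.(*member)) }
func (q *pq) Pop() interface{} {
	old := *q
	n := len(old)
	m := old[n-1]
	*q = old[:n-1]
	return m
}

func main() {
	q := &pq{
		{id: "c1", assignments: []string{"t0p0"}},
		{id: "c2", assignments: []string{"t0p1", "t0p2"}},
	}
	heap.Init(q)
	fmt.Println(heap.Pop(q).(*member).id) // prints c2: the most-loaded member comes out first
}
```
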
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
index 9c3e5a0..dd01e4e 100644
--- a/vendor/github.com/Shopify/sarama/broker.go
+++ b/vendor/github.com/Shopify/sarama/broker.go
@@ -13,7 +13,7 @@
 	"sync/atomic"
 	"time"
 
-	metrics "github.com/rcrowley/go-metrics"
+	"github.com/rcrowley/go-metrics"
 )
 
 // Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
@@ -40,6 +40,7 @@
 	outgoingByteRate       metrics.Meter
 	responseRate           metrics.Meter
 	responseSize           metrics.Histogram
+	requestsInFlight       metrics.Counter
 	brokerIncomingByteRate metrics.Meter
 	brokerRequestRate      metrics.Meter
 	brokerRequestSize      metrics.Histogram
@@ -47,6 +48,7 @@
 	brokerOutgoingByteRate metrics.Meter
 	brokerResponseRate     metrics.Meter
 	brokerResponseSize     metrics.Histogram
+	brokerRequestsInFlight metrics.Counter
 
 	kerberosAuthenticator GSSAPIKerberosAuth
 }
@@ -71,7 +73,7 @@
 	// server negotiate SASL by wrapping tokens with Kafka protocol headers.
 	SASLHandshakeV1 = int16(1)
 	// SASLExtKeyAuth is the reserved extension key name sent as part of the
-	// SASL/OAUTHBEARER intial client response
+	// SASL/OAUTHBEARER initial client response
 	SASLExtKeyAuth = "auth"
 )
 
@@ -117,6 +119,7 @@
 type responsePromise struct {
 	requestTime   time.Time
 	correlationID int32
+	headerVersion int16
 	packets       chan []byte
 	errors        chan error
 }
@@ -151,27 +154,19 @@
 	go withRecover(func() {
 		defer b.lock.Unlock()
 
-		dialer := net.Dialer{
-			Timeout:   conf.Net.DialTimeout,
-			KeepAlive: conf.Net.KeepAlive,
-			LocalAddr: conf.Net.LocalAddr,
-		}
-
-		if conf.Net.TLS.Enable {
-			b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
-		} else if conf.Net.Proxy.Enable {
-			b.conn, b.connErr = conf.Net.Proxy.Dialer.Dial("tcp", b.addr)
-		} else {
-			b.conn, b.connErr = dialer.Dial("tcp", b.addr)
-		}
+		dialer := conf.getDialer()
+		b.conn, b.connErr = dialer.Dial("tcp", b.addr)
 		if b.connErr != nil {
 			Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
 			b.conn = nil
 			atomic.StoreInt32(&b.opened, 0)
 			return
 		}
-		b.conn = newBufConn(b.conn)
+		if conf.Net.TLS.Enable {
+			b.conn = tls.Client(b.conn, validServerNameTLS(b.addr, conf.Net.TLS.Config))
+		}
 
+		b.conn = newBufConn(b.conn)
 		b.conf = conf
 
 		// Create or reuse the global metrics shared between brokers
@@ -182,6 +177,7 @@
 		b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
 		b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
 		b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
+		b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", conf.MetricRegistry)
 		// Do not gather metrics for seeded broker (only used during bootstrap) because they share
 		// the same id (-1) and are already exposed through the global metrics above
 		if b.id >= 0 {
@@ -189,7 +185,6 @@
 		}
 
 		if conf.Net.SASL.Enable {
-
 			b.connErr = b.authenticateViaSASL()
 
 			if b.connErr != nil {
@@ -228,7 +223,7 @@
 	return b.conn != nil, b.connErr
 }
 
-//Close closes the broker resources
+// Close closes the broker resources
 func (b *Broker) Close() error {
 	b.lock.Lock()
 	defer b.lock.Unlock()
@@ -281,12 +276,11 @@
 	return *b.rack
 }
 
-//GetMetadata send a metadata request and returns a metadata response or error
+// GetMetadata sends a metadata request and returns a metadata response or error
 func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
 	response := new(MetadataResponse)
 
 	err := b.sendAndReceive(request, response)
-
 	if err != nil {
 		return nil, err
 	}
@@ -294,12 +288,11 @@
 	return response, nil
 }
 
-//GetConsumerMetadata send a consumer metadata request and returns a consumer metadata response or error
+// GetConsumerMetadata sends a consumer metadata request and returns a consumer metadata response or error
 func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
 	response := new(ConsumerMetadataResponse)
 
 	err := b.sendAndReceive(request, response)
-
 	if err != nil {
 		return nil, err
 	}
@@ -307,12 +300,11 @@
 	return response, nil
 }
 
-//FindCoordinator sends a find coordinate request and returns a response or error
+// FindCoordinator sends a find coordinator request and returns a response or error
 func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
 	response := new(FindCoordinatorResponse)
 
 	err := b.sendAndReceive(request, response)
-
 	if err != nil {
 		return nil, err
 	}
@@ -320,12 +312,11 @@
 	return response, nil
 }
 
-//GetAvailableOffsets return an offset response or error
+// GetAvailableOffsets returns an offset response or error
 func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
 	response := new(OffsetResponse)
 
 	err := b.sendAndReceive(request, response)
-
 	if err != nil {
 		return nil, err
 	}
@@ -333,7 +324,7 @@
 	return response, nil
 }
 
-//Produce returns a produce response or error
+// Produce returns a produce response or error
 func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
 	var (
 		response *ProduceResponse
@@ -354,7 +345,7 @@
 	return response, nil
 }
 
-//Fetch returns a FetchResponse or error
+// Fetch returns a FetchResponse or error
 func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
 	response := new(FetchResponse)
 
@@ -366,7 +357,7 @@
 	return response, nil
 }
 
-//CommitOffset return an Offset commit reponse or error
+// CommitOffset returns an offset commit response or error
 func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
 	response := new(OffsetCommitResponse)
 
@@ -378,9 +369,10 @@
 	return response, nil
 }
 
-//FetchOffset returns an offset fetch response or error
+// FetchOffset returns an offset fetch response or error
 func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
 	response := new(OffsetFetchResponse)
+	response.Version = request.Version // needed to handle the two header versions
 
 	err := b.sendAndReceive(request, response)
 	if err != nil {
@@ -390,7 +382,7 @@
 	return response, nil
 }
 
-//JoinGroup returns a join group response or error
+// JoinGroup returns a join group response or error
 func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
 	response := new(JoinGroupResponse)
 
@@ -402,7 +394,7 @@
 	return response, nil
 }
 
-//SyncGroup returns a sync group response or error
+// SyncGroup returns a sync group response or error
 func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
 	response := new(SyncGroupResponse)
 
@@ -414,7 +406,7 @@
 	return response, nil
 }
 
-//LeaveGroup return a leave group response or error
+// LeaveGroup returns a leave group response or error
 func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
 	response := new(LeaveGroupResponse)
 
@@ -426,7 +418,7 @@
 	return response, nil
 }
 
-//Heartbeat returns a heartbeat response or error
+// Heartbeat returns a heartbeat response or error
 func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
 	response := new(HeartbeatResponse)
 
@@ -438,7 +430,7 @@
 	return response, nil
 }
 
-//ListGroups return a list group response or error
+// ListGroups returns a list group response or error
 func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
 	response := new(ListGroupsResponse)
 
@@ -450,7 +442,7 @@
 	return response, nil
 }
 
-//DescribeGroups return describe group response or error
+// DescribeGroups returns a describe group response or error
 func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
 	response := new(DescribeGroupsResponse)
 
@@ -462,7 +454,7 @@
 	return response, nil
 }
 
-//ApiVersions return api version response or error
+// ApiVersions returns an api version response or error
 func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
 	response := new(ApiVersionsResponse)
 
@@ -474,7 +466,7 @@
 	return response, nil
 }
 
-//CreateTopics send a create topic request and returns create topic response
+// CreateTopics sends a create topic request and returns a create topic response
 func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
 	response := new(CreateTopicsResponse)
 
@@ -486,7 +478,7 @@
 	return response, nil
 }
 
-//DeleteTopics sends a delete topic request and returns delete topic response
+// DeleteTopics sends a delete topic request and returns a delete topic response
 func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
 	response := new(DeleteTopicsResponse)
 
@@ -498,8 +490,8 @@
 	return response, nil
 }
 
-//CreatePartitions sends a create partition request and returns create
-//partitions response or error
+// CreatePartitions sends a create partition request and returns a create
+// partitions response or error
 func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
 	response := new(CreatePartitionsResponse)
 
@@ -511,8 +503,34 @@
 	return response, nil
 }
 
-//DeleteRecords send a request to delete records and return delete record
-//response or error
+// AlterPartitionReassignments sends an alter partition reassignments request and
+// returns an alter partition reassignments response
+func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) {
+	response := new(AlterPartitionReassignmentsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ListPartitionReassignments sends a list partition reassignments request and
+// returns a list partition reassignments response
+func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) {
+	response := new(ListPartitionReassignmentsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteRecords sends a request to delete records and returns a delete records
+// response or error
 func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
 	response := new(DeleteRecordsResponse)
 
@@ -524,7 +542,7 @@
 	return response, nil
 }
 
-//DescribeAcls sends a describe acl request and returns a response or error
+// DescribeAcls sends a describe acl request and returns a response or error
 func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
 	response := new(DescribeAclsResponse)
 
@@ -536,7 +554,7 @@
 	return response, nil
 }
 
-//CreateAcls sends a create acl request and returns a response or error
+// CreateAcls sends a create acl request and returns a response or error
 func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
 	response := new(CreateAclsResponse)
 
@@ -548,7 +566,7 @@
 	return response, nil
 }
 
-//DeleteAcls sends a delete acl request and returns a response or error
+// DeleteAcls sends a delete acl request and returns a response or error
 func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
 	response := new(DeleteAclsResponse)
 
@@ -560,7 +578,7 @@
 	return response, nil
 }
 
-//InitProducerID sends an init producer request and returns a response or error
+// InitProducerID sends an init producer request and returns a response or error
 func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
 	response := new(InitProducerIDResponse)
 
@@ -572,8 +590,8 @@
 	return response, nil
 }
 
-//AddPartitionsToTxn send a request to add partition to txn and returns
-//a response or error
+// AddPartitionsToTxn sends a request to add partitions to a txn and returns
+// a response or error
 func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
 	response := new(AddPartitionsToTxnResponse)
 
@@ -585,8 +603,8 @@
 	return response, nil
 }
 
-//AddOffsetsToTxn sends a request to add offsets to txn and returns a response
-//or error
+// AddOffsetsToTxn sends a request to add offsets to txn and returns a response
+// or error
 func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
 	response := new(AddOffsetsToTxnResponse)
 
@@ -598,7 +616,7 @@
 	return response, nil
 }
 
-//EndTxn sends a request to end txn and returns a response or error
+// EndTxn sends a request to end txn and returns a response or error
 func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
 	response := new(EndTxnResponse)
 
@@ -610,8 +628,8 @@
 	return response, nil
 }
 
-//TxnOffsetCommit sends a request to commit transaction offsets and returns
-//a response or error
+// TxnOffsetCommit sends a request to commit transaction offsets and returns
+// a response or error
 func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
 	response := new(TxnOffsetCommitResponse)
 
@@ -623,8 +641,8 @@
 	return response, nil
 }
 
-//DescribeConfigs sends a request to describe config and returns a response or
-//error
+// DescribeConfigs sends a request to describe config and returns a response or
+// error
 func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
 	response := new(DescribeConfigsResponse)
 
@@ -636,7 +654,7 @@
 	return response, nil
 }
 
-//AlterConfigs sends a request to alter config and return a response or error
+// AlterConfigs sends a request to alter config and returns a response or error
 func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
 	response := new(AlterConfigsResponse)
 
@@ -648,7 +666,19 @@
 	return response, nil
 }
 
-//DeleteGroups sends a request to delete groups and returns a response or error
+// IncrementalAlterConfigs sends a request to incrementally alter config and returns a response or error
+func (b *Broker) IncrementalAlterConfigs(request *IncrementalAlterConfigsRequest) (*IncrementalAlterConfigsResponse, error) {
+	response := new(IncrementalAlterConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteGroups sends a request to delete groups and returns a response or error
 func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
 	response := new(DeleteGroupsResponse)
 
@@ -659,7 +689,62 @@
 	return response, nil
 }
 
-func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
+// DescribeLogDirs sends a request to get the broker's log dir paths and sizes
+func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) {
+	response := new(DescribeLogDirsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeUserScramCredentials sends a request to get SCRAM users
+func (b *Broker) DescribeUserScramCredentials(req *DescribeUserScramCredentialsRequest) (*DescribeUserScramCredentialsResponse, error) {
+	res := new(DescribeUserScramCredentialsResponse)
+
+	err := b.sendAndReceive(req, res)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, err
+}
+
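+// AlterUserScramCredentials sends a request to alter SCRAM credentials for users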
+func (b *Broker) AlterUserScramCredentials(req *AlterUserScramCredentialsRequest) (*AlterUserScramCredentialsResponse, error) {
+	res := new(AlterUserScramCredentialsResponse)
+
+	err := b.sendAndReceive(req, res)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+// readFull ensures the conn ReadDeadline has been set up before making a
+// call to io.ReadFull
+func (b *Broker) readFull(buf []byte) (n int, err error) {
+	if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil {
+		return 0, err
+	}
+
+	return io.ReadFull(b.conn, buf)
+}
+
+// write ensures the conn WriteDeadline has been set up before making a
+// call to conn.Write
+func (b *Broker) write(buf []byte) (n int, err error) {
+	if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
+		return 0, err
+	}
+
+	return b.conn.Write(buf)
+}
+
+func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) {
 	b.lock.Lock()
 	defer b.lock.Unlock()
 
@@ -680,33 +765,36 @@
 		return nil, err
 	}
 
-	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
-	if err != nil {
-		return nil, err
-	}
-
 	requestTime := time.Now()
-	bytes, err := b.conn.Write(buf)
-	b.updateOutgoingCommunicationMetrics(bytes) //TODO: should it be after error check
+	// Will be decremented in responseReceiver (except error or request with NoResponse)
+	b.addRequestInFlightMetrics(1)
+	bytes, err := b.write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
 	if err != nil {
+		b.addRequestInFlightMetrics(-1)
 		return nil, err
 	}
 	b.correlationID++
 
 	if !promiseResponse {
 		// Record request latency without the response
-		b.updateRequestLatencyMetrics(time.Since(requestTime))
+		b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime))
 		return nil, nil
 	}
 
-	promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
+	promise := responsePromise{requestTime, req.correlationID, responseHeaderVersion, make(chan []byte), make(chan error)}
 	b.responses <- promise
 
 	return &promise, nil
 }
 
-func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
-	promise, err := b.send(req, res != nil)
+func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error {
+	responseHeaderVersion := int16(-1)
+	if res != nil {
+		responseHeaderVersion = res.headerVersion()
+	}
+
+	promise, err := b.send(req, res != nil, responseHeaderVersion)
 	if err != nil {
 		return err
 	}
@@ -760,7 +848,7 @@
 		return err
 	}
 
-	port, err := strconv.Atoi(portstr)
+	port, err := strconv.ParseInt(portstr, 10, 32)
 	if err != nil {
 		return err
 	}
@@ -786,22 +874,20 @@
 
 func (b *Broker) responseReceiver() {
 	var dead error
-	header := make([]byte, 8)
 
 	for response := range b.responses {
 		if dead != nil {
+			// This was previously incremented in send() and
+			// we are not calling updateIncomingCommunicationMetrics()
+			b.addRequestInFlightMetrics(-1)
 			response.errors <- dead
 			continue
 		}
 
-		err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
-		if err != nil {
-			dead = err
-			response.errors <- err
-			continue
-		}
+		headerLength := getHeaderLength(response.headerVersion)
+		header := make([]byte, headerLength)
 
-		bytesReadHeader, err := io.ReadFull(b.conn, header)
+		bytesReadHeader, err := b.readFull(header)
 		requestLatency := time.Since(response.requestTime)
 		if err != nil {
 			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
@@ -811,7 +897,7 @@
 		}
 
 		decodedHeader := responseHeader{}
-		err = decode(header, &decodedHeader)
+		err = versionedDecode(header, &decodedHeader, response.headerVersion)
 		if err != nil {
 			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
 			dead = err
@@ -827,8 +913,8 @@
 			continue
 		}
 
-		buf := make([]byte, decodedHeader.length-4)
-		bytesReadBody, err := io.ReadFull(b.conn, buf)
+		buf := make([]byte, decodedHeader.length-int32(headerLength)+4)
+		bytesReadBody, err := b.readFull(buf)
 		b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
 		if err != nil {
 			dead = err
@@ -841,6 +927,15 @@
 	close(b.done)
 }
 
+func getHeaderLength(headerVersion int16) int8 {
+	if headerVersion < 1 {
+		return 8
+	} else {
+		// header contains additional tagged field length (0), we don't support actual tags yet.
+		return 9
+	}
+}
+
 func (b *Broker) authenticateViaSASL() error {
 	switch b.conf.Net.SASL.Mechanism {
 	case SASLTypeOAuth:
@@ -871,31 +966,31 @@
 		return err
 	}
 
-	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
-	if err != nil {
-		return err
-	}
-
 	requestTime := time.Now()
-	bytes, err := b.conn.Write(buf)
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
+	bytes, err := b.write(buf)
 	b.updateOutgoingCommunicationMetrics(bytes)
 	if err != nil {
+		b.addRequestInFlightMetrics(-1)
 		Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
 		return err
 	}
 	b.correlationID++
-	//wait for the response
+
 	header := make([]byte, 8) // response header
-	_, err = io.ReadFull(b.conn, header)
+	_, err = b.readFull(header)
 	if err != nil {
+		b.addRequestInFlightMetrics(-1)
 		Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
 		return err
 	}
 
 	length := binary.BigEndian.Uint32(header[:4])
 	payload := make([]byte, length-4)
-	n, err := io.ReadFull(b.conn, payload)
+	n, err := b.readFull(payload)
 	if err != nil {
+		b.addRequestInFlightMetrics(-1)
 		Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
 		return err
 	}
@@ -943,10 +1038,9 @@
 // When credentials are invalid, Kafka replies with a SaslAuthenticate response
 // containing an error code and message detailing the authentication failure.
 func (b *Broker) sendAndReceiveSASLPlainAuth() error {
-	// default to V0 to allow for backward compatability when SASL is enabled
+	// default to V0 to allow for backward compatibility when SASL is enabled
 	// but not the handshake
 	if b.conf.Net.SASL.Handshake {
-
 		handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version)
 		if handshakeErr != nil {
 			Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
@@ -962,28 +1056,24 @@
 
 // sendAndReceiveV0SASLPlainAuth flows the v0 sasl auth NOT wrapped in the kafka protocol
 func (b *Broker) sendAndReceiveV0SASLPlainAuth() error {
-
-	length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
-	authBytes := make([]byte, length+4) //4 byte length header + auth data
+	length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
+	authBytes := make([]byte, length+4) // 4 byte length header + auth data
 	binary.BigEndian.PutUint32(authBytes, uint32(length))
-	copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
-
-	err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
-	if err != nil {
-		Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
-		return err
-	}
+	copy(authBytes[4:], b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)
 
 	requestTime := time.Now()
-	bytesWritten, err := b.conn.Write(authBytes)
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
+	bytesWritten, err := b.write(authBytes)
 	b.updateOutgoingCommunicationMetrics(bytesWritten)
 	if err != nil {
+		b.addRequestInFlightMetrics(-1)
 		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
 		return err
 	}
 
 	header := make([]byte, 4)
-	n, err := io.ReadFull(b.conn, header)
+	n, err := b.readFull(header)
 	b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
 	// If the credentials are valid, we would get a 4 byte response filled with null characters.
 	// Otherwise, the broker closes the connection and we get an EOF
@@ -1002,11 +1092,13 @@
 
 	requestTime := time.Now()
 
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
 	bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID)
-
 	b.updateOutgoingCommunicationMetrics(bytesWritten)
 
 	if err != nil {
+		b.addRequestInFlightMetrics(-1)
 		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
 		return err
 	}
@@ -1059,16 +1151,18 @@
 // if the broker responds with a challenge, in which case the token is
 // rejected.
 func (b *Broker) sendClientMessage(message []byte) (bool, error) {
-
 	requestTime := time.Now()
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
 	correlationID := b.correlationID
 
 	bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID)
+	b.updateOutgoingCommunicationMetrics(bytesWritten)
 	if err != nil {
+		b.addRequestInFlightMetrics(-1)
 		return false, err
 	}
 
-	b.updateOutgoingCommunicationMetrics(bytesWritten)
 	b.correlationID++
 
 	res := &SaslAuthenticateResponse{}
@@ -1099,22 +1193,25 @@
 	msg, err := scramClient.Step("")
 	if err != nil {
 		return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error())
-
 	}
 
 	for !scramClient.Done() {
 		requestTime := time.Now()
+		// Will be decremented in updateIncomingCommunicationMetrics (except error)
+		b.addRequestInFlightMetrics(1)
 		correlationID := b.correlationID
 		bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg))
+		b.updateOutgoingCommunicationMetrics(bytesWritten)
 		if err != nil {
+			b.addRequestInFlightMetrics(-1)
 			Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
 			return err
 		}
 
-		b.updateOutgoingCommunicationMetrics(bytesWritten)
 		b.correlationID++
 		challenge, err := b.receiveSaslAuthenticateResponse(correlationID)
 		if err != nil {
+			b.addRequestInFlightMetrics(-1)
 			Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
 			return err
 		}
@@ -1139,22 +1236,18 @@
 		return 0, err
 	}
 
-	if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
-		return 0, err
-	}
-
-	return b.conn.Write(buf)
+	return b.write(buf)
 }
 
 func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) {
 	buf := make([]byte, responseLengthSize+correlationIDSize)
-	_, err := io.ReadFull(b.conn, buf)
+	_, err := b.readFull(buf)
 	if err != nil {
 		return nil, err
 	}
 
 	header := responseHeader{}
-	err = decode(buf, &header)
+	err = versionedDecode(buf, &header, 0)
 	if err != nil {
 		return nil, err
 	}
@@ -1164,7 +1257,7 @@
 	}
 
 	buf = make([]byte, header.length-correlationIDSize)
-	_, err = io.ReadFull(b.conn, buf)
+	_, err = b.readFull(buf)
 	if err != nil {
 		return nil, err
 	}
@@ -1211,7 +1304,7 @@
 }
 
 func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) {
-	authBytes := []byte("\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password)
+	authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password)
 	rb := &SaslAuthenticateRequest{authBytes}
 	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
 	buf, err := encode(req, b.conf.MetricRegistry)
@@ -1219,16 +1312,10 @@
 		return 0, err
 	}
 
-	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
-	if err != nil {
-		Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
-		return 0, err
-	}
-	return b.conn.Write(buf)
+	return b.write(buf)
 }
 
 func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) {
-
 	rb := &SaslAuthenticateRequest{initialResp}
 
 	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
@@ -1238,25 +1325,18 @@
 		return 0, err
 	}
 
-	if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
-		return 0, err
-	}
-
-	return b.conn.Write(buf)
+	return b.write(buf)
 }
 
 func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correlationID int32) (int, error) {
-
 	buf := make([]byte, responseLengthSize+correlationIDSize)
-
-	bytesRead, err := io.ReadFull(b.conn, buf)
+	bytesRead, err := b.readFull(buf)
 	if err != nil {
 		return bytesRead, err
 	}
 
 	header := responseHeader{}
-
-	err = decode(buf, &header)
+	err = versionedDecode(buf, &header, 0)
 	if err != nil {
 		return bytesRead, err
 	}
@@ -1266,8 +1346,7 @@
 	}
 
 	buf = make([]byte, header.length-correlationIDSize)
-
-	c, err := io.ReadFull(b.conn, buf)
+	c, err := b.readFull(buf)
 	bytesRead += c
 	if err != nil {
 		return bytesRead, err
@@ -1285,7 +1364,7 @@
 }
 
 func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
-	b.updateRequestLatencyMetrics(requestLatency)
+	b.updateRequestLatencyAndInFlightMetrics(requestLatency)
 	b.responseRate.Mark(1)
 
 	if b.brokerResponseRate != nil {
@@ -1304,7 +1383,7 @@
 	}
 }
 
-func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
+func (b *Broker) updateRequestLatencyAndInFlightMetrics(requestLatency time.Duration) {
 	requestLatencyInMs := int64(requestLatency / time.Millisecond)
 	b.requestLatency.Update(requestLatencyInMs)
 
@@ -1312,6 +1391,14 @@
 		b.brokerRequestLatency.Update(requestLatencyInMs)
 	}
 
+	b.addRequestInFlightMetrics(-1)
+}
+
+func (b *Broker) addRequestInFlightMetrics(i int64) {
+	b.requestsInFlight.Inc(i)
+	if b.brokerRequestsInFlight != nil {
+		b.brokerRequestsInFlight.Inc(i)
+	}
 }
 
 func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
@@ -1330,7 +1417,6 @@
 	if b.brokerRequestSize != nil {
 		b.brokerRequestSize.Update(requestSize)
 	}
-
 }
 
 func (b *Broker) registerMetrics() {
@@ -1341,12 +1427,14 @@
 	b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate")
 	b.brokerResponseRate = b.registerMeter("response-rate")
 	b.brokerResponseSize = b.registerHistogram("response-size")
+	b.brokerRequestsInFlight = b.registerCounter("requests-in-flight")
 }
 
 func (b *Broker) unregisterMetrics() {
 	for _, name := range b.registeredMetrics {
 		b.conf.MetricRegistry.Unregister(name)
 	}
+	b.registeredMetrics = nil
 }
 
 func (b *Broker) registerMeter(name string) metrics.Meter {
@@ -1360,3 +1448,28 @@
 	b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
 	return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry)
 }
+
+func (b *Broker) registerCounter(name string) metrics.Counter {
+	nameForBroker := getMetricNameForBroker(name, b)
+	b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
+	return metrics.GetOrRegisterCounter(nameForBroker, b.conf.MetricRegistry)
+}
+
+func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		cfg = &tls.Config{
+			MinVersion: tls.VersionTLS12,
+		}
+	}
+	if cfg.ServerName != "" {
+		return cfg
+	}
+
+	c := cfg.Clone()
+	sn, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		Logger.Println(fmt.Errorf("failed to get ServerName from addr: %w", err))
+	}
+	c.ServerName = sn
+	return c
+}
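
The headerVersion plumbing added above lets responseReceiver read either the classic 8-byte response header or the 9-byte flexible-version header (which carries an empty tagged-fields byte), and then size the remaining body as decodedHeader.length minus the header bytes already consumed. A minimal sketch of that arithmetic, assuming as the code does that the 4-byte length prefix excludes itself but counts everything after it:

```go
// Minimal sketch (not Sarama code) of the response framing arithmetic used by
// responseReceiver above: body bytes = encoded length - (header length - 4).
package main

import "fmt"

// headerLength mirrors getHeaderLength above.
func headerLength(headerVersion int16) int32 {
	if headerVersion < 1 {
		return 8 // 4-byte length prefix + 4-byte correlation id
	}
	return 9 // plus one byte for the (empty) tagged-fields section
}

// bodyLength is how many bytes remain to read once the header is consumed.
func bodyLength(encodedLength int32, headerVersion int16) int32 {
	return encodedLength - headerLength(headerVersion) + 4
}

func main() {
	fmt.Println(bodyLength(100, 0)) // 96 bytes of body after a v0 header
	fmt.Println(bodyLength(100, 1)) // 95 bytes of body after a v1 header
}
```
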
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
index c4c54b2..c0918ba 100644
--- a/vendor/github.com/Shopify/sarama/client.go
+++ b/vendor/github.com/Shopify/sarama/client.go
@@ -17,12 +17,21 @@
 	// altered after it has been created.
 	Config() *Config
 
-	// Controller returns the cluster controller broker. Requires Kafka 0.10 or higher.
+	// Controller returns the cluster controller broker. It will return a
+	// locally cached value if it's available. You can call RefreshController
+	// to update the cached value. Requires Kafka 0.10 or higher.
 	Controller() (*Broker, error)
 
+	// RefreshController retrieves the cluster controller from fresh metadata
+	// and stores it in the local cache. Requires Kafka 0.10 or higher.
+	RefreshController() (*Broker, error)
+
 	// Brokers returns the current set of active brokers as retrieved from cluster metadata.
 	Brokers() []*Broker
 
+	// Broker returns the active Broker if available for the broker ID.
+	Broker(brokerID int32) (*Broker, error)
+
 	// Topics returns the set of available topics as retrieved from cluster metadata.
 	Topics() ([]string, error)
 
@@ -50,6 +59,11 @@
 	// partition. Offline replicas are replicas which are offline
 	OfflineReplicas(topic string, partitionID int32) ([]int32, error)
 
+	// RefreshBrokers takes a list of addresses to be used as seed brokers.
+	// Existing broker connections are closed and the updated list of seed brokers
+	// will be used for the next metadata fetch.
+	RefreshBrokers(addrs []string) error
+
 	// RefreshMetadata takes a list of topics and queries the cluster to refresh the
 	// available metadata for those topics. If no topics are provided, it will refresh
 	// metadata for all topics.
@@ -149,10 +163,7 @@
 		coordinators:            make(map[string]int32),
 	}
 
-	random := rand.New(rand.NewSource(time.Now().UnixNano()))
-	for _, index := range random.Perm(len(addrs)) {
-		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
-	}
+	client.randomizeSeedBrokers(addrs)
 
 	if conf.Metadata.Full {
 		// do an initial fetch of all cluster metadata by specifying an empty list of topics
@@ -190,10 +201,20 @@
 	return brokers
 }
 
+func (client *client) Broker(brokerID int32) (*Broker, error) {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	broker, ok := client.brokers[brokerID]
+	if !ok {
+		return nil, ErrBrokerNotFound
+	}
+	_ = broker.Open(client.conf)
+	return broker, nil
+}
+
 func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
 	var err error
 	for broker := client.any(); broker != nil; broker = client.any() {
-
 		req := &InitProducerIDRequest{}
 
 		response, err := broker.InitProducerID(req)
@@ -242,6 +263,9 @@
 }
 
 func (client *client) Closed() bool {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
 	return client.brokers == nil
 }
 
@@ -421,6 +445,27 @@
 	return leader, err
 }
 
+func (client *client) RefreshBrokers(addrs []string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	for _, broker := range client.brokers {
+		_ = broker.Close()
+		delete(client.brokers, broker.ID())
+	}
+
+	client.seedBrokers = nil
+	client.deadSeeds = nil
+
+	client.randomizeSeedBrokers(addrs)
+
+	return nil
+}
+
 func (client *client) RefreshMetadata(topics ...string) error {
 	if client.Closed() {
 		return ErrClosedClient
@@ -430,7 +475,7 @@
 	// error. This handles the case by returning an error instead of sending it
 	// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
 	for _, topic := range topics {
-		if len(topic) == 0 {
+		if topic == "" {
 			return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
 		}
 	}
@@ -448,7 +493,6 @@
 	}
 
 	offset, err := client.getOffset(topic, partitionID, time)
-
 	if err != nil {
 		if err := client.RefreshMetadata(topic); err != nil {
 			return -1, err
@@ -484,6 +528,35 @@
 	return controller, nil
 }
 
+// deregisterController removes the cached controllerID
+func (client *client) deregisterController() {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	delete(client.brokers, client.controllerID)
+}
+
+// RefreshController retrieves the cluster controller from fresh metadata
+// and stores it in the local cache. Requires Kafka 0.10 or higher.
+func (client *client) RefreshController() (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	client.deregisterController()
+
+	if err := client.refreshMetadata(); err != nil {
+		return nil, err
+	}
+
+	controller := client.cachedController()
+	if controller == nil {
+		return nil, ErrControllerNotAvailable
+	}
+
+	_ = controller.Open(client.conf)
+	return controller, nil
+}
+
 func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
 	if client.Closed() {
 		return nil, ErrClosedClient
@@ -525,10 +598,46 @@
 
 // private broker management helpers
 
+func (client *client) randomizeSeedBrokers(addrs []string) {
+	random := rand.New(rand.NewSource(time.Now().UnixNano()))
+	for _, index := range random.Perm(len(addrs)) {
+		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
+	}
+}
+
+func (client *client) updateBroker(brokers []*Broker) {
+	currentBroker := make(map[int32]*Broker, len(brokers))
+
+	for _, broker := range brokers {
+		currentBroker[broker.ID()] = broker
+		if client.brokers[broker.ID()] == nil { // add new broker
+			client.brokers[broker.ID()] = broker
+			Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+		} else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address
+			safeAsyncClose(client.brokers[broker.ID()])
+			client.brokers[broker.ID()] = broker
+			Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+		}
+	}
+
+	for id, broker := range client.brokers {
+		if _, exist := currentBroker[id]; !exist { // remove old broker
+			safeAsyncClose(broker)
+			delete(client.brokers, id)
+			Logger.Printf("client/broker remove invalid broker #%d with %s", broker.ID(), broker.Addr())
+		}
+	}
+}
+
 // registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
 // in the brokers map. It returns the broker that is registered, which may be the provided broker,
 // or a previously registered Broker instance. You must hold the write lock before calling this function.
 func (client *client) registerBroker(broker *Broker) {
+	if client.brokers == nil {
+		Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr())
+		return
+	}
+
 	if client.brokers[broker.ID()] == nil {
 		client.brokers[broker.ID()] = broker
 		Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
@@ -722,7 +831,7 @@
 }
 
 func (client *client) refreshMetadata() error {
-	topics := []string{}
+	var topics []string
 
 	if !client.conf.Metadata.Full {
 		if specificTopics, err := client.MetadataTopics(); err != nil {
@@ -756,7 +865,7 @@
 				Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
 				return err
 			}
-			Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+			Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
 			if backoff > 0 {
 				time.Sleep(backoff)
 			}
@@ -782,7 +891,7 @@
 			req.Version = 1
 		}
 		response, err := broker.GetMetadata(req)
-		switch err.(type) {
+		switch err := err.(type) {
 		case nil:
 			allKnownMetaData := len(topics) == 0
 			// valid response, use it
@@ -799,10 +908,15 @@
 
 		case KError:
 			// if SASL auth error return as this _should_ be a non retryable err for all brokers
-			if err.(KError) == ErrSASLAuthenticationFailed {
+			if err == ErrSASLAuthenticationFailed {
 				Logger.Println("client/metadata failed SASL authentication")
 				return err
 			}
+
+			if err == ErrTopicAuthorizationFailed {
+				Logger.Println("client is not authorized to access this topic. The topics were: ", topics)
+				return err
+			}
 			// else remove that broker and try again
 			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
 			_ = broker.Close()
@@ -817,7 +931,7 @@
 	}
 
 	if broker != nil {
-		Logger.Println("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
+		Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
 		return retry(ErrOutOfBrokers)
 	}
 
@@ -828,16 +942,19 @@
 
 // if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
 func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
+	if client.Closed() {
+		return
+	}
+
 	client.lock.Lock()
 	defer client.lock.Unlock()
 
 	// For all the brokers we received:
 	// - if it is a new ID, save it
 	// - if it is an existing ID, but the address we have is stale, discard the old one and save it
+	// - if a broker is no longer listed in the response, remove it
 	// - otherwise ignore it, replacing our existing one would just bounce the connection
-	for _, broker := range data.Brokers {
-		client.registerBroker(broker)
-	}
+	client.updateBroker(data.Brokers)
 
 	client.controllerID = data.ControllerID
 
@@ -935,7 +1052,6 @@
 		request.CoordinatorType = CoordinatorGroup
 
 		response, err := broker.FindCoordinator(request)
-
 		if err != nil {
 			Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
 
@@ -966,6 +1082,10 @@
 			}
 
 			return retry(ErrConsumerCoordinatorNotAvailable)
+		case ErrGroupAuthorizationFailed:
+			Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup)
+			return retry(ErrGroupAuthorizationFailed)
+
 		default:
 			return nil, response.Err
 		}
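
For orientation, a hedged usage sketch of the client methods introduced above (RefreshBrokers, RefreshController and Broker); the broker addresses and Kafka version are placeholders, not values taken from this change:

```go
// Illustrative use of the new client methods; error handling is minimal.
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0

	client, err := sarama.NewClient([]string{"kafka-0:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Re-seed the bootstrap list and drop existing broker connections.
	if err := client.RefreshBrokers([]string{"kafka-1:9092", "kafka-2:9092"}); err != nil {
		log.Fatal(err)
	}

	// Force a fresh controller lookup instead of using the cached value.
	controller, err := client.RefreshController()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("controller is broker #%d at %s", controller.ID(), controller.Addr())

	// Look up an already-registered broker by id.
	if b, err := client.Broker(controller.ID()); err == nil {
		log.Println("broker connection available:", b.Addr())
	}
}
```
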
diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go
index 94b716e..12cd7c3 100644
--- a/vendor/github.com/Shopify/sarama/compress.go
+++ b/vendor/github.com/Shopify/sarama/compress.go
@@ -6,7 +6,7 @@
 	"fmt"
 	"sync"
 
-	"github.com/eapache/go-xerial-snappy"
+	snappy "github.com/eapache/go-xerial-snappy"
 	"github.com/pierrec/lz4"
 )
 
@@ -22,6 +22,87 @@
 			return gzip.NewWriter(nil)
 		},
 	}
+	gzipWriterPoolForCompressionLevel1 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 1)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel2 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 2)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel3 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 3)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel4 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 4)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel5 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 5)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel6 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 6)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel7 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 7)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel8 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 8)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel9 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 9)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
 )
 
 func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
@@ -34,15 +115,53 @@
 			buf    bytes.Buffer
 			writer *gzip.Writer
 		)
-		if level != CompressionLevelDefault {
+
+		switch level {
+		case CompressionLevelDefault:
+			writer = gzipWriterPool.Get().(*gzip.Writer)
+			defer gzipWriterPool.Put(writer)
+			writer.Reset(&buf)
+		case 1:
+			writer = gzipWriterPoolForCompressionLevel1.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel1.Put(writer)
+			writer.Reset(&buf)
+		case 2:
+			writer = gzipWriterPoolForCompressionLevel2.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel2.Put(writer)
+			writer.Reset(&buf)
+		case 3:
+			writer = gzipWriterPoolForCompressionLevel3.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel3.Put(writer)
+			writer.Reset(&buf)
+		case 4:
+			writer = gzipWriterPoolForCompressionLevel4.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel4.Put(writer)
+			writer.Reset(&buf)
+		case 5:
+			writer = gzipWriterPoolForCompressionLevel5.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel5.Put(writer)
+			writer.Reset(&buf)
+		case 6:
+			writer = gzipWriterPoolForCompressionLevel6.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel6.Put(writer)
+			writer.Reset(&buf)
+		case 7:
+			writer = gzipWriterPoolForCompressionLevel7.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel7.Put(writer)
+			writer.Reset(&buf)
+		case 8:
+			writer = gzipWriterPoolForCompressionLevel8.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel8.Put(writer)
+			writer.Reset(&buf)
+		case 9:
+			writer = gzipWriterPoolForCompressionLevel9.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel9.Put(writer)
+			writer.Reset(&buf)
+		default:
 			writer, err = gzip.NewWriterLevel(&buf, level)
 			if err != nil {
 				return nil, err
 			}
-		} else {
-			writer = gzipWriterPool.Get().(*gzip.Writer)
-			defer gzipWriterPool.Put(writer)
-			writer.Reset(&buf)
 		}
 		if _, err := writer.Write(data); err != nil {
 			return nil, err
@@ -68,7 +187,7 @@
 		}
 		return buf.Bytes(), nil
 	case CompressionZSTD:
-		return zstdCompressLevel(nil, data, level)
+		return zstdCompress(nil, data)
 	default:
 		return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
 	}
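
The change above gives every gzip compression level its own sync.Pool so non-default levels also reuse writers instead of allocating one per batch. A condensed sketch of the same idea, written as an illustration rather than the vendored implementation (which spells the nine pools out explicitly):

```go
// Condensed sketch (assumption: not Sarama's actual code) of per-level pooled
// gzip writers, reused across compress calls via writer.Reset.
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

var gzipPools [gzip.BestCompression + 1]sync.Pool // indices 1..9 used

func init() {
	for level := gzip.BestSpeed; level <= gzip.BestCompression; level++ {
		level := level // capture per-iteration value for the closure
		gzipPools[level].New = func() interface{} {
			w, err := gzip.NewWriterLevel(nil, level)
			if err != nil {
				panic(err)
			}
			return w
		}
	}
}

func gzipCompress(level int, data []byte) ([]byte, error) {
	var buf bytes.Buffer
	w := gzipPools[level].Get().(*gzip.Writer)
	defer gzipPools[level].Put(w)
	w.Reset(&buf)
	if _, err := w.Write(data); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	out, err := gzipCompress(6, []byte("hello hello hello"))
	fmt.Println(len(out), err)
}
```

Keeping one pool per level avoids the bookkeeping a single shared pool would need to reset a reused writer to a different compression level.
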
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
index e2e6513..43e739c 100644
--- a/vendor/github.com/Shopify/sarama/config.go
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -21,6 +21,13 @@
 type Config struct {
 	// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
 	Admin struct {
+		Retry struct {
+			// The total number of times to retry sending (retriable) admin requests (default 5).
+			// Similar to the `retries` setting of the JVM AdminClientConfig.
+			Max int
+			// Backoff time between retries of a failed request (default 100ms)
+			Backoff time.Duration
+		}
 		// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
 		// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
 		Timeout time.Duration
@@ -65,8 +72,15 @@
 			// (defaults to true). You should only set this to false if you're using
 			// a non-Kafka SASL proxy.
 			Handshake bool
-			//username and password for SASL/PLAIN  or SASL/SCRAM authentication
-			User     string
+			// AuthIdentity is an (optional) authorization identity (authzid) to
+			// use for SASL/PLAIN authentication (if different from User) when
+			// an authenticated user is permitted to act as the presented
+			// alternative user. See RFC4616 for details.
+			AuthIdentity string
+			// User is the authentication identity (authcid) to present for
+			// SASL/PLAIN or SASL/SCRAM authentication
+			User string
+			// Password for SASL/PLAIN authentication
 			Password string
 			// authz id used for SASL/SCRAM authentication
 			SCRAMAuthzID string
@@ -82,8 +96,9 @@
 			GSSAPI GSSAPIConfig
 		}
 
-		// KeepAlive specifies the keep-alive period for an active network connection.
-		// If zero, keep-alives are disabled. (default is 0: disabled).
+		// KeepAlive specifies the keep-alive period for an active network connection (defaults to 0).
+		// If zero or positive, keep-alives are enabled.
+		// If negative, keep-alives are disabled.
 		KeepAlive time.Duration
 
 		// LocalAddr is the local address to use when dialing an
@@ -214,6 +229,14 @@
 			// `Backoff` if set.
 			BackoffFunc func(retries, maxRetries int) time.Duration
 		}
+
+		// Interceptors to be called when the producer dispatcher reads the
+		// message for the first time. Interceptors allow you to intercept and
+		// possibly mutate the message before it is published to the Kafka
+		// cluster. A *ProducerMessage modified by the first interceptor's
+		// OnSend() is passed to the second interceptor OnSend(), and so on in
+		// the interceptor chain.
+		Interceptors []ProducerInterceptor
 	}
 
 	// Consumer is the namespace for configuration related to consuming messages,
@@ -312,7 +335,7 @@
 		// than this, that partition will stop fetching more messages until it
 		// can proceed again.
 		// Note that, since the Messages channel is buffered, the actual grace time is
-		// (MaxProcessingTime * ChanneBufferSize). Defaults to 100ms.
+		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
 		// If a message is not written to the Messages channel between two ticks
 		// of the expiryTicker then a timeout is detected.
 		// Using a ticker instead of a timer to detect timeouts should typically
@@ -338,9 +361,21 @@
 		// offsets. This currently requires the manual use of an OffsetManager
 		// but will eventually be automated.
 		Offsets struct {
-			// How frequently to commit updated offsets. Defaults to 1s.
+			// Deprecated: CommitInterval exists for historical compatibility
+			// and should not be used. Please use Consumer.Offsets.AutoCommit
 			CommitInterval time.Duration
 
+			// AutoCommit specifies configuration for committing updated offsets automatically.
+			AutoCommit struct {
+				// Whether or not to auto-commit updated offsets back to the broker.
+				// (default enabled).
+				Enable bool
+
+				// How frequently to commit updated offsets. Ineffective unless
+				// auto-commit is enabled (default 1s)
+				Interval time.Duration
+			}
+
 			// The initial offset to use if no offset was previously committed.
 			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
 			Initial int64
@@ -364,12 +399,24 @@
 		// 	- use `ReadUncommitted` (default) to consume and return all messages in message channel
 		//	- use `ReadCommitted` to hide messages that are part of an aborted transaction
 		IsolationLevel IsolationLevel
+
+		// Interceptors to be called just before the record is sent to the
+		// messages channel. Interceptors allow you to intercept and possibly
+		// mutate the message before it is returned to the client.
+		// A *ConsumerMessage modified by the first interceptor's OnConsume() is
+		// passed to the second interceptor OnConsume(), and so on in the
+		// interceptor chain.
+		Interceptors []ConsumerInterceptor
 	}
 
 	// A user-provided string sent with every request to the brokers for logging,
 	// debugging, and auditing purposes. Defaults to "sarama", but you should
 	// probably set it to something specific to your application.
 	ClientID string
+	// A rack identifier for this client. This can be any string value which
+	// indicates where this client is physically located.
+	// It corresponds with the broker config 'broker.rack'
+	RackID string
 	// The number of events to buffer in internal and external channels. This
 	// permits the producer and consumer to continue processing some messages
 	// in the background while user code is working, greatly improving throughput.
@@ -394,6 +441,8 @@
 func NewConfig() *Config {
 	c := &Config{}
 
+	c.Admin.Retry.Max = 5
+	c.Admin.Retry.Backoff = 100 * time.Millisecond
 	c.Admin.Timeout = 3 * time.Second
 
 	c.Net.MaxOpenRequests = 5
@@ -423,7 +472,8 @@
 	c.Consumer.MaxWaitTime = 250 * time.Millisecond
 	c.Consumer.MaxProcessingTime = 100 * time.Millisecond
 	c.Consumer.Return.Errors = false
-	c.Consumer.Offsets.CommitInterval = 1 * time.Second
+	c.Consumer.Offsets.AutoCommit.Enable = true
+	c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
 	c.Consumer.Offsets.Initial = OffsetNewest
 	c.Consumer.Offsets.Retry.Max = 3
 
@@ -436,7 +486,7 @@
 
 	c.ClientID = defaultClientID
 	c.ChannelBufferSize = 256
-	c.Version = MinVersion
+	c.Version = DefaultVersion
 	c.MetricRegistry = metrics.NewRegistry()
 
 	return c
@@ -504,8 +554,6 @@
 		return ConfigurationError("Net.ReadTimeout must be > 0")
 	case c.Net.WriteTimeout <= 0:
 		return ConfigurationError("Net.WriteTimeout must be > 0")
-	case c.Net.KeepAlive < 0:
-		return ConfigurationError("Net.KeepAlive must be >= 0")
 	case c.Net.SASL.Enable:
 		if c.Net.SASL.Mechanism == "" {
 			c.Net.SASL.Mechanism = SASLTypePlaintext
@@ -621,6 +669,10 @@
 		}
 	}
 
+	if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) {
+		return ConfigurationError("zstd compression requires Version >= V2_1_0_0")
+	}
+
 	if c.Producer.Idempotent {
 		if !c.Version.IsAtLeast(V0_11_0_0) {
 			return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
@@ -650,8 +702,8 @@
 		return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
 	case c.Consumer.Retry.Backoff < 0:
 		return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
-	case c.Consumer.Offsets.CommitInterval <= 0:
-		return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
+	case c.Consumer.Offsets.AutoCommit.Interval <= 0:
+		return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0")
 	case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
 		return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
 	case c.Consumer.Offsets.Retry.Max < 0:
@@ -660,6 +712,11 @@
 		return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
 	}
 
+	if c.Consumer.Offsets.CommitInterval != 0 {
+		Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" +
+			" and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored")
+	}
+
 	// validate IsolationLevel
 	if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
 		return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
@@ -693,3 +750,16 @@
 
 	return nil
 }
+
+func (c *Config) getDialer() proxy.Dialer {
+	if c.Net.Proxy.Enable {
+		Logger.Printf("using proxy %s", c.Net.Proxy.Dialer)
+		return c.Net.Proxy.Dialer
+	} else {
+		return &net.Dialer{
+			Timeout:   c.Net.DialTimeout,
+			KeepAlive: c.Net.KeepAlive,
+			LocalAddr: c.Net.LocalAddr,
+		}
+	}
+}
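
The config changes above introduce consumer auto-commit settings, a RackID for rack-aware fetching, admin retry defaults, and interceptor hooks. Below is a minimal sketch of how a client might opt into them; the broker version, rack name, and intervals are illustrative assumptions, not values mandated by this change.

```go
package main

import (
	"fmt"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()

	// Rack-aware fetching from the closest replica needs Kafka >= 2.3 and a RackID
	// matching the broker.rack setting of the nearby brokers (values are placeholders).
	cfg.Version = sarama.V2_3_0_0
	cfg.RackID = "rack-1"

	// Offsets are now auto-committed by default; the deprecated CommitInterval is ignored.
	cfg.Consumer.Offsets.AutoCommit.Enable = true
	cfg.Consumer.Offsets.AutoCommit.Interval = 5 * time.Second

	// Admin operations now retry with these defaults; override as needed.
	cfg.Admin.Retry.Max = 5
	cfg.Admin.Retry.Backoff = 100 * time.Millisecond

	fmt.Println("consumer config ready, client id:", cfg.ClientID)
}
```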
diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go
index 5399d75..bef1053 100644
--- a/vendor/github.com/Shopify/sarama/config_resource_type.go
+++ b/vendor/github.com/Shopify/sarama/config_resource_type.go
@@ -1,22 +1,18 @@
 package sarama
 
-//ConfigResourceType is a type for config resource
+// ConfigResourceType is a type for resources that have configs.
 type ConfigResourceType int8
 
-// Taken from :
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes
+// Taken from:
+// https://github.com/apache/kafka/blob/ed7c071e07f1f90e4c2895582f61ca090ced3c42/clients/src/main/java/org/apache/kafka/common/config/ConfigResource.java#L32-L55
 
 const (
-	//UnknownResource constant type
-	UnknownResource ConfigResourceType = iota
-	//AnyResource constant type
-	AnyResource
-	//TopicResource constant type
-	TopicResource
-	//GroupResource constant type
-	GroupResource
-	//ClusterResource constant type
-	ClusterResource
-	//BrokerResource constant type
-	BrokerResource
+	// UnknownResource constant type
+	UnknownResource ConfigResourceType = 0
+	// TopicResource constant type
+	TopicResource ConfigResourceType = 2
+	// BrokerResource constant type
+	BrokerResource ConfigResourceType = 4
+	// BrokerLoggerResource constant type
+	BrokerLoggerResource ConfigResourceType = 8
 )
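
The resource-type constants now carry their Kafka protocol values explicitly (and AnyResource, GroupResource, and ClusterResource are dropped). A hedged sketch of how TopicResource is typically used with the admin client's DescribeConfig call follows; the broker address and topic name are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0 // any broker version that supports DescribeConfigs

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	// TopicResource (protocol value 2) selects topic-level configuration entries.
	entries, err := admin.DescribeConfig(sarama.ConfigResource{
		Type: sarama.TopicResource,
		Name: "my-topic",
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Printf("%s = %s (default: %v)\n", e.Name, e.Value, e.Default)
	}
}
```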
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
index 72c4d7c..f9cd172 100644
--- a/vendor/github.com/Shopify/sarama/consumer.go
+++ b/vendor/github.com/Shopify/sarama/consumer.go
@@ -35,6 +35,10 @@
 	return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
 }
 
+func (ce ConsumerError) Unwrap() error {
+	return ce.Err
+}
+
 // ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
 // It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
 // when stopping.
@@ -299,6 +303,8 @@
 	errors   chan *ConsumerError
 	feeder   chan *FetchResponse
 
+	preferredReadReplica int32
+
 	trigger, dying chan none
 	closeOnce      sync.Once
 	topic          string
@@ -359,18 +365,29 @@
 	close(child.feeder)
 }
 
+func (child *partitionConsumer) preferredBroker() (*Broker, error) {
+	if child.preferredReadReplica >= 0 {
+		broker, err := child.consumer.client.Broker(child.preferredReadReplica)
+		if err == nil {
+			return broker, nil
+		}
+	}
+
+	// if the preferred replica cannot be found, fall back to the leader
+	return child.consumer.client.Leader(child.topic, child.partition)
+}
+
 func (child *partitionConsumer) dispatch() error {
 	if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
 		return err
 	}
 
-	var leader *Broker
-	var err error
-	if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
+	broker, err := child.preferredBroker()
+	if err != nil {
 		return err
 	}
 
-	child.broker = child.consumer.refBrokerConsumer(leader)
+	child.broker = child.consumer.refBrokerConsumer(broker)
 
 	child.broker.input <- child
 
@@ -422,13 +439,13 @@
 func (child *partitionConsumer) Close() error {
 	child.AsyncClose()
 
-	var errors ConsumerErrors
+	var consumerErrors ConsumerErrors
 	for err := range child.errors {
-		errors = append(errors, err)
+		consumerErrors = append(consumerErrors, err)
 	}
 
-	if len(errors) > 0 {
-		return errors
+	if len(consumerErrors) > 0 {
+		return consumerErrors
 	}
 	return nil
 }
@@ -451,6 +468,7 @@
 		}
 
 		for i, msg := range msgs {
+			child.interceptors(msg)
 		messageSelect:
 			select {
 			case <-child.dying:
@@ -464,6 +482,7 @@
 					child.broker.acks.Done()
 				remainingLoop:
 					for _, msg = range msgs[i:] {
+						child.interceptors(msg)
 						select {
 						case child.messages <- msg:
 						case <-child.dying:
@@ -586,6 +605,8 @@
 
 	consumerBatchSizeMetric.Update(int64(nRecs))
 
+	child.preferredReadReplica = block.PreferredReadReplica
+
 	if nRecs == 0 {
 		partialTrailingMessage, err := block.isPartial()
 		if err != nil {
@@ -623,7 +644,7 @@
 	abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
 	abortedTransactions := block.getAbortedTransactions()
 
-	messages := []*ConsumerMessage{}
+	var messages []*ConsumerMessage
 	for _, records := range block.RecordsSet {
 		switch records.recordsType {
 		case legacyRecords:
@@ -693,6 +714,12 @@
 	return messages, nil
 }
 
+func (child *partitionConsumer) interceptors(msg *ConsumerMessage) {
+	for _, interceptor := range child.conf.Consumer.Interceptors {
+		msg.safelyApplyInterceptor(interceptor)
+	}
+}
+
 type brokerConsumer struct {
 	consumer         *consumer
 	broker           *Broker
@@ -761,7 +788,7 @@
 	close(bc.newSubscriptions)
 }
 
-//subscriptionConsumer ensures we will get nil right away if no new subscriptions is available
+// subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
 func (bc *brokerConsumer) subscriptionConsumer() {
 	<-bc.wait // wait for our first piece of work
 
@@ -776,7 +803,6 @@
 		}
 
 		response, err := bc.fetchNewMessages()
-
 		if err != nil {
 			Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
 			bc.abort(err)
@@ -810,15 +836,27 @@
 	}
 }
 
-//handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
 func (bc *brokerConsumer) handleResponses() {
 	for child := range bc.subscriptions {
 		result := child.responseResult
 		child.responseResult = nil
 
+		if result == nil {
+			if preferredBroker, err := child.preferredBroker(); err == nil {
+				if bc.broker.ID() != preferredBroker.ID() {
+					// not an error, but needs redispatching to consume from the preferred replica
+					child.trigger <- none{}
+					delete(bc.subscriptions, child)
+				}
+			}
+			continue
+		}
+
+		// Discard any replica preference.
+		child.preferredReadReplica = -1
+
 		switch result {
-		case nil:
-			// no-op
 		case errTimedOut:
 			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
 				bc.broker.ID(), child.topic, child.partition)
@@ -887,6 +925,21 @@
 		request.Version = 4
 		request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
 	}
+	if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
+		request.Version = 7
+		// We do not currently implement KIP-227 FetchSessions. Setting the id to 0
+		// and the epoch to -1 tells the broker not to generate a session ID we're
+		// going to just ignore anyway.
+		request.SessionID = 0
+		request.SessionEpoch = -1
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
+		request.Version = 10
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
+		request.Version = 11
+		request.RackID = bc.consumer.conf.RackID
+	}
 
 	for child := range bc.subscriptions {
 		request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
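
Among other things, the consumer changes above run Consumer.Interceptors on every message before it is delivered on the Messages() channel. The following is a minimal sketch of an interceptor, assuming only the ConsumerInterceptor interface added in the config diff; the header key and value are invented for illustration.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// traceInterceptor tags every consumed message with an extra header
// before it reaches the Messages() channel.
type traceInterceptor struct{}

func (traceInterceptor) OnConsume(msg *sarama.ConsumerMessage) {
	msg.Headers = append(msg.Headers, &sarama.RecordHeader{
		Key:   []byte("intercepted"),
		Value: []byte("true"),
	})
}

func main() {
	cfg := sarama.NewConfig()
	// Interceptors run in order; each OnConsume sees the previous interceptor's changes.
	cfg.Consumer.Interceptors = []sarama.ConsumerInterceptor{traceInterceptor{}}
	log.Println("config with consumer interceptor ready, client id:", cfg.ClientID)
}
```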
diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go
index 8de9513..2bf236a 100644
--- a/vendor/github.com/Shopify/sarama/consumer_group.go
+++ b/vendor/github.com/Shopify/sarama/consumer_group.go
@@ -38,6 +38,9 @@
 	// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
 	// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
 	// commit failures.
+	// This method should be called inside an infinite loop; when a
+	// server-side rebalance happens, the consumer session will need to be
+	// recreated to get the new claims.
 	Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
 
 	// Errors returns a read channel of errors that occurred during the consumer life-cycle.
@@ -63,6 +66,8 @@
 	lock      sync.Mutex
 	closed    chan none
 	closeOnce sync.Once
+
+	userData []byte
 }
 
 // NewConsumerGroup creates a new consumer group the given broker addresses and configuration.
@@ -118,9 +123,6 @@
 	c.closeOnce.Do(func() {
 		close(c.closed)
 
-		c.lock.Lock()
-		defer c.lock.Unlock()
-
 		// leave group
 		if e := c.leave(); e != nil {
 			err = e
@@ -171,6 +173,11 @@
 		return err
 	}
 
+	// Periodically check whether the partition count of any subscribed topic has changed
+	// and end the session (triggering a rebalance) when it has. The goroutine exits with
+	// the session, so repeated Consume calls do not accumulate extra
+	// loopCheckPartitionNumbers goroutines.
+	go c.loopCheckPartitionNumbers(topics, sess)
+
 	// Wait for session exit signal
 	<-sess.ctx.Done()
 
@@ -248,40 +255,41 @@
 	}
 
 	// Sync consumer group
-	sync, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
+	groupRequest, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
 	if err != nil {
 		_ = coordinator.Close()
 		return nil, err
 	}
-	switch sync.Err {
+	switch groupRequest.Err {
 	case ErrNoError:
 	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
 		c.memberID = ""
 		return c.newSession(ctx, topics, handler, retries)
 	case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
 		if retries <= 0 {
-			return nil, sync.Err
+			return nil, groupRequest.Err
 		}
 
 		return c.retryNewSession(ctx, topics, handler, retries, true)
 	case ErrRebalanceInProgress: // retry after backoff
 		if retries <= 0 {
-			return nil, sync.Err
+			return nil, groupRequest.Err
 		}
 
 		return c.retryNewSession(ctx, topics, handler, retries, false)
 	default:
-		return nil, sync.Err
+		return nil, groupRequest.Err
 	}
 
 	// Retrieve and sort claims
 	var claims map[string][]int32
-	if len(sync.MemberAssignment) > 0 {
-		members, err := sync.GetMemberAssignment()
+	if len(groupRequest.MemberAssignment) > 0 {
+		members, err := groupRequest.GetMemberAssignment()
 		if err != nil {
 			return nil, err
 		}
 		claims = members.Topics
+		c.userData = members.UserData
 
 		for _, partitions := range claims {
 			sort.Sort(int32Slice(partitions))
@@ -303,9 +311,14 @@
 		req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
 	}
 
+	// use static user-data if configured, otherwise use consumer-group userdata from the last sync
+	userData := c.config.Consumer.Group.Member.UserData
+	if len(userData) == 0 {
+		userData = c.userData
+	}
 	meta := &ConsumerGroupMemberMetadata{
 		Topics:   topics,
-		UserData: c.config.Consumer.Group.Member.UserData,
+		UserData: userData,
 	}
 	strategy := c.config.Consumer.Group.Rebalance.Strategy
 	if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
@@ -321,13 +334,17 @@
 		MemberId:     c.memberID,
 		GenerationId: generationID,
 	}
+	strategy := c.config.Consumer.Group.Rebalance.Strategy
 	for memberID, topics := range plan {
-		err := req.AddGroupAssignmentMember(memberID, &ConsumerGroupMemberAssignment{
-			Topics: topics,
-		})
+		assignment := &ConsumerGroupMemberAssignment{Topics: topics}
+		userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID)
 		if err != nil {
 			return nil, err
 		}
+		assignment.UserData = userDataBytes
+		if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil {
+			return nil, err
+		}
 	}
 	return coordinator.SyncGroup(req)
 }
@@ -362,8 +379,10 @@
 	return strategy.Plan(members, topics)
 }
 
-// Leaves the cluster, called by Close, protected by lock.
+// Leaves the cluster, called by Close.
 func (c *consumerGroup) leave() error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
 	if c.memberID == "" {
 		return nil
 	}
@@ -395,12 +414,6 @@
 }
 
 func (c *consumerGroup) handleError(err error, topic string, partition int32) {
-	select {
-	case <-c.closed:
-		return
-	default:
-	}
-
 	if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
 		err = &ConsumerError{
 			Topic:     topic,
@@ -409,14 +422,67 @@
 		}
 	}
 
-	if c.config.Consumer.Return.Errors {
-		select {
-		case c.errors <- err:
-		default:
-		}
-	} else {
+	if !c.config.Consumer.Return.Errors {
 		Logger.Println(err)
+		return
 	}
+
+	select {
+	case <-c.closed:
+		// consumer is closed
+		return
+	default:
+	}
+
+	select {
+	case c.errors <- err:
+	default:
+		// no error listener
+	}
+}
+
+func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) {
+	pause := time.NewTicker(c.config.Metadata.RefreshFrequency)
+	defer session.cancel()
+	defer pause.Stop()
+	var oldTopicToPartitionNum map[string]int
+	var err error
+	if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil {
+		return
+	}
+	for {
+		if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil {
+			return
+		} else {
+			for topic, num := range oldTopicToPartitionNum {
+				if newTopicToPartitionNum[topic] != num {
+					return // trigger the end of the session on exit
+				}
+			}
+		}
+		select {
+		case <-pause.C:
+		case <-session.ctx.Done():
+			Logger.Printf("loop check partition number coroutine will exit, topics %s", topics)
+			// exit if the session was closed elsewhere
+			return
+		case <-c.closed:
+			return
+		}
+	}
+}
+
+func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) {
+	topicToPartitionNum := make(map[string]int, len(topics))
+	for _, topic := range topics {
+		if partitionNum, err := c.client.Partitions(topic); err != nil {
+			Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err)
+			return nil, err
+		} else {
+			topicToPartitionNum[topic] = len(partitionNum)
+		}
+	}
+	return topicToPartitionNum, nil
 }
 
 // --------------------------------------------------------------------
@@ -447,6 +513,11 @@
 	// message twice, and your processing should ideally be idempotent.
 	MarkOffset(topic string, partition int32, offset int64, metadata string)
 
+	// Commit the offset to the backend
+	//
+	// Note: calling Commit performs a blocking synchronous operation.
+	Commit()
+
 	// ResetOffset resets to the provided offset, alongside a metadata string that
 	// represents the state of the partition consumer at that point in time. Reset
 	// acts as a counterpart to MarkOffset, the difference being that it allows to
@@ -558,6 +629,10 @@
 	}
 }
 
+func (s *consumerGroupSession) Commit() {
+	s.offsets.Commit()
+}
+
 func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
 	if pom := s.offsets.findPOM(topic, partition); pom != nil {
 		pom.ResetOffset(offset, metadata)
@@ -657,6 +732,9 @@
 	pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
 	defer pause.Stop()
 
+	retryBackoff := time.NewTimer(s.parent.config.Metadata.Retry.Backoff)
+	defer retryBackoff.Stop()
+
 	retries := s.parent.config.Metadata.Retry.Max
 	for {
 		coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
@@ -665,11 +743,11 @@
 				s.parent.handleError(err, "", -1)
 				return
 			}
-
+			retryBackoff.Reset(s.parent.config.Metadata.Retry.Backoff)
 			select {
 			case <-s.hbDying:
 				return
-			case <-time.After(s.parent.config.Metadata.Retry.Backoff):
+			case <-retryBackoff.C:
 				retries--
 			}
 			continue
@@ -694,7 +772,7 @@
 		case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
 			return
 		default:
-			s.parent.handleError(err, "", -1)
+			s.parent.handleError(resp.Err, "", -1)
 			return
 		}
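
The consumer-group changes add a blocking ConsumerGroupSession.Commit() and document that Consume must be called in a loop across rebalances. Below is a sketch of a handler that disables auto-commit and commits explicitly; the broker list, group ID, and topic name are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/Shopify/sarama"
)

// handler is a minimal ConsumerGroupHandler that commits offsets explicitly.
type handler struct{}

func (handler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (handler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (handler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		log.Printf("topic=%s partition=%d offset=%d", msg.Topic, msg.Partition, msg.Offset)
		sess.MarkMessage(msg, "")
		sess.Commit() // blocking, synchronous commit of the marked offsets
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0
	cfg.Consumer.Offsets.AutoCommit.Enable = false // offsets are committed manually above

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	ctx := context.Background()
	// Consume must be called in a loop: it returns whenever a server-side rebalance
	// happens and the session has to be recreated to pick up the new claims.
	for {
		if err := group.Consume(ctx, []string{"my-topic"}, handler{}); err != nil {
			log.Fatal(err)
		}
	}
}
```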
 
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go
index 2d02cc3..21b11e9 100644
--- a/vendor/github.com/Shopify/sarama/consumer_group_members.go
+++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go
@@ -1,6 +1,6 @@
 package sarama
 
-//ConsumerGroupMemberMetadata holds the metadata for consumer group
+// ConsumerGroupMemberMetadata holds the metadata for consumer group
 type ConsumerGroupMemberMetadata struct {
 	Version  int16
 	Topics   []string
@@ -37,7 +37,7 @@
 	return nil
 }
 
-//ConsumerGroupMemberAssignment holds the member assignment for a consume group
+// ConsumerGroupMemberAssignment holds the member assignment for a consumer group
 type ConsumerGroupMemberAssignment struct {
 	Version  int16
 	Topics   map[string][]int32
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
index a8dcaef..5c18e04 100644
--- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
@@ -1,6 +1,6 @@
 package sarama
 
-//ConsumerMetadataRequest is used for metadata requests
+// ConsumerMetadataRequest is used for metadata requests
 type ConsumerMetadataRequest struct {
 	ConsumerGroup string
 }
@@ -29,6 +29,10 @@
 	return 0
 }
 
+func (r *ConsumerMetadataRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
 	return V0_8_2_0
 }
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
index f39a871..7fe0cf9 100644
--- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
@@ -5,7 +5,7 @@
 	"strconv"
 )
 
-//ConsumerMetadataResponse holds the response for a consumer group meta data requests
+// ConsumerMetadataResponse holds the response for a consumer group metadata request
 type ConsumerMetadataResponse struct {
 	Err             KError
 	Coordinator     *Broker
@@ -73,6 +73,10 @@
 	return 0
 }
 
+func (r *ConsumerMetadataResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
 	return V0_8_2_0
 }
diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/Shopify/sarama/control_record.go
index 9b75ab5..244a821 100644
--- a/vendor/github.com/Shopify/sarama/control_record.go
+++ b/vendor/github.com/Shopify/sarama/control_record.go
@@ -1,14 +1,14 @@
 package sarama
 
-//ControlRecordType ...
+// ControlRecordType ...
 type ControlRecordType int
 
 const (
-	//ControlRecordAbort is a control record for abort
+	// ControlRecordAbort is a control record for abort
 	ControlRecordAbort ControlRecordType = iota
-	//ControlRecordCommit is a control record for commit
+	// ControlRecordCommit is a control record for commit
 	ControlRecordCommit
-	//ControlRecordUnknown is a control record of unknown type
+	// ControlRecordUnknown is a control record of unknown type
 	ControlRecordUnknown
 )
 
@@ -23,16 +23,6 @@
 
 func (cr *ControlRecord) decode(key, value packetDecoder) error {
 	var err error
-	cr.Version, err = value.getInt16()
-	if err != nil {
-		return err
-	}
-
-	cr.CoordinatorEpoch, err = value.getInt32()
-	if err != nil {
-		return err
-	}
-
 	// There a version for the value part AND the key part. And I have no idea if they are supposed to match or not
 	// Either way, all these version can only be 0 for now
 	cr.Version, err = key.getInt16()
@@ -55,6 +45,18 @@
 		// UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored
 		cr.Type = ControlRecordUnknown
 	}
+	// we want to parse the value only if we are decoding a control record of a known type
+	if cr.Type != ControlRecordUnknown {
+		cr.Version, err = value.getInt16()
+		if err != nil {
+			return err
+		}
+
+		cr.CoordinatorEpoch, err = value.getInt32()
+		if err != nil {
+			return err
+		}
+	}
 	return nil
 }
 
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go
index 38189a3..32236e5 100644
--- a/vendor/github.com/Shopify/sarama/crc32_field.go
+++ b/vendor/github.com/Shopify/sarama/crc32_field.go
@@ -72,6 +72,7 @@
 
 	return nil
 }
+
 func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
 	var tab *crc32.Table
 	switch c.polynomial {
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go
index af321e9..46fb044 100644
--- a/vendor/github.com/Shopify/sarama/create_partitions_request.go
+++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go
@@ -67,6 +67,10 @@
 	return 0
 }
 
+func (r *CreatePartitionsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
 	return V1_0_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go
index bb18204..12ce788 100644
--- a/vendor/github.com/Shopify/sarama/create_partitions_response.go
+++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go
@@ -63,6 +63,10 @@
 	return 0
 }
 
+func (r *CreatePartitionsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
 	return V1_0_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go
index 709c0a4..287acd0 100644
--- a/vendor/github.com/Shopify/sarama/create_topics_request.go
+++ b/vendor/github.com/Shopify/sarama/create_topics_request.go
@@ -79,6 +79,10 @@
 	return c.Version
 }
 
+func (r *CreateTopicsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
 	switch c.Version {
 	case 2:
diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go
index a493e02..7e1448a 100644
--- a/vendor/github.com/Shopify/sarama/create_topics_response.go
+++ b/vendor/github.com/Shopify/sarama/create_topics_response.go
@@ -70,6 +70,10 @@
 	return c.Version
 }
 
+func (c *CreateTopicsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
 	switch c.Version {
 	case 2:
diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go
index eaccbfc..af45fda 100644
--- a/vendor/github.com/Shopify/sarama/decompress.go
+++ b/vendor/github.com/Shopify/sarama/decompress.go
@@ -7,7 +7,7 @@
 	"io/ioutil"
 	"sync"
 
-	"github.com/eapache/go-xerial-snappy"
+	snappy "github.com/eapache/go-xerial-snappy"
 	"github.com/pierrec/lz4"
 )
 
@@ -26,34 +26,32 @@
 	case CompressionNone:
 		return data, nil
 	case CompressionGZIP:
-		var (
-			err        error
-			reader     *gzip.Reader
-			readerIntf = gzipReaderPool.Get()
-		)
-		if readerIntf != nil {
-			reader = readerIntf.(*gzip.Reader)
-		} else {
+		var err error
+		reader, ok := gzipReaderPool.Get().(*gzip.Reader)
+		if !ok {
 			reader, err = gzip.NewReader(bytes.NewReader(data))
-			if err != nil {
-				return nil, err
-			}
+		} else {
+			err = reader.Reset(bytes.NewReader(data))
+		}
+
+		if err != nil {
+			return nil, err
 		}
 
 		defer gzipReaderPool.Put(reader)
 
-		if err := reader.Reset(bytes.NewReader(data)); err != nil {
-			return nil, err
-		}
-
 		return ioutil.ReadAll(reader)
 	case CompressionSnappy:
 		return snappy.Decode(data)
 	case CompressionLZ4:
-		reader := lz4ReaderPool.Get().(*lz4.Reader)
+		reader, ok := lz4ReaderPool.Get().(*lz4.Reader)
+		if !ok {
+			reader = lz4.NewReader(bytes.NewReader(data))
+		} else {
+			reader.Reset(bytes.NewReader(data))
+		}
 		defer lz4ReaderPool.Put(reader)
 
-		reader.Reset(bytes.NewReader(data))
 		return ioutil.ReadAll(reader)
 	case CompressionZSTD:
 		return zstdDecompress(nil, data)
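
decompress.go now handles zstd alongside gzip, snappy, and lz4, and the config validation added above rejects zstd compression unless Version >= V2_1_0_0. An illustrative producer setup that satisfies that check follows; the broker address and topic name are placeholders.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0 // zstd compression is rejected below this version
	cfg.Producer.Compression = sarama.CompressionZSTD
	cfg.Producer.Return.Successes = true // required by the sync producer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "my-topic",
		Value: sarama.StringEncoder("zstd-compressed payload"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delivered to partition %d at offset %d", partition, offset)
}
```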
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go
index 305a324..4ac8bbe 100644
--- a/vendor/github.com/Shopify/sarama/delete_groups_request.go
+++ b/vendor/github.com/Shopify/sarama/delete_groups_request.go
@@ -21,6 +21,10 @@
 	return 0
 }
 
+func (r *DeleteGroupsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
 	return V1_1_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go
index c067ebb..5e7b1ed 100644
--- a/vendor/github.com/Shopify/sarama/delete_groups_response.go
+++ b/vendor/github.com/Shopify/sarama/delete_groups_response.go
@@ -65,6 +65,10 @@
 	return 0
 }
 
+func (r *DeleteGroupsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
 	return V1_1_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go
index 93efafd..dc106b1 100644
--- a/vendor/github.com/Shopify/sarama/delete_records_request.go
+++ b/vendor/github.com/Shopify/sarama/delete_records_request.go
@@ -77,6 +77,10 @@
 	return 0
 }
 
+func (d *DeleteRecordsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/Shopify/sarama/delete_records_response.go
index 733a58b..d530b4c 100644
--- a/vendor/github.com/Shopify/sarama/delete_records_response.go
+++ b/vendor/github.com/Shopify/sarama/delete_records_response.go
@@ -80,6 +80,10 @@
 	return 0
 }
 
+func (d *DeleteRecordsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go
index 911f67d..ba6780a 100644
--- a/vendor/github.com/Shopify/sarama/delete_topics_request.go
+++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go
@@ -38,6 +38,10 @@
 	return d.Version
 }
 
+func (d *DeleteTopicsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
 	switch d.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go
index 3422546..733961a 100644
--- a/vendor/github.com/Shopify/sarama/delete_topics_response.go
+++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go
@@ -68,6 +68,10 @@
 	return d.Version
 }
 
+func (d *DeleteTopicsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
 	switch d.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go
index ccb587b..4c34880 100644
--- a/vendor/github.com/Shopify/sarama/describe_configs_request.go
+++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go
@@ -61,7 +61,6 @@
 		r.Resources[i].Name = name
 
 		confLength, err := pd.getArrayLength()
-
 		if err != nil {
 			return err
 		}
@@ -100,6 +99,10 @@
 	return r.Version
 }
 
+func (r *DescribeConfigsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go
index 5737232..928f5a5 100644
--- a/vendor/github.com/Shopify/sarama/describe_configs_response.go
+++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go
@@ -112,6 +112,10 @@
 	return r.Version
 }
 
+func (r *DescribeConfigsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
@@ -220,7 +224,7 @@
 	return nil
 }
 
-//https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
 func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
 	if version == 0 {
 		r.Source = SourceUnknown
@@ -249,12 +253,16 @@
 			return err
 		}
 		r.Default = defaultB
+		if defaultB {
+			r.Source = SourceDefault
+		}
 	} else {
 		source, err := pd.getInt8()
 		if err != nil {
 			return err
 		}
 		r.Source = ConfigSource(source)
+		r.Default = r.Source == SourceDefault
 	}
 
 	sensitive, err := pd.getBool()
@@ -277,7 +285,6 @@
 			}
 			r.Synonyms[i] = s
 		}
-
 	}
 	return nil
 }
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go
index 1fb3567..f8962da 100644
--- a/vendor/github.com/Shopify/sarama/describe_groups_request.go
+++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go
@@ -21,6 +21,10 @@
 	return 0
 }
 
+func (r *DescribeGroupsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go
index 542b3a9..bc242e4 100644
--- a/vendor/github.com/Shopify/sarama/describe_groups_response.go
+++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go
@@ -43,6 +43,10 @@
 	return 0
 }
 
+func (r *DescribeGroupsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
new file mode 100644
index 0000000..c0bf04e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
@@ -0,0 +1,87 @@
+package sarama
+
+// DescribeLogDirsRequest is a describe request to get partitions' log sizes
+type DescribeLogDirsRequest struct {
+	// Version 0 and 1 are equal
+	// The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
+	Version int16
+
+	// If this is an empty array, all topics will be queried
+	DescribeTopics []DescribeLogDirsRequestTopic
+}
+
+// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic
+type DescribeLogDirsRequestTopic struct {
+	Topic        string
+	PartitionIDs []int32
+}
+
+func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error {
+	length := len(r.DescribeTopics)
+	if length == 0 {
+		// In order to query all topics we must send null
+		length = -1
+	}
+
+	if err := pe.putArrayLength(length); err != nil {
+		return err
+	}
+
+	for _, d := range r.DescribeTopics {
+		if err := pe.putString(d.Topic); err != nil {
+			return err
+		}
+
+		if err := pe.putInt32Array(d.PartitionIDs); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == -1 {
+		n = 0
+	}
+
+	topics := make([]DescribeLogDirsRequestTopic, n)
+	for i := 0; i < n; i++ {
+		topics[i] = DescribeLogDirsRequestTopic{}
+
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		topics[i].Topic = topic
+
+		pIDs, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+		topics[i].PartitionIDs = pIDs
+	}
+	r.DescribeTopics = topics
+
+	return nil
+}
+
+func (r *DescribeLogDirsRequest) key() int16 {
+	return 35
+}
+
+func (r *DescribeLogDirsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeLogDirsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion {
+	return V1_0_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
new file mode 100644
index 0000000..411da38
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
@@ -0,0 +1,229 @@
+package sarama
+
+import "time"
+
+type DescribeLogDirsResponse struct {
+	ThrottleTime time.Duration
+
+	// Version 0 and 1 are equal
+	// The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
+	Version int16
+
+	LogDirs []DescribeLogDirsResponseDirMetadata
+}
+
+func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(r.LogDirs)); err != nil {
+		return err
+	}
+
+	for _, dir := range r.LogDirs {
+		if err := dir.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	// Decode array of DescribeLogDirsResponseDirMetadata
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n)
+	for i := 0; i < n; i++ {
+		dir := DescribeLogDirsResponseDirMetadata{}
+		if err := dir.decode(pd, version); err != nil {
+			return err
+		}
+		r.LogDirs[i] = dir
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponse) key() int16 {
+	return 35
+}
+
+func (r *DescribeLogDirsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeLogDirsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion {
+	return V1_0_0_0
+}
+
+type DescribeLogDirsResponseDirMetadata struct {
+	ErrorCode KError
+
+	// The absolute log directory path
+	Path   string
+	Topics []DescribeLogDirsResponseTopic
+}
+
+func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.ErrorCode))
+
+	if err := pe.putString(r.Path); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Topics)); err != nil {
+		return err
+	}
+	for _, topic := range r.Topics {
+		if err := topic.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error {
+	errCode, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.ErrorCode = KError(errCode)
+
+	path, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Path = path
+
+	// Decode array of DescribeLogDirsResponseTopic
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Topics = make([]DescribeLogDirsResponseTopic, n)
+	for i := 0; i < n; i++ {
+		t := DescribeLogDirsResponseTopic{}
+
+		if err := t.decode(pd, version); err != nil {
+			return err
+		}
+
+		r.Topics[i] = t
+	}
+
+	return nil
+}
+
+// DescribeLogDirsResponseTopic contains a topic's partitions descriptions
+type DescribeLogDirsResponseTopic struct {
+	Topic      string
+	Partitions []DescribeLogDirsResponsePartition
+}
+
+func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error {
+	if err := pe.putString(r.Topic); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Partitions)); err != nil {
+		return err
+	}
+	for _, partition := range r.Partitions {
+		if err := partition.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Topic = t
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	r.Partitions = make([]DescribeLogDirsResponsePartition, n)
+	for i := 0; i < n; i++ {
+		p := DescribeLogDirsResponsePartition{}
+		if err := p.decode(pd, version); err != nil {
+			return err
+		}
+		r.Partitions[i] = p
+	}
+
+	return nil
+}
+
+// DescribeLogDirsResponsePartition describes a partition's log directory
+type DescribeLogDirsResponsePartition struct {
+	PartitionID int32
+
+	// The size of the log segments of the partition in bytes.
+	Size int64
+
+	// The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or
+	// current replica's LEO (if it is the future log for the partition)
+	OffsetLag int64
+
+	// True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of
+	// the replica in the future.
+	IsTemporary bool
+}
+
+func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error {
+	pe.putInt32(r.PartitionID)
+	pe.putInt64(r.Size)
+	pe.putInt64(r.OffsetLag)
+	pe.putBool(r.IsTemporary)
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error {
+	pID, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.PartitionID = pID
+
+	size, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	r.Size = size
+
+	lag, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	r.OffsetLag = lag
+
+	isTemp, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.IsTemporary = isTemp
+
+	return nil
+}
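
The two new files above implement the DescribeLogDirs (API key 35) request/response pair. The sketch below assumes the Broker type exposes a matching DescribeLogDirs method, as in upstream Sarama; the broker address is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_0_0_0 // DescribeLogDirs requires Kafka >= 1.0

	broker := sarama.NewBroker("localhost:9092")
	if err := broker.Open(cfg); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	// An empty DescribeTopics slice is encoded as null and queries every topic on the broker.
	resp, err := broker.DescribeLogDirs(&sarama.DescribeLogDirsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, dir := range resp.LogDirs {
		for _, topic := range dir.Topics {
			for _, p := range topic.Partitions {
				fmt.Printf("%s: %s/%d size=%d bytes lag=%d\n",
					dir.Path, topic.Topic, p.PartitionID, p.Size, p.OffsetLag)
			}
		}
	}
}
```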
diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go
new file mode 100644
index 0000000..b5b5940
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go
@@ -0,0 +1,70 @@
+package sarama
+
+// DescribeUserScramCredentialsRequest is a request to get a list of SCRAM user names
+type DescribeUserScramCredentialsRequest struct {
+	// Only version 0 is currently supported
+	Version int16
+
+	// If this is an empty array, all users will be queried
+	DescribeUsers []DescribeUserScramCredentialsRequestUser
+}
+
+// DescribeUserScramCredentialsRequestUser is a describe request about a specific user name
+type DescribeUserScramCredentialsRequestUser struct {
+	Name string
+}
+
+func (r *DescribeUserScramCredentialsRequest) encode(pe packetEncoder) error {
+	pe.putCompactArrayLength(len(r.DescribeUsers))
+	for _, d := range r.DescribeUsers {
+		if err := pe.putCompactString(d.Name); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == -1 {
+		n = 0
+	}
+
+	r.DescribeUsers = make([]DescribeUserScramCredentialsRequestUser, n)
+	for i := 0; i < n; i++ {
+		r.DescribeUsers[i] = DescribeUserScramCredentialsRequestUser{}
+		if r.DescribeUsers[i].Name, err = pd.getCompactString(); err != nil {
+			return err
+		}
+		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *DescribeUserScramCredentialsRequest) key() int16 {
+	return 50
+}
+
+func (r *DescribeUserScramCredentialsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeUserScramCredentialsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *DescribeUserScramCredentialsRequest) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go
new file mode 100644
index 0000000..2656c2f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go
@@ -0,0 +1,168 @@
+package sarama
+
+import "time"
+
+type ScramMechanismType int8
+
+const (
+	SCRAM_MECHANISM_UNKNOWN ScramMechanismType = iota // 0
+	SCRAM_MECHANISM_SHA_256                           // 1
+	SCRAM_MECHANISM_SHA_512                           // 2
+)
+
+func (s ScramMechanismType) String() string {
+	switch s {
+	case 1:
+		return SASLTypeSCRAMSHA256
+	case 2:
+		return SASLTypeSCRAMSHA512
+	default:
+		return "Unknown"
+	}
+}
+
+type DescribeUserScramCredentialsResponse struct {
+	// Only version 0 is currently supported
+	Version int16
+
+	ThrottleTime time.Duration
+
+	ErrorCode    KError
+	ErrorMessage *string
+
+	Results []*DescribeUserScramCredentialsResult
+}
+
+type DescribeUserScramCredentialsResult struct {
+	User string
+
+	ErrorCode    KError
+	ErrorMessage *string
+
+	CredentialInfos []*UserScramCredentialsResponseInfo
+}
+
+type UserScramCredentialsResponseInfo struct {
+	Mechanism  ScramMechanismType
+	Iterations int32
+}
+
+func (r *DescribeUserScramCredentialsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+
+	pe.putInt16(int16(r.ErrorCode))
+	if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
+		return err
+	}
+
+	pe.putCompactArrayLength(len(r.Results))
+	for _, u := range r.Results {
+		if err := pe.putCompactString(u.User); err != nil {
+			return err
+		}
+		pe.putInt16(int16(u.ErrorCode))
+		if err := pe.putNullableCompactString(u.ErrorMessage); err != nil {
+			return err
+		}
+
+		pe.putCompactArrayLength(len(u.CredentialInfos))
+		for _, c := range u.CredentialInfos {
+			pe.putInt8(int8(c.Mechanism))
+			pe.putInt32(c.Iterations)
+			pe.putEmptyTaggedFieldArray()
+		}
+
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.ErrorCode = KError(kerr)
+	if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
+		return err
+	}
+
+	numUsers, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numUsers > 0 {
+		r.Results = make([]*DescribeUserScramCredentialsResult, numUsers)
+		for i := 0; i < numUsers; i++ {
+			r.Results[i] = &DescribeUserScramCredentialsResult{}
+			if r.Results[i].User, err = pd.getCompactString(); err != nil {
+				return err
+			}
+
+			errorCode, err := pd.getInt16()
+			if err != nil {
+				return err
+			}
+			r.Results[i].ErrorCode = KError(errorCode)
+			if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil {
+				return err
+			}
+
+			numCredentialInfos, err := pd.getCompactArrayLength()
+			if err != nil {
+				return err
+			}
+
+			r.Results[i].CredentialInfos = make([]*UserScramCredentialsResponseInfo, numCredentialInfos)
+			for j := 0; j < numCredentialInfos; j++ {
+				r.Results[i].CredentialInfos[j] = &UserScramCredentialsResponseInfo{}
+				scramMechanism, err := pd.getInt8()
+				if err != nil {
+					return err
+				}
+				r.Results[i].CredentialInfos[j].Mechanism = ScramMechanismType(scramMechanism)
+				if r.Results[i].CredentialInfos[j].Iterations, err = pd.getInt32(); err != nil {
+					return err
+				}
+				if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+					return err
+				}
+			}
+
+			if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *DescribeUserScramCredentialsResponse) key() int16 {
+	return 50
+}
+
+func (r *DescribeUserScramCredentialsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeUserScramCredentialsResponse) headerVersion() int16 {
+	return 2
+}
+
+func (r *DescribeUserScramCredentialsResponse) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
index 3f4d569..7bf9ff9 100644
--- a/vendor/github.com/Shopify/sarama/dev.yml
+++ b/vendor/github.com/Shopify/sarama/dev.yml
@@ -2,7 +2,7 @@
 
 up:
   - go:
-      version: '1.12'
+      version: '1.16'
 
 commands:
   test:
diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/Shopify/sarama/docker-compose.yml
new file mode 100644
index 0000000..8e9c24e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/docker-compose.yml
@@ -0,0 +1,134 @@
+version: '3.7'
+services:
+  zookeeper-1:
+    image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      ZOOKEEPER_SERVER_ID: '1'
+      ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
+      ZOOKEEPER_CLIENT_PORT: '2181'
+      ZOOKEEPER_PEER_PORT: '2888'
+      ZOOKEEPER_LEADER_PORT: '3888'
+      ZOOKEEPER_INIT_LIMIT: '10'
+      ZOOKEEPER_SYNC_LIMIT: '5'
+      ZOOKEEPER_MAX_CLIENT_CONNS: '0'
+  zookeeper-2:
+    image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      ZOOKEEPER_SERVER_ID: '2'
+      ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
+      ZOOKEEPER_CLIENT_PORT: '2181'
+      ZOOKEEPER_PEER_PORT: '2888'
+      ZOOKEEPER_LEADER_PORT: '3888'
+      ZOOKEEPER_INIT_LIMIT: '10'
+      ZOOKEEPER_SYNC_LIMIT: '5'
+      ZOOKEEPER_MAX_CLIENT_CONNS: '0'
+  zookeeper-3:
+    image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      ZOOKEEPER_SERVER_ID: '3'
+      ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
+      ZOOKEEPER_CLIENT_PORT: '2181'
+      ZOOKEEPER_PEER_PORT: '2888'
+      ZOOKEEPER_LEADER_PORT: '3888'
+      ZOOKEEPER_INIT_LIMIT: '10'
+      ZOOKEEPER_SYNC_LIMIT: '5'
+      ZOOKEEPER_MAX_CLIENT_CONNS: '0'
+  kafka-1:
+    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
+      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091'
+      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
+      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
+      KAFKA_BROKER_ID: '1'
+      KAFKA_BROKER_RACK: '1'
+      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
+      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
+      KAFKA_DELETE_TOPIC_ENABLE: 'true'
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
+  kafka-2:
+    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
+      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092'
+      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
+      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
+      KAFKA_BROKER_ID: '2'
+      KAFKA_BROKER_RACK: '2'
+      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
+      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
+      KAFKA_DELETE_TOPIC_ENABLE: 'true'
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
+  kafka-3:
+    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
+      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093'
+      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
+      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
+      KAFKA_BROKER_ID: '3'
+      KAFKA_BROKER_RACK: '3'
+      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
+      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
+      KAFKA_DELETE_TOPIC_ENABLE: 'true'
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
+  kafka-4:
+    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
+      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094'
+      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
+      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
+      KAFKA_BROKER_ID: '4'
+      KAFKA_BROKER_RACK: '4'
+      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
+      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
+      KAFKA_DELETE_TOPIC_ENABLE: 'true'
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
+  kafka-5:
+    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
+      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095'
+      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
+      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
+      KAFKA_BROKER_ID: '5'
+      KAFKA_BROKER_RACK: '5'
+      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
+      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
+      KAFKA_DELETE_TOPIC_ENABLE: 'true'
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
+  toxiproxy:
+    image: 'shopify/toxiproxy:2.1.4'
+    ports:
+      # The tests themselves actually start the proxies on these ports
+      - '29091:29091'
+      - '29092:29092'
+      - '29093:29093'
+      - '29094:29094'
+      - '29095:29095'
+      # This is the toxiproxy API port
+      - '8474:8474'
diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go
index 7ce3bc0..dab54f8 100644
--- a/vendor/github.com/Shopify/sarama/encoder_decoder.go
+++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go
@@ -12,6 +12,11 @@
 	encode(pe packetEncoder) error
 }
 
+type encoderWithHeader interface {
+	encoder
+	headerVersion() int16
+}
+
 // Encode takes an Encoder and turns it into bytes while potentially recording metrics.
 func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
 	if e == nil {
@@ -40,7 +45,7 @@
 	return realEnc.raw, nil
 }
 
-// Decoder is the interface that wraps the basic Decode method.
+// decoder is the interface that wraps the basic Decode method.
 // Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
 type decoder interface {
 	decode(pd packetDecoder) error
@@ -50,7 +55,7 @@
 	decode(pd packetDecoder, version int16) error
 }
 
-// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
+// decode takes bytes and a decoder and fills the fields of the decoder from the bytes,
 // interpreted using Kafka's encoding rules.
 func decode(buf []byte, in decoder) error {
 	if buf == nil {
diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go
index 2cd9b50..6635425 100644
--- a/vendor/github.com/Shopify/sarama/end_txn_request.go
+++ b/vendor/github.com/Shopify/sarama/end_txn_request.go
@@ -45,6 +45,10 @@
 	return 0
 }
 
+func (r *EndTxnRequest) headerVersion() int16 {
+	return 1
+}
+
 func (a *EndTxnRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go
index 33b27e3..7639767 100644
--- a/vendor/github.com/Shopify/sarama/end_txn_response.go
+++ b/vendor/github.com/Shopify/sarama/end_txn_response.go
@@ -39,6 +39,10 @@
 	return 0
 }
 
+func (r *EndTxnResponse) headerVersion() int16 {
+	return 0
+}
+
 func (e *EndTxnResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go
index c6a8be7..0fca0a3 100644
--- a/vendor/github.com/Shopify/sarama/errors.go
+++ b/vendor/github.com/Shopify/sarama/errors.go
@@ -9,6 +9,9 @@
 // or otherwise failed to respond.
 var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
 
+// ErrBrokerNotFound is the error returned when there's no broker found for the requested ID.
+var ErrBrokerNotFound = errors.New("kafka: broker for ID is not found")
+
 // ErrClosedClient is the error returned when a method is called on a client that has been closed.
 var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
 
@@ -49,6 +52,9 @@
 // the metadata.
 var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
 
+// ErrUnknownScramMechanism is returned when a user tries to AlterUserScramCredentials with an unknown SCRAM mechanism
+var ErrUnknownScramMechanism = errors.New("kafka: unknown SCRAM mechanism provided")
+
 // PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
 // if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
 type PacketEncodingError struct {
@@ -87,13 +93,21 @@
 }
 
 func (mErr MultiError) Error() string {
-	var errString = ""
+	errString := ""
 	for _, err := range *mErr.Errors {
 		errString += err.Error() + ","
 	}
 	return errString
 }
 
+func (mErr MultiError) PrettyError() string {
+	errString := ""
+	for _, err := range *mErr.Errors {
+		errString += err.Error() + "\n"
+	}
+	return errString
+}
+
 // ErrDeleteRecords is the type of error returned when fail to delete the required records
 type ErrDeleteRecords struct {
 	MultiError
@@ -103,6 +117,14 @@
 	return "kafka server: failed to delete records " + err.MultiError.Error()
 }
 
+type ErrReassignPartitions struct {
+	MultiError
+}
+
+func (err ErrReassignPartitions) Error() string {
+	return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError())
+}
+
 // Numeric error codes returned by the Kafka server.
 const (
 	ErrNoError                            KError = 0
@@ -188,6 +210,13 @@
 	ErrMemberIdRequired                   KError = 79
 	ErrPreferredLeaderNotAvailable        KError = 80
 	ErrGroupMaxSizeReached                KError = 81
+	ErrFencedInstancedId                  KError = 82
+	ErrEligibleLeadersNotAvailable        KError = 83
+	ErrElectionNotNeeded                  KError = 84
+	ErrNoReassignmentInProgress           KError = 85
+	ErrGroupSubscribedToTopic             KError = 86
+	ErrInvalidRecord                      KError = 87
+	ErrUnstableOffsetCommit               KError = 88
 )
 
 func (err KError) Error() string {
@@ -360,6 +389,20 @@
 		return "kafka server: The preferred leader was not available"
 	case ErrGroupMaxSizeReached:
 		return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members."
+	case ErrFencedInstancedId:
+		return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id."
+	case ErrEligibleLeadersNotAvailable:
+		return "kafka server: Eligible topic partition leaders are not available."
+	case ErrElectionNotNeeded:
+		return "kafka server: Leader election not needed for topic partition."
+	case ErrNoReassignmentInProgress:
+		return "kafka server: No partition reassignment is in progress."
+	case ErrGroupSubscribedToTopic:
+		return "kafka server: Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it."
+	case ErrInvalidRecord:
+		return "kafka server: This record has failed the validation on broker and hence will be rejected."
+	case ErrUnstableOffsetCommit:
+		return "kafka server: There are unstable offsets that need to be cleared."
 	}
 
 	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
index 4db9ddd..f893aef 100644
--- a/vendor/github.com/Shopify/sarama/fetch_request.go
+++ b/vendor/github.com/Shopify/sarama/fetch_request.go
@@ -1,20 +1,41 @@
 package sarama
 
 type fetchRequestBlock struct {
-	fetchOffset int64
-	maxBytes    int32
+	Version            int16
+	currentLeaderEpoch int32
+	fetchOffset        int64
+	logStartOffset     int64
+	maxBytes           int32
 }
 
-func (b *fetchRequestBlock) encode(pe packetEncoder) error {
+func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error {
+	b.Version = version
+	if b.Version >= 9 {
+		pe.putInt32(b.currentLeaderEpoch)
+	}
 	pe.putInt64(b.fetchOffset)
+	if b.Version >= 5 {
+		pe.putInt64(b.logStartOffset)
+	}
 	pe.putInt32(b.maxBytes)
 	return nil
 }
 
-func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
+func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+	b.Version = version
+	if b.Version >= 9 {
+		if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
 	if b.fetchOffset, err = pd.getInt64(); err != nil {
 		return err
 	}
+	if b.Version >= 5 {
+		if b.logStartOffset, err = pd.getInt64(); err != nil {
+			return err
+		}
+	}
 	if b.maxBytes, err = pd.getInt32(); err != nil {
 		return err
 	}
@@ -25,12 +46,16 @@
 // https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that.  The KIP is at
 // https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
 type FetchRequest struct {
-	MaxWaitTime int32
-	MinBytes    int32
-	MaxBytes    int32
-	Version     int16
-	Isolation   IsolationLevel
-	blocks      map[string]map[int32]*fetchRequestBlock
+	MaxWaitTime  int32
+	MinBytes     int32
+	MaxBytes     int32
+	Version      int16
+	Isolation    IsolationLevel
+	SessionID    int32
+	SessionEpoch int32
+	blocks       map[string]map[int32]*fetchRequestBlock
+	forgotten    map[string][]int32
+	RackID       string
 }
 
 type IsolationLevel int8
@@ -50,6 +75,10 @@
 	if r.Version >= 4 {
 		pe.putInt8(int8(r.Isolation))
 	}
+	if r.Version >= 7 {
+		pe.putInt32(r.SessionID)
+		pe.putInt32(r.SessionEpoch)
+	}
 	err = pe.putArrayLength(len(r.blocks))
 	if err != nil {
 		return err
@@ -65,17 +94,44 @@
 		}
 		for partition, block := range blocks {
 			pe.putInt32(partition)
-			err = block.encode(pe)
+			err = block.encode(pe, r.Version)
 			if err != nil {
 				return err
 			}
 		}
 	}
+	if r.Version >= 7 {
+		err = pe.putArrayLength(len(r.forgotten))
+		if err != nil {
+			return err
+		}
+		for topic, partitions := range r.forgotten {
+			err = pe.putString(topic)
+			if err != nil {
+				return err
+			}
+			err = pe.putArrayLength(len(partitions))
+			if err != nil {
+				return err
+			}
+			for _, partition := range partitions {
+				pe.putInt32(partition)
+			}
+		}
+	}
+	if r.Version >= 11 {
+		err = pe.putString(r.RackID)
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
 func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
 	r.Version = version
+
 	if _, err = pd.getInt32(); err != nil {
 		return err
 	}
@@ -97,6 +153,16 @@
 		}
 		r.Isolation = IsolationLevel(isolation)
 	}
+	if r.Version >= 7 {
+		r.SessionID, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+		r.SessionEpoch, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
 	topicCount, err := pd.getArrayLength()
 	if err != nil {
 		return err
@@ -121,12 +187,47 @@
 				return err
 			}
 			fetchBlock := &fetchRequestBlock{}
-			if err = fetchBlock.decode(pd); err != nil {
+			if err = fetchBlock.decode(pd, r.Version); err != nil {
 				return err
 			}
 			r.blocks[topic][partition] = fetchBlock
 		}
 	}
+
+	if r.Version >= 7 {
+		forgottenCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.forgotten = make(map[string][]int32)
+		for i := 0; i < forgottenCount; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+			r.forgotten[topic] = make([]int32, partitionCount)
+
+			for j := 0; j < partitionCount; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				r.forgotten[topic][j] = partition
+			}
+		}
+	}
+
+	if r.Version >= 11 {
+		r.RackID, err = pd.getString()
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
@@ -138,18 +239,34 @@
 	return r.Version
 }
 
+func (r *FetchRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *FetchRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
+	case 0:
+		return MinVersion
 	case 1:
 		return V0_9_0_0
 	case 2:
 		return V0_10_0_0
 	case 3:
 		return V0_10_1_0
-	case 4:
+	case 4, 5:
 		return V0_11_0_0
+	case 6:
+		return V1_0_0_0
+	case 7:
+		return V1_1_0_0
+	case 8:
+		return V2_0_0_0
+	case 9, 10:
+		return V2_1_0_0
+	case 11:
+		return V2_3_0_0
 	default:
-		return MinVersion
+		return MaxVersion
 	}
 }
 
@@ -158,13 +275,21 @@
 		r.blocks = make(map[string]map[int32]*fetchRequestBlock)
 	}
 
+	if r.Version >= 7 && r.forgotten == nil {
+		r.forgotten = make(map[string][]int32)
+	}
+
 	if r.blocks[topic] == nil {
 		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
 	}
 
 	tmp := new(fetchRequestBlock)
+	tmp.Version = r.Version
 	tmp.maxBytes = maxBytes
 	tmp.fetchOffset = fetchOffset
+	if r.Version >= 9 {
+		tmp.currentLeaderEpoch = int32(-1)
+	}
 
 	r.blocks[topic][partitionID] = tmp
 }
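The fetch request now carries the v7+ session fields (KIP-227 incremental fetch sessions) and the v11 RackID (KIP-392 follower fetching), and AddBlock seeds currentLeaderEpoch with -1 for v9+ requests. A hedged construction sketch; sarama.ReadCommitted is assumed to be the exported IsolationLevel constant defined next to the IsolationLevel type shown above:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// A v11 fetch request carries the session fields and the rack ID in addition
	// to the per-partition currentLeaderEpoch / logStartOffset fields.
	req := &sarama.FetchRequest{
		Version:      11,
		MaxWaitTime:  500,
		MinBytes:     1,
		MaxBytes:     1 << 20,
		Isolation:    sarama.ReadCommitted,
		SessionID:    0,
		SessionEpoch: -1,
		RackID:       "rack-a",
	}

	// AddBlock(topic, partitionID, fetchOffset, maxBytes) now also initialises
	// currentLeaderEpoch to -1 for v9+, per the change above.
	req.AddBlock("my-topic", 0, 42, 1<<20)

	fmt.Printf("fetch request v%d prepared\n", req.Version)
}
```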
diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go
index 3afc187..54b8828 100644
--- a/vendor/github.com/Shopify/sarama/fetch_response.go
+++ b/vendor/github.com/Shopify/sarama/fetch_response.go
@@ -30,13 +30,15 @@
 }
 
 type FetchResponseBlock struct {
-	Err                 KError
-	HighWaterMarkOffset int64
-	LastStableOffset    int64
-	AbortedTransactions []*AbortedTransaction
-	Records             *Records // deprecated: use FetchResponseBlock.RecordsSet
-	RecordsSet          []*Records
-	Partial             bool
+	Err                  KError
+	HighWaterMarkOffset  int64
+	LastStableOffset     int64
+	LogStartOffset       int64
+	AbortedTransactions  []*AbortedTransaction
+	PreferredReadReplica int32
+	Records              *Records // deprecated: use FetchResponseBlock.RecordsSet
+	RecordsSet           []*Records
+	Partial              bool
 }
 
 func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
@@ -57,6 +59,13 @@
 			return err
 		}
 
+		if version >= 5 {
+			b.LogStartOffset, err = pd.getInt64()
+			if err != nil {
+				return err
+			}
+		}
+
 		numTransact, err := pd.getArrayLength()
 		if err != nil {
 			return err
@@ -75,6 +84,15 @@
 		}
 	}
 
+	if version >= 11 {
+		b.PreferredReadReplica, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	} else {
+		b.PreferredReadReplica = -1
+	}
+
 	recordsSize, err := pd.getInt32()
 	if err != nil {
 		return err
@@ -166,6 +184,10 @@
 	if version >= 4 {
 		pe.putInt64(b.LastStableOffset)
 
+		if version >= 5 {
+			pe.putInt64(b.LogStartOffset)
+		}
+
 		if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
 			return err
 		}
@@ -176,6 +198,10 @@
 		}
 	}
 
+	if version >= 11 {
+		pe.putInt32(b.PreferredReadReplica)
+	}
+
 	pe.push(&lengthField{})
 	for _, records := range b.RecordsSet {
 		err = records.encode(pe)
@@ -200,7 +226,9 @@
 type FetchResponse struct {
 	Blocks        map[string]map[int32]*FetchResponseBlock
 	ThrottleTime  time.Duration
-	Version       int16 // v1 requires 0.9+, v2 requires 0.10+
+	ErrorCode     int16
+	SessionID     int32
+	Version       int16
 	LogAppendTime bool
 	Timestamp     time.Time
 }
@@ -216,6 +244,17 @@
 		r.ThrottleTime = time.Duration(throttle) * time.Millisecond
 	}
 
+	if r.Version >= 7 {
+		r.ErrorCode, err = pd.getInt16()
+		if err != nil {
+			return err
+		}
+		r.SessionID, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
 	numTopics, err := pd.getArrayLength()
 	if err != nil {
 		return err
@@ -258,6 +297,11 @@
 		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
 	}
 
+	if r.Version >= 7 {
+		pe.putInt16(r.ErrorCode)
+		pe.putInt32(r.SessionID)
+	}
+
 	err = pe.putArrayLength(len(r.Blocks))
 	if err != nil {
 		return err
@@ -281,7 +325,6 @@
 				return err
 			}
 		}
-
 	}
 	return nil
 }
@@ -294,18 +337,34 @@
 	return r.Version
 }
 
+func (r *FetchResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *FetchResponse) requiredVersion() KafkaVersion {
 	switch r.Version {
+	case 0:
+		return MinVersion
 	case 1:
 		return V0_9_0_0
 	case 2:
 		return V0_10_0_0
 	case 3:
 		return V0_10_1_0
-	case 4:
+	case 4, 5:
 		return V0_11_0_0
+	case 6:
+		return V1_0_0_0
+	case 7:
+		return V1_1_0_0
+	case 8:
+		return V2_0_0_0
+	case 9, 10:
+		return V2_1_0_0
+	case 11:
+		return V2_3_0_0
 	default:
-		return MinVersion
+		return MaxVersion
 	}
 }
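The requiredVersion tables above now map every fetch API version up to v11 onto the broker version that introduced it. Going the other way — picking the highest usable fetch version for a given broker version — could be sketched as below; IsAtLeast is assumed to be sarama's exported KafkaVersion comparison helper, and the helper itself is illustrative, not part of the library:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// maxFetchVersion picks the highest fetch API version, per the tables above,
// that the given broker version supports.
func maxFetchVersion(v sarama.KafkaVersion) int16 {
	switch {
	case v.IsAtLeast(sarama.V2_3_0_0):
		return 11
	case v.IsAtLeast(sarama.V2_1_0_0):
		return 10
	case v.IsAtLeast(sarama.V2_0_0_0):
		return 8
	case v.IsAtLeast(sarama.V1_1_0_0):
		return 7
	case v.IsAtLeast(sarama.V1_0_0_0):
		return 6
	case v.IsAtLeast(sarama.V0_11_0_0):
		return 5
	case v.IsAtLeast(sarama.V0_10_1_0):
		return 3
	case v.IsAtLeast(sarama.V0_10_0_0):
		return 2
	case v.IsAtLeast(sarama.V0_9_0_0):
		return 1
	default:
		return 0
	}
}

func main() {
	fmt.Println(maxFetchVersion(sarama.V2_3_0_0)) // 11
}
```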
 
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
index ff2ad20..597bcbf 100644
--- a/vendor/github.com/Shopify/sarama/find_coordinator_request.go
+++ b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
@@ -51,6 +51,10 @@
 	return f.Version
 }
 
+func (r *FindCoordinatorRequest) headerVersion() int16 {
+	return 1
+}
+
 func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
 	switch f.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go
index 9c900e8..83a648a 100644
--- a/vendor/github.com/Shopify/sarama/find_coordinator_response.go
+++ b/vendor/github.com/Shopify/sarama/find_coordinator_response.go
@@ -82,6 +82,10 @@
 	return f.Version
 }
 
+func (r *FindCoordinatorResponse) headerVersion() int16 {
+	return 0
+}
+
 func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
 	switch f.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/go.mod b/vendor/github.com/Shopify/sarama/go.mod
index 8c45155..ccbd8e2 100644
--- a/vendor/github.com/Shopify/sarama/go.mod
+++ b/vendor/github.com/Shopify/sarama/go.mod
@@ -1,24 +1,28 @@
 module github.com/Shopify/sarama
 
+go 1.13
+
 require (
-	github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798
 	github.com/Shopify/toxiproxy v2.1.4+incompatible
 	github.com/davecgh/go-spew v1.1.1
-	github.com/eapache/go-resiliency v1.1.0
+	github.com/eapache/go-resiliency v1.2.0
 	github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21
 	github.com/eapache/queue v1.1.0
-	github.com/golang/snappy v0.0.1 // indirect
-	github.com/hashicorp/go-uuid v1.0.1 // indirect
-	github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03
-	github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41
-	github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a
-	github.com/stretchr/testify v1.3.0
-	github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c
-	github.com/xdg/stringprep v1.0.0 // indirect
-	golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 // indirect
-	golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3
-	gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
-	gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
-	gopkg.in/jcmturner/gokrb5.v7 v7.2.3
-	gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
+	github.com/fortytw2/leaktest v1.3.0
+	github.com/frankban/quicktest v1.11.3 // indirect
+	github.com/jcmturner/gofork v1.0.0
+	github.com/jcmturner/gokrb5/v8 v8.4.2
+	github.com/klauspost/compress v1.12.2
+	github.com/kr/text v0.2.0 // indirect
+	github.com/pierrec/lz4 v2.6.0+incompatible
+	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
+	github.com/stretchr/testify v1.7.0
+	github.com/xdg/scram v1.0.3
+	github.com/xdg/stringprep v1.0.3 // indirect
+	golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect
+	golang.org/x/net v0.0.0-20210614182718-04defd469f4e
+	golang.org/x/text v0.3.6 // indirect
+	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 )
diff --git a/vendor/github.com/Shopify/sarama/go.sum b/vendor/github.com/Shopify/sarama/go.sum
index 4dbc6d2..a497760 100644
--- a/vendor/github.com/Shopify/sarama/go.sum
+++ b/vendor/github.com/Shopify/sarama/go.sum
@@ -1,51 +1,93 @@
-github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg=
-github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
+github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
-github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
-github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41 h1:GeinFsrjWz97fAxVUEd748aV0cYL+I6k44gFJTCVvpU=
-github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
-github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
+github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
+github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=
+github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
+github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
+github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
+github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
+github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
+github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
+github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
+github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
+github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA=
+github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
+github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
+github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
+github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
+github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
+github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
-github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
-github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
-github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/xdg/scram v1.0.3 h1:nTadYh2Fs4BK2xdldEa2g5bbaZp0/+1nJMMPtPxS/to=
+github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4=
+github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 h1:bselrhR0Or1vomJZC8ZIjWtbDmn9OYFLX5Ik9alpJpE=
-golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a h1:njMmldwFTyDLqonHMagNXKBWptTBeDZOdblgaDsNEGQ=
+golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
-gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
-gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
-gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
-gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010=
-gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
-gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
-gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
index 49b632d..ab8b701 100644
--- a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
+++ b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
@@ -2,17 +2,20 @@
 
 import (
 	"encoding/binary"
+	"errors"
 	"fmt"
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
-	"gopkg.in/jcmturner/gokrb5.v7/gssapi"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/messages"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
 	"io"
+	"math"
 	"strings"
 	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/asn1tools"
+	"github.com/jcmturner/gokrb5/v8/gssapi"
+	"github.com/jcmturner/gokrb5/v8/iana/chksumtype"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/messages"
+	"github.com/jcmturner/gokrb5/v8/types"
 )
 
 const (
@@ -33,6 +36,7 @@
 	Username           string
 	Password           string
 	Realm              string
+	DisablePAFXFAST    bool
 }
 
 type GSSAPIKerberosAuth struct {
@@ -51,15 +55,14 @@
 	Destroy()
 }
 
-/*
-*
-* Appends length in big endian before payload, and send it to kafka
-*
- */
-
+// writePackage prepends the payload length in big-endian order and sends the framed package to Kafka
 func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
-	length := len(payload)
-	finalPackage := make([]byte, length+4) //4 byte length header + payload
+	length := uint64(len(payload))
+	size := length + 4 // 4 byte length header + payload
+	if size > math.MaxInt32 {
+		return 0, errors.New("payload too large, will overflow int32")
+	}
+	finalPackage := make([]byte, size)
 	copy(finalPackage[4:], payload)
 	binary.BigEndian.PutUint32(finalPackage, uint32(length))
 	bytes, err := broker.conn.Write(finalPackage)
@@ -69,12 +72,7 @@
 	return bytes, nil
 }
 
-/*
-*
-* Read length (4 bytes) and then read the payload
-*
- */
-
+// readPackage reads payload length (4 bytes) and then reads the payload into []byte
 func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
 	bytesRead := 0
 	lengthInBytes := make([]byte, 4)
@@ -152,7 +150,7 @@
 *
  */
 func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
-	oidBytes, err := asn1.Marshal(gssapi.OID(gssapi.OIDKRB5))
+	oidBytes, err := asn1.Marshal(gssapi.OIDKRB5.OID())
 	if err != nil {
 		return nil, err
 	}
@@ -199,7 +197,6 @@
 
 /* This does the handshake for authorization */
 func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
-
 	kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
 	if err != nil {
 		Logger.Printf("Kerberos client error: %s", err)
@@ -218,7 +215,6 @@
 	spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
 
 	ticket, encKey, err := kerberosClient.GetServiceTicket(spn)
-
 	if err != nil {
 		Logger.Printf("Error getting Kerberos service ticket : %s", err)
 		return err
@@ -242,7 +238,7 @@
 		}
 		broker.updateOutgoingCommunicationMetrics(bytesWritten)
 		if krbAuth.step == GSS_API_VERIFY {
-			var bytesRead = 0
+			bytesRead := 0
 			receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
 			requestLatency := time.Since(requestTime)
 			broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
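writePackage now guards against the 4-byte length prefix overflowing an int32 before it frames the payload. The framing itself is simple enough to show standalone; this is a sketch mirroring the function above, not the function itself:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math"
)

// framePayload mirrors the framing done by writePackage above: a 4-byte
// big-endian length header followed by the payload, refusing payloads whose
// framed size would overflow an int32.
func framePayload(payload []byte) ([]byte, error) {
	length := uint64(len(payload))
	size := length + 4 // 4-byte length header + payload
	if size > math.MaxInt32 {
		return nil, errors.New("payload too large, will overflow int32")
	}
	framed := make([]byte, size)
	binary.BigEndian.PutUint32(framed, uint32(length))
	copy(framed[4:], payload)
	return framed, nil
}

func main() {
	framed, err := framePayload([]byte("kerberos token"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", framed[:4]) // 00 00 00 0e
}
```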
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go
index ce49c47..e9d9af1 100644
--- a/vendor/github.com/Shopify/sarama/heartbeat_request.go
+++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go
@@ -42,6 +42,10 @@
 	return 0
 }
 
+func (r *HeartbeatRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go
index 766f5fd..577ab72 100644
--- a/vendor/github.com/Shopify/sarama/heartbeat_response.go
+++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go
@@ -27,6 +27,10 @@
 	return 0
 }
 
+func (r *HeartbeatResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go b/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go
new file mode 100644
index 0000000..c4d05a9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go
@@ -0,0 +1,173 @@
+package sarama
+
+type IncrementalAlterConfigsOperation int8
+
+const (
+	IncrementalAlterConfigsOperationSet IncrementalAlterConfigsOperation = iota
+	IncrementalAlterConfigsOperationDelete
+	IncrementalAlterConfigsOperationAppend
+	IncrementalAlterConfigsOperationSubtract
+)
+
+// IncrementalAlterConfigsRequest is an incremental alter config request type
+type IncrementalAlterConfigsRequest struct {
+	Resources    []*IncrementalAlterConfigsResource
+	ValidateOnly bool
+}
+
+type IncrementalAlterConfigsResource struct {
+	Type          ConfigResourceType
+	Name          string
+	ConfigEntries map[string]IncrementalAlterConfigsEntry
+}
+
+type IncrementalAlterConfigsEntry struct {
+	Operation IncrementalAlterConfigsOperation
+	Value     *string
+}
+
+func (a *IncrementalAlterConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, r := range a.Resources {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putBool(a.ValidateOnly)
+	return nil
+}
+
+func (a *IncrementalAlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+	resourceCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*IncrementalAlterConfigsResource, resourceCount)
+	for i := range a.Resources {
+		r := &IncrementalAlterConfigsResource{}
+		err = r.decode(pd, version)
+		if err != nil {
+			return err
+		}
+		a.Resources[i] = r
+	}
+
+	validateOnly, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+
+	a.ValidateOnly = validateOnly
+
+	return nil
+}
+
+func (a *IncrementalAlterConfigsResource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.Type))
+
+	if err := pe.putString(a.Name); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
+		return err
+	}
+
+	for name, e := range a.ConfigEntries {
+		if err := pe.putString(name); err != nil {
+			return err
+		}
+
+		if err := e.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *IncrementalAlterConfigsResource) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	a.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		a.ConfigEntries = make(map[string]IncrementalAlterConfigsEntry, n)
+		for i := 0; i < n; i++ {
+			name, err := pd.getString()
+			if err != nil {
+				return err
+			}
+
+			var v IncrementalAlterConfigsEntry
+
+			if err := v.decode(pd, version); err != nil {
+				return err
+			}
+
+			a.ConfigEntries[name] = v
+		}
+	}
+	return err
+}
+
+func (a *IncrementalAlterConfigsEntry) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.Operation))
+
+	if err := pe.putNullableString(a.Value); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *IncrementalAlterConfigsEntry) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Operation = IncrementalAlterConfigsOperation(t)
+
+	s, err := pd.getNullableString()
+	if err != nil {
+		return err
+	}
+
+	a.Value = s
+
+	return nil
+}
+
+func (a *IncrementalAlterConfigsRequest) key() int16 {
+	return 44
+}
+
+func (a *IncrementalAlterConfigsRequest) version() int16 {
+	return 0
+}
+
+func (a *IncrementalAlterConfigsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *IncrementalAlterConfigsRequest) requiredVersion() KafkaVersion {
+	return V2_3_0_0
+}
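IncrementalAlterConfigsRequest is the KIP-339 style of config change: each entry says whether to set, delete, append, or subtract a single key, so unrelated overrides are left untouched. A construction sketch; sarama.TopicResource is assumed to be the topic ConfigResourceType constant:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	retention := "86400000"

	// Set retention.ms on a topic and delete any override of cleanup.policy,
	// validating only, without applying the change.
	req := &sarama.IncrementalAlterConfigsRequest{
		Resources: []*sarama.IncrementalAlterConfigsResource{
			{
				Type: sarama.TopicResource,
				Name: "my-topic",
				ConfigEntries: map[string]sarama.IncrementalAlterConfigsEntry{
					"retention.ms": {
						Operation: sarama.IncrementalAlterConfigsOperationSet,
						Value:     &retention,
					},
					"cleanup.policy": {
						Operation: sarama.IncrementalAlterConfigsOperationDelete,
					},
				},
			},
		},
		ValidateOnly: true,
	}

	fmt.Printf("altering %d resource(s)\n", len(req.Resources))
}
```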
diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go b/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go
new file mode 100644
index 0000000..3e8c450
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go
@@ -0,0 +1,66 @@
+package sarama
+
+import "time"
+
+// IncrementalAlterConfigsResponse is a response type for incremental alter config
+type IncrementalAlterConfigsResponse struct {
+	ThrottleTime time.Duration
+	Resources    []*AlterConfigsResourceResponse
+}
+
+func (a *IncrementalAlterConfigsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, v := range a.Resources {
+		if err := v.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *IncrementalAlterConfigsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	responseCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+	for i := range a.Resources {
+		a.Resources[i] = new(AlterConfigsResourceResponse)
+
+		if err := a.Resources[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *IncrementalAlterConfigsResponse) key() int16 {
+	return 44
+}
+
+func (a *IncrementalAlterConfigsResponse) version() int16 {
+	return 0
+}
+
+func (a *IncrementalAlterConfigsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *IncrementalAlterConfigsResponse) requiredVersion() KafkaVersion {
+	return V2_3_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
index 8ceb6c2..6894443 100644
--- a/vendor/github.com/Shopify/sarama/init_producer_id_request.go
+++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
@@ -38,6 +38,10 @@
 	return 0
 }
 
+func (i *InitProducerIDRequest) headerVersion() int16 {
+	return 1
+}
+
 func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
index 1b32eb0..3e1242b 100644
--- a/vendor/github.com/Shopify/sarama/init_producer_id_response.go
+++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
@@ -50,6 +50,10 @@
 	return 0
 }
 
+func (i *InitProducerIDResponse) headerVersion() int16 {
+	return 0
+}
+
 func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/interceptors.go b/vendor/github.com/Shopify/sarama/interceptors.go
new file mode 100644
index 0000000..d0d33e5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/interceptors.go
@@ -0,0 +1,43 @@
+package sarama
+
+// ProducerInterceptor allows you to intercept (and possibly mutate) the records
+// received by the producer before they are published to the Kafka cluster.
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
+type ProducerInterceptor interface {
+
+	// OnSend is called when the producer message is intercepted. Please avoid
+	// modifying the message until it's safe to do so, as this is _not_ a copy
+	// of the message.
+	OnSend(*ProducerMessage)
+}
+
+// ConsumerInterceptor allows you to intercept (and possibly mutate) the records
+// received by the consumer before they are sent to the messages channel.
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
+type ConsumerInterceptor interface {
+
+	// OnConsume is called when the consumed message is intercepted. Please
+	// avoid modifying the message until it's safe to do so, as this is _not_ a
+	// copy of the message.
+	OnConsume(*ConsumerMessage)
+}
+
+func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerInterceptor) {
+	defer func() {
+		if r := recover(); r != nil {
+			Logger.Printf("Error when calling producer interceptor: %s, %w\n", interceptor, r)
+		}
+	}()
+
+	interceptor.OnSend(msg)
+}
+
+func (msg *ConsumerMessage) safelyApplyInterceptor(interceptor ConsumerInterceptor) {
+	defer func() {
+		if r := recover(); r != nil {
+			Logger.Printf("Error when calling consumer interceptor: %s, %w\n", interceptor, r)
+		}
+	}()
+
+	interceptor.OnConsume(msg)
+}
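ProducerInterceptor and ConsumerInterceptor let callers observe or mutate messages in flight, and safelyApplyInterceptor shields sarama from panics raised inside them. A minimal producer-side implementation is sketched below; how it is registered (for example via an Interceptors field on the producer config in recent sarama) is assumed and not shown in this hunk:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// headerInterceptor implements sarama.ProducerInterceptor by stamping every
// outgoing message with an extra record header. OnSend mutates the message in
// place, which is why the docs above warn that it is not a copy.
type headerInterceptor struct{ origin string }

func (h *headerInterceptor) OnSend(msg *sarama.ProducerMessage) {
	msg.Headers = append(msg.Headers, sarama.RecordHeader{
		Key:   []byte("origin"),
		Value: []byte(h.origin),
	})
}

func main() {
	msg := &sarama.ProducerMessage{Topic: "my-topic", Value: sarama.StringEncoder("hello")}

	var i sarama.ProducerInterceptor = &headerInterceptor{origin: "openolt-adapter"}
	i.OnSend(msg)

	fmt.Printf("message now carries %d header(s)\n", len(msg.Headers))
}
```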
diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go
index 97e9299..3734e82 100644
--- a/vendor/github.com/Shopify/sarama/join_group_request.go
+++ b/vendor/github.com/Shopify/sarama/join_group_request.go
@@ -134,6 +134,10 @@
 	return r.Version
 }
 
+func (r *JoinGroupRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 2:
diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go
index 5752acc..54b0a45 100644
--- a/vendor/github.com/Shopify/sarama/join_group_response.go
+++ b/vendor/github.com/Shopify/sarama/join_group_response.go
@@ -123,6 +123,10 @@
 	return r.Version
 }
 
+func (r *JoinGroupResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 2:
diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/Shopify/sarama/kerberos_client.go
index 91b998f..01a5319 100644
--- a/vendor/github.com/Shopify/sarama/kerberos_client.go
+++ b/vendor/github.com/Shopify/sarama/kerberos_client.go
@@ -1,10 +1,10 @@
 package sarama
 
 import (
-	krb5client "gopkg.in/jcmturner/gokrb5.v7/client"
-	krb5config "gopkg.in/jcmturner/gokrb5.v7/config"
-	"gopkg.in/jcmturner/gokrb5.v7/keytab"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
+	krb5client "github.com/jcmturner/gokrb5/v8/client"
+	krb5config "github.com/jcmturner/gokrb5/v8/config"
+	"github.com/jcmturner/gokrb5/v8/keytab"
+	"github.com/jcmturner/gokrb5/v8/types"
 )
 
 type KerberosGoKrb5Client struct {
@@ -19,14 +19,9 @@
 	return c.Credentials.CName()
 }
 
-/*
-*
-* Create kerberos client used to obtain TGT and TGS tokens
-* used gokrb5 library, which is a pure go kerberos client with
-* some GSS-API capabilities, and SPNEGO support. Kafka does not use SPNEGO
-* it uses pure Kerberos 5 solution (RFC-4121 and RFC-4120).
-*
- */
+// NewKerberosClient creates a Kerberos client used to obtain TGT and TGS tokens.
+// It uses a pure Go Kerberos 5 implementation (RFC 4121 and RFC 4120),
+// backed by the gokrb5 library, a pure Go Kerberos client with some GSS-API capabilities.
 func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
 	cfg, err := krb5config.Load(config.KerberosConfigPath)
 	if err != nil {
@@ -42,10 +37,10 @@
 		if err != nil {
 			return nil, err
 		}
-		client = krb5client.NewClientWithKeytab(config.Username, config.Realm, kt, cfg)
+		client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
 	} else {
-		client = krb5client.NewClientWithPassword(config.Username,
-			config.Realm, config.Password, cfg)
+		client = krb5client.NewWithPassword(config.Username,
+			config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
 	}
 	return &KerberosGoKrb5Client{*client}, nil
 }
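NewKerberosClient now builds the gokrb5 v8 client and threads the new DisablePAFXFAST flag through both the keytab and the password paths. A password-based sketch using only fields that appear in this change (paths and credentials are placeholders; Login is assumed to be the KerberosClient method that obtains the TGT):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := &sarama.GSSAPIConfig{
		KerberosConfigPath: "/etc/krb5.conf", // placeholder path
		Username:           "kafka-user",
		Password:           "secret",
		Realm:              "EXAMPLE.COM",
		DisablePAFXFAST:    true, // new knob, passed through to gokrb5 v8
	}

	client, err := sarama.NewKerberosClient(cfg)
	if err != nil {
		log.Fatalf("kerberos client: %v", err)
	}

	// Obtain the TGT; service tickets are requested later during the SASL handshake.
	if err := client.Login(); err != nil {
		log.Fatalf("kerberos login: %v", err)
	}
	log.Println("obtained Kerberos TGT")
}
```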
diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go
index e177427..d7789b6 100644
--- a/vendor/github.com/Shopify/sarama/leave_group_request.go
+++ b/vendor/github.com/Shopify/sarama/leave_group_request.go
@@ -35,6 +35,10 @@
 	return 0
 }
 
+func (r *LeaveGroupRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go
index d60c626..25f8d5e 100644
--- a/vendor/github.com/Shopify/sarama/leave_group_response.go
+++ b/vendor/github.com/Shopify/sarama/leave_group_response.go
@@ -27,6 +27,10 @@
 	return 0
 }
 
+func (r *LeaveGroupResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go
index 3b16abf..4553b2d 100644
--- a/vendor/github.com/Shopify/sarama/list_groups_request.go
+++ b/vendor/github.com/Shopify/sarama/list_groups_request.go
@@ -1,7 +1,6 @@
 package sarama
 
-type ListGroupsRequest struct {
-}
+type ListGroupsRequest struct{}
 
 func (r *ListGroupsRequest) encode(pe packetEncoder) error {
 	return nil
@@ -19,6 +18,10 @@
 	return 0
 }
 
+func (r *ListGroupsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go
index 56115d4..777bae7 100644
--- a/vendor/github.com/Shopify/sarama/list_groups_response.go
+++ b/vendor/github.com/Shopify/sarama/list_groups_response.go
@@ -64,6 +64,10 @@
 	return 0
 }
 
+func (r *ListGroupsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go
new file mode 100644
index 0000000..c1ffa9b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go
@@ -0,0 +1,98 @@
+package sarama
+
+type ListPartitionReassignmentsRequest struct {
+	TimeoutMs int32
+	blocks    map[string][]int32
+	Version   int16
+}
+
+func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error {
+	pe.putInt32(r.TimeoutMs)
+
+	pe.putCompactArrayLength(len(r.blocks))
+
+	for topic, partitions := range r.blocks {
+		if err := pe.putCompactString(topic); err != nil {
+			return err
+		}
+
+		if err := pe.putCompactInt32Array(partitions); err != nil {
+			return err
+		}
+
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.TimeoutMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	topicCount, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount > 0 {
+		r.blocks = make(map[string][]int32)
+		for i := 0; i < topicCount; i++ {
+			topic, err := pd.getCompactString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getCompactArrayLength()
+			if err != nil {
+				return err
+			}
+			r.blocks[topic] = make([]int32, partitionCount)
+			for j := 0; j < partitionCount; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				r.blocks[topic][j] = partition
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+
+	return
+}
+
+func (r *ListPartitionReassignmentsRequest) key() int16 {
+	return 46
+}
+
+func (r *ListPartitionReassignmentsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ListPartitionReassignmentsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string][]int32)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = partitionIDs
+	}
+}
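ListPartitionReassignmentsRequest (Kafka API key 46, KIP-455) uses the compact/flexible encoding throughout, which is why it reports header version 2. Building one is just a matter of naming the partitions to query:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// Ask the controller which reassignments are still in flight for two
	// partitions of "my-topic".
	req := &sarama.ListPartitionReassignmentsRequest{
		TimeoutMs: 10000,
		Version:   0,
	}
	req.AddBlock("my-topic", []int32{0, 1})

	fmt.Printf("list reassignments request v%d prepared\n", req.Version)
}
```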
diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go
new file mode 100644
index 0000000..4baa6a0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go
@@ -0,0 +1,169 @@
+package sarama
+
+type PartitionReplicaReassignmentsStatus struct {
+	Replicas         []int32
+	AddingReplicas   []int32
+	RemovingReplicas []int32
+}
+
+func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error {
+	if err := pe.putCompactInt32Array(b.Replicas); err != nil {
+		return err
+	}
+	if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil {
+		return err
+	}
+	if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) {
+	if b.Replicas, err = pd.getCompactInt32Array(); err != nil {
+		return err
+	}
+
+	if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil {
+		return err
+	}
+
+	if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil {
+		return err
+	}
+
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+
+	return err
+}
+
+type ListPartitionReassignmentsResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	ErrorCode      KError
+	ErrorMessage   *string
+	TopicStatus    map[string]map[int32]*PartitionReplicaReassignmentsStatus
+}
+
+func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) {
+	if r.TopicStatus == nil {
+		r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus)
+	}
+	partitions := r.TopicStatus[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*PartitionReplicaReassignmentsStatus)
+		r.TopicStatus[topic] = partitions
+	}
+
+	partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas}
+}
+
+func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(r.ThrottleTimeMs)
+	pe.putInt16(int16(r.ErrorCode))
+	if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
+		return err
+	}
+
+	pe.putCompactArrayLength(len(r.TopicStatus))
+	for topic, partitions := range r.TopicStatus {
+		if err := pe.putCompactString(topic); err != nil {
+			return err
+		}
+		pe.putCompactArrayLength(len(partitions))
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+
+			if err := block.encode(pe); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.ErrorCode = KError(kerr)
+
+	if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
+		return err
+	}
+
+	numTopics, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics)
+	for i := 0; i < numTopics; i++ {
+		topic, err := pd.getCompactString()
+		if err != nil {
+			return err
+		}
+
+		ongoingPartitionReassignments, err := pd.getCompactArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments)
+
+		for j := 0; j < ongoingPartitionReassignments; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := &PartitionReplicaReassignmentsStatus{}
+			if err := block.decode(pd); err != nil {
+				return err
+			}
+			r.TopicStatus[topic][partition] = block
+		}
+
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *ListPartitionReassignmentsResponse) key() int16 {
+	return 46
+}
+
+func (r *ListPartitionReassignmentsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ListPartitionReassignmentsResponse) headerVersion() int16 {
+	return 1
+}
+
+func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
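The matching response groups in-flight reassignments per topic/partition, each with the current, adding, and removing replica sets. A small sketch that fills a response the way a test might and then walks TopicStatus the way a caller would:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	resp := &sarama.ListPartitionReassignmentsResponse{Version: 0}
	resp.AddBlock("my-topic", 0, []int32{1, 2, 3}, []int32{3}, []int32{1})

	for topic, partitions := range resp.TopicStatus {
		for partition, status := range partitions {
			fmt.Printf("%s/%d: replicas=%v adding=%v removing=%v\n",
				topic, partition, status.Replicas, status.AddingReplicas, status.RemovingReplicas)
		}
	}
}
```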
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
index 7c54748..fd0d1d9 100644
--- a/vendor/github.com/Shopify/sarama/message.go
+++ b/vendor/github.com/Shopify/sarama/message.go
@@ -6,15 +6,15 @@
 )
 
 const (
-	//CompressionNone no compression
+	// CompressionNone no compression
 	CompressionNone CompressionCodec = iota
-	//CompressionGZIP compression using GZIP
+	// CompressionGZIP compression using GZIP
 	CompressionGZIP
-	//CompressionSnappy compression using snappy
+	// CompressionSnappy compression using snappy
 	CompressionSnappy
-	//CompressionLZ4 compression using LZ4
+	// CompressionLZ4 compression using LZ4
 	CompressionLZ4
-	//CompressionZSTD compression using ZSTD
+	// CompressionZSTD compression using ZSTD
 	CompressionZSTD
 
 	// The lowest 3 bits contain the compression codec used for the message
@@ -42,7 +42,7 @@
 	}[int(cc)]
 }
 
-//Message is a kafka message type
+// Message is a kafka message type
 type Message struct {
 	Codec            CompressionCodec // codec used to compress the message contents
 	CompressionLevel int              // compression level
@@ -85,7 +85,6 @@
 		payload = m.compressedCache
 		m.compressedCache = nil
 	} else if m.Value != nil {
-
 		payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
 		if err != nil {
 			return err
@@ -147,18 +146,12 @@
 	// for future metrics about the compression ratio in fetch requests
 	m.compressedSize = len(m.Value)
 
-	switch m.Codec {
-	case CompressionNone:
-		// nothing to do
-	default:
-		if m.Value == nil {
-			break
-		}
-
+	if m.Value != nil && m.Codec != CompressionNone {
 		m.Value, err = decompress(m.Codec, m.Value)
 		if err != nil {
 			return err
 		}
+
 		if err := m.decodeSet(); err != nil {
 			return err
 		}
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
index 1b590d3..e835f5a 100644
--- a/vendor/github.com/Shopify/sarama/metadata_request.go
+++ b/vendor/github.com/Shopify/sarama/metadata_request.go
@@ -65,6 +65,10 @@
 	return r.Version
 }
 
+func (r *MetadataRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *MetadataRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go
index b2d532e..0bb8702 100644
--- a/vendor/github.com/Shopify/sarama/metadata_response.go
+++ b/vendor/github.com/Shopify/sarama/metadata_response.go
@@ -255,6 +255,10 @@
 	return r.Version
 }
 
+func (r *MetadataResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *MetadataResponse) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
@@ -318,5 +322,4 @@
 	pmatch.Isr = isr
 	pmatch.OfflineReplicas = offline
 	pmatch.Err = err
-
 }
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
index 4ed46a6..c2654d1 100644
--- a/vendor/github.com/Shopify/sarama/mockbroker.go
+++ b/vendor/github.com/Shopify/sarama/mockbroker.go
@@ -20,7 +20,7 @@
 
 type GSSApiHandlerFunc func([]byte) []byte
 
-type requestHandlerFunc func(req *request) (res encoder)
+type requestHandlerFunc func(req *request) (res encoderWithHeader)
 
 // RequestNotifierFunc is invoked when a mock broker processes a request successfully
 // and will provides the number of bytes read and written.
@@ -30,7 +30,7 @@
 // to facilitate testing of higher level or specialized consumers and producers
 // built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
 // but rather provides a facility to do that. It takes care of the TCP
-// transport, request unmarshaling, response marshaling, and makes it the test
+// transport, request unmarshalling, response marshalling, and makes it the test
 // writer responsibility to program correct according to the Kafka API protocol
 // MockBroker behaviour.
 //
@@ -55,7 +55,7 @@
 	port          int32
 	closing       chan none
 	stopper       chan none
-	expectations  chan encoder
+	expectations  chan encoderWithHeader
 	listener      net.Listener
 	t             TestReporter
 	latency       time.Duration
@@ -83,7 +83,7 @@
 // and uses the found MockResponse instance to generate an appropriate reply.
 // If the request type is not found in the map then nothing is sent.
 func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
-	b.setHandler(func(req *request) (res encoder) {
+	b.setHandler(func(req *request) (res encoderWithHeader) {
 		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
 		mockResponse := handlerMap[reqTypeName]
 		if mockResponse == nil {
@@ -213,11 +213,13 @@
 	return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
 }
 
-func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
+func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) {
 	defer wg.Done()
 	defer func() {
 		_ = conn.Close()
 	}()
+	s := spew.NewDefaultConfig()
+	s.MaxDepth = 1
 	Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
 	var err error
 
@@ -231,11 +233,9 @@
 		}
 	}()
 
-	resHeader := make([]byte, 8)
 	var bytesWritten int
 	var bytesRead int
 	for {
-
 		buffer, err := b.readToBytes(conn)
 		if err != nil {
 			Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
@@ -245,7 +245,6 @@
 
 		bytesWritten = 0
 		if !b.isGSSAPI(buffer) {
-
 			req, br, err := decodeRequest(bytes.NewReader(buffer))
 			bytesRead = br
 			if err != nil {
@@ -267,7 +266,12 @@
 				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
 				continue
 			}
-			Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
+			Logger.Printf(
+				"*** mockbroker/%d/%d: replied to %T with %T\n-> %s\n-> %s",
+				b.brokerID, idx, req.body, res,
+				s.Sprintf("%#v", req.body),
+				s.Sprintf("%#v", res),
+			)
 
 			encodedRes, err := encode(res, nil)
 			if err != nil {
@@ -283,8 +287,7 @@
 				continue
 			}
 
-			binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
-			binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
+			resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes)))
 			if _, err = conn.Write(resHeader); err != nil {
 				b.serverError(err)
 				break
@@ -294,7 +297,6 @@
 				break
 			}
 			bytesWritten = len(resHeader) + len(encodedRes)
-
 		} else {
 			// GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
 			// History is not kept for this kind of request, as it is only used to test the GSSAPI authentication mechanism.
@@ -317,12 +319,29 @@
 			b.notifier(bytesRead, bytesWritten)
 		}
 		b.lock.Unlock()
-
 	}
 	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
 }
 
-func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
+func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte {
+	headerLength := uint32(8)
+
+	if headerVersion >= 1 {
+		headerLength = 9
+	}
+
+	resHeader := make([]byte, headerLength)
+	binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4)
+	binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId))
+
+	if headerVersion >= 1 {
+		binary.PutUvarint(resHeader[8:], 0)
+	}
+
+	return resHeader
+}
+
+func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) {
 	select {
 	case res, ok := <-b.expectations:
 		if !ok {
@@ -377,7 +396,7 @@
 		stopper:      make(chan none),
 		t:            t,
 		brokerID:     brokerID,
-		expectations: make(chan encoder, 512),
+		expectations: make(chan encoderWithHeader, 512),
 		listener:     listener,
 	}
 	broker.handler = broker.defaultRequestHandler
@@ -398,6 +417,6 @@
 	return broker
 }
 
-func (b *MockBroker) Returns(e encoder) {
+func (b *MockBroker) Returns(e encoderWithHeader) {
 	b.expectations <- e
 }
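
The mock broker now derives the response header size from the response's `headerVersion()`: 8 bytes (length prefix plus correlation id) for header v0, and 9 bytes for v1 and above, where the extra byte is an empty tagged-field count. A small sketch that mirrors `encodeHeader` above and prints both layouts; the correlation id and payload length are illustrative values.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// buildRespHeader mirrors MockBroker.encodeHeader: a 4-byte big-endian length
// (payload + header - 4), the 4-byte correlation id, and, for header version
// >= 1, one extra byte holding an empty tagged-field count (0).
func buildRespHeader(headerVersion int16, correlationID int32, payloadLen uint32) []byte {
	headerLen := uint32(8)
	if headerVersion >= 1 {
		headerLen = 9
	}
	hdr := make([]byte, headerLen)
	binary.BigEndian.PutUint32(hdr, payloadLen+headerLen-4)
	binary.BigEndian.PutUint32(hdr[4:], uint32(correlationID))
	if headerVersion >= 1 {
		binary.PutUvarint(hdr[8:], 0) // no tagged fields
	}
	return hdr
}

func main() {
	fmt.Printf("v0: % x\n", buildRespHeader(0, 7, 10)) // 8-byte header
	fmt.Printf("v1: % x\n", buildRespHeader(1, 7, 10)) // 9-byte header, trailing 0x00
}
```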
diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/Shopify/sarama/mockkerberos.go
index affeb2d..a43607e 100644
--- a/vendor/github.com/Shopify/sarama/mockkerberos.go
+++ b/vendor/github.com/Shopify/sarama/mockkerberos.go
@@ -3,11 +3,12 @@
 import (
 	"encoding/binary"
 	"encoding/hex"
-	"gopkg.in/jcmturner/gokrb5.v7/credentials"
-	"gopkg.in/jcmturner/gokrb5.v7/gssapi"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/messages"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
+
+	"github.com/jcmturner/gokrb5/v8/credentials"
+	"github.com/jcmturner/gokrb5/v8/gssapi"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/messages"
+	"github.com/jcmturner/gokrb5/v8/types"
 )
 
 type KafkaGSSAPIHandler struct {
@@ -26,7 +27,7 @@
 		return []byte{0x00, 0x00, 0x00, 0x01, 0xAD}
 	}
 
-	var pack = gssapi.WrapToken{
+	pack := gssapi.WrapToken{
 		Flags:     KRB5_USER_AUTH,
 		EC:        12,
 		RRC:       0,
@@ -55,7 +56,6 @@
 }
 
 type MockKerberosClient struct {
-	asReqBytes  string
 	asRepBytes  string
 	ASRep       messages.ASRep
 	credentials *credentials.Credentials
@@ -108,8 +108,9 @@
 func (c *MockKerberosClient) Domain() string {
 	return "EXAMPLE.COM"
 }
+
 func (c *MockKerberosClient) CName() types.PrincipalName {
-	var p = types.PrincipalName{
+	p := types.PrincipalName{
 		NameType: KRB5_USER_AUTH,
 		NameString: []string{
 			"kafka",
@@ -118,6 +119,7 @@
 	}
 	return p
 }
+
 func (c *MockKerberosClient) Destroy() {
 	// Do nothing.
 }
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
index c78f0ac..6654ed0 100644
--- a/vendor/github.com/Shopify/sarama/mockresponses.go
+++ b/vendor/github.com/Shopify/sarama/mockresponses.go
@@ -18,20 +18,20 @@
 // allows generating a response based on a request body. MockResponses are used
 // to program behavior of MockBroker in tests.
 type MockResponse interface {
-	For(reqBody versionedDecoder) (res encoder)
+	For(reqBody versionedDecoder) (res encoderWithHeader)
 }
 
 // MockWrapper is a mock response builder that returns a particular concrete
 // response regardless of the actual request passed to the `For` method.
 type MockWrapper struct {
-	res encoder
+	res encoderWithHeader
 }
 
-func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
+func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) {
 	return mw.res
 }
 
-func NewMockWrapper(res encoder) *MockWrapper {
+func NewMockWrapper(res encoderWithHeader) *MockWrapper {
 	return &MockWrapper{res: res}
 }
 
@@ -50,7 +50,7 @@
 		switch res := res.(type) {
 		case MockResponse:
 			ms.responses[i] = res
-		case encoder:
+		case encoderWithHeader:
 			ms.responses[i] = NewMockWrapper(res)
 		default:
 			panic(fmt.Sprintf("Unexpected response type: %T", res))
@@ -59,7 +59,7 @@
 	return ms
 }
 
-func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
+func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) {
 	res = mc.responses[0].For(reqBody)
 	if len(mc.responses) > 1 {
 		mc.responses = mc.responses[1:]
@@ -79,7 +79,7 @@
 	}
 }
 
-func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoder {
+func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	request := reqBody.(*ListGroupsRequest)
 	_ = request
 	response := &ListGroupsResponse{
@@ -110,7 +110,7 @@
 	return m
 }
 
-func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoder {
+func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	request := reqBody.(*DescribeGroupsRequest)
 
 	response := &DescribeGroupsResponse{}
@@ -166,7 +166,7 @@
 	return mmr
 }
 
-func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
+func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	metadataRequest := reqBody.(*MetadataRequest)
 	metadataResponse := &MetadataResponse{
 		Version:      metadataRequest.version(),
@@ -177,8 +177,8 @@
 	}
 
 	// Generate set of replicas
-	replicas := []int32{}
-	offlineReplicas := []int32{}
+	var replicas []int32
+	var offlineReplicas []int32
 	for _, brokerID := range mmr.brokers {
 		replicas = append(replicas, brokerID)
 	}
@@ -233,7 +233,7 @@
 	return mor
 }
 
-func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
+func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	offsetRequest := reqBody.(*OffsetRequest)
 	offsetResponse := &OffsetResponse{Version: mor.version}
 	for topic, partitions := range offsetRequest.blocks {
@@ -309,7 +309,7 @@
 	return mfr
 }
 
-func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
+func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	fetchRequest := reqBody.(*FetchRequest)
 	res := &FetchResponse{
 		Version: mfr.version,
@@ -393,7 +393,7 @@
 	return mr
 }
 
-func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*ConsumerMetadataRequest)
 	group := req.ConsumerGroup
 	res := &ConsumerMetadataResponse{}
@@ -442,7 +442,7 @@
 	return mr
 }
 
-func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*FindCoordinatorRequest)
 	res := &FindCoordinatorResponse{}
 	var v interface{}
@@ -489,7 +489,7 @@
 	return mr
 }
 
-func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*OffsetCommitRequest)
 	group := req.ConsumerGroup
 	res := &OffsetCommitResponse{}
@@ -546,7 +546,7 @@
 	return mr
 }
 
-func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*ProduceRequest)
 	res := &ProduceResponse{
 		Version: mr.version,
@@ -605,7 +605,7 @@
 	return mr
 }
 
-func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*OffsetFetchRequest)
 	group := req.ConsumerGroup
 	res := &OffsetFetchResponse{Version: req.Version}
@@ -630,7 +630,7 @@
 	return &MockCreateTopicsResponse{t: t}
 }
 
-func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*CreateTopicsRequest)
 	res := &CreateTopicsResponse{
 		Version: req.Version,
@@ -659,7 +659,7 @@
 	return &MockDeleteTopicsResponse{t: t}
 }
 
-func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*DeleteTopicsRequest)
 	res := &DeleteTopicsResponse{}
 	res.TopicErrorCodes = make(map[string]KError)
@@ -667,6 +667,7 @@
 	for _, topic := range req.Topics {
 		res.TopicErrorCodes[topic] = ErrNoError
 	}
+	res.Version = req.Version
 	return res
 }
 
@@ -678,7 +679,7 @@
 	return &MockCreatePartitionsResponse{t: t}
 }
 
-func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*CreatePartitionsRequest)
 	res := &CreatePartitionsResponse{}
 	res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
@@ -697,6 +698,43 @@
 	return res
 }
 
+type MockAlterPartitionReassignmentsResponse struct {
+	t TestReporter
+}
+
+func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse {
+	return &MockAlterPartitionReassignmentsResponse{t: t}
+}
+
+func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*AlterPartitionReassignmentsRequest)
+	_ = req
+	res := &AlterPartitionReassignmentsResponse{}
+	return res
+}
+
+type MockListPartitionReassignmentsResponse struct {
+	t TestReporter
+}
+
+func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse {
+	return &MockListPartitionReassignmentsResponse{t: t}
+}
+
+func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*ListPartitionReassignmentsRequest)
+	_ = req
+	res := &ListPartitionReassignmentsResponse{}
+
+	for topic, partitions := range req.blocks {
+		for _, partition := range partitions {
+			res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2})
+		}
+	}
+
+	return res
+}
+
 type MockDeleteRecordsResponse struct {
 	t TestReporter
 }
@@ -705,7 +743,7 @@
 	return &MockDeleteRecordsResponse{t: t}
 }
 
-func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*DeleteRecordsRequest)
 	res := &DeleteRecordsResponse{}
 	res.Topics = make(map[string]*DeleteRecordsResponseTopic)
@@ -728,31 +766,87 @@
 	return &MockDescribeConfigsResponse{t: t}
 }
 
-func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*DescribeConfigsRequest)
-	res := &DescribeConfigsResponse{}
+	res := &DescribeConfigsResponse{
+		Version: req.Version,
+	}
+
+	includeSynonyms := req.Version > 0
+	includeSource := req.Version > 0
 
 	for _, r := range req.Resources {
 		var configEntries []*ConfigEntry
 		switch r.Type {
-		case TopicResource:
+		case BrokerResource:
 			configEntries = append(configEntries,
-				&ConfigEntry{Name: "max.message.bytes",
-					Value:     "1000000",
-					ReadOnly:  false,
-					Default:   true,
-					Sensitive: false,
-				}, &ConfigEntry{Name: "retention.ms",
-					Value:     "5000",
-					ReadOnly:  false,
-					Default:   false,
-					Sensitive: false,
-				}, &ConfigEntry{Name: "password",
-					Value:     "12345",
-					ReadOnly:  false,
-					Default:   false,
-					Sensitive: true,
-				})
+				&ConfigEntry{
+					Name:     "min.insync.replicas",
+					Value:    "2",
+					ReadOnly: false,
+					Default:  false,
+				},
+			)
+			res.Resources = append(res.Resources, &ResourceResponse{
+				Name:    r.Name,
+				Configs: configEntries,
+			})
+		case BrokerLoggerResource:
+			configEntries = append(configEntries,
+				&ConfigEntry{
+					Name:     "kafka.controller.KafkaController",
+					Value:    "DEBUG",
+					ReadOnly: false,
+					Default:  false,
+				},
+			)
+			res.Resources = append(res.Resources, &ResourceResponse{
+				Name:    r.Name,
+				Configs: configEntries,
+			})
+		case TopicResource:
+			maxMessageBytes := &ConfigEntry{
+				Name:      "max.message.bytes",
+				Value:     "1000000",
+				ReadOnly:  false,
+				Default:   !includeSource,
+				Sensitive: false,
+			}
+			if includeSource {
+				maxMessageBytes.Source = SourceDefault
+			}
+			if includeSynonyms {
+				maxMessageBytes.Synonyms = []*ConfigSynonym{
+					{
+						ConfigName:  "max.message.bytes",
+						ConfigValue: "500000",
+					},
+				}
+			}
+			retentionMs := &ConfigEntry{
+				Name:      "retention.ms",
+				Value:     "5000",
+				ReadOnly:  false,
+				Default:   false,
+				Sensitive: false,
+			}
+			if includeSynonyms {
+				retentionMs.Synonyms = []*ConfigSynonym{
+					{
+						ConfigName:  "log.retention.ms",
+						ConfigValue: "2500",
+					},
+				}
+			}
+			password := &ConfigEntry{
+				Name:      "password",
+				Value:     "12345",
+				ReadOnly:  false,
+				Default:   false,
+				Sensitive: true,
+			}
+			configEntries = append(
+				configEntries, maxMessageBytes, retentionMs, password)
 			res.Resources = append(res.Resources, &ResourceResponse{
 				Name:    r.Name,
 				Configs: configEntries,
@@ -762,6 +856,31 @@
 	return res
 }
 
+type MockDescribeConfigsResponseWithErrorCode struct {
+	t TestReporter
+}
+
+func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode {
+	return &MockDescribeConfigsResponseWithErrorCode{t: t}
+}
+
+func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DescribeConfigsRequest)
+	res := &DescribeConfigsResponse{
+		Version: req.Version,
+	}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &ResourceResponse{
+			Name:      r.Name,
+			Type:      r.Type,
+			ErrorCode: 83,
+			ErrorMsg:  "",
+		})
+	}
+	return res
+}
+
 type MockAlterConfigsResponse struct {
 	t TestReporter
 }
@@ -770,19 +889,43 @@
 	return &MockAlterConfigsResponse{t: t}
 }
 
-func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*AlterConfigsRequest)
 	res := &AlterConfigsResponse{}
 
 	for _, r := range req.Resources {
-		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{Name: r.Name,
-			Type:     TopicResource,
+		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+			Name:     r.Name,
+			Type:     r.Type,
 			ErrorMsg: "",
 		})
 	}
 	return res
 }
 
+type MockAlterConfigsResponseWithErrorCode struct {
+	t TestReporter
+}
+
+func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode {
+	return &MockAlterConfigsResponseWithErrorCode{t: t}
+}
+
+func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*AlterConfigsRequest)
+	res := &AlterConfigsResponse{}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+			Name:      r.Name,
+			Type:      r.Type,
+			ErrorCode: 83,
+			ErrorMsg:  "",
+		})
+	}
+	return res
+}
+
 type MockCreateAclsResponse struct {
 	t TestReporter
 }
@@ -791,7 +934,7 @@
 	return &MockCreateAclsResponse{t: t}
 }
 
-func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*CreateAclsRequest)
 	res := &CreateAclsResponse{}
 
@@ -809,17 +952,35 @@
 	return &MockListAclsResponse{t: t}
 }
 
-func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*DescribeAclsRequest)
 	res := &DescribeAclsResponse{}
-
 	res.Err = ErrNoError
 	acl := &ResourceAcls{}
-	acl.Resource.ResourceName = *req.ResourceName
+	if req.ResourceName != nil {
+		acl.Resource.ResourceName = *req.ResourceName
+	}
+	acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter
 	acl.Resource.ResourceType = req.ResourceType
-	acl.Acls = append(acl.Acls, &Acl{})
-	res.ResourceAcls = append(res.ResourceAcls, acl)
 
+	host := "*"
+	if req.Host != nil {
+		host = *req.Host
+	}
+
+	principal := "User:test"
+	if req.Principal != nil {
+		principal = *req.Principal
+	}
+
+	permissionType := req.PermissionType
+	if permissionType == AclPermissionAny {
+		permissionType = AclPermissionAllow
+	}
+
+	acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal})
+	res.ResourceAcls = append(res.ResourceAcls, acl)
+	res.Version = int16(req.Version)
 	return res
 }
 
@@ -833,7 +994,7 @@
 	return &MockSaslAuthenticateResponse{t: t}
 }
 
-func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoder {
+func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	res := &SaslAuthenticateResponse{}
 	res.Err = msar.kerror
 	res.SaslAuthBytes = msar.saslAuthBytes
@@ -864,7 +1025,7 @@
 	return &MockSaslHandshakeResponse{t: t}
 }
 
-func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoder {
+func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	res := &SaslHandshakeResponse{}
 	res.Err = mshr.kerror
 	res.EnabledMechanisms = mshr.enabledMechanisms
@@ -885,7 +1046,7 @@
 	return &MockDeleteAclsResponse{t: t}
 }
 
-func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*DeleteAclsRequest)
 	res := &DeleteAclsResponse{}
 
@@ -894,6 +1055,7 @@
 		response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError})
 		res.FilterResponses = append(res.FilterResponses, response)
 	}
+	res.Version = int16(req.Version)
 	return res
 }
 
@@ -910,7 +1072,7 @@
 	return m
 }
 
-func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoder {
+func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	resp := &DeleteGroupsResponse{
 		GroupErrorCodes: map[string]KError{},
 	}
@@ -919,3 +1081,193 @@
 	}
 	return resp
 }
+
+type MockJoinGroupResponse struct {
+	t TestReporter
+
+	ThrottleTime  int32
+	Err           KError
+	GenerationId  int32
+	GroupProtocol string
+	LeaderId      string
+	MemberId      string
+	Members       map[string][]byte
+}
+
+func NewMockJoinGroupResponse(t TestReporter) *MockJoinGroupResponse {
+	return &MockJoinGroupResponse{
+		t:       t,
+		Members: make(map[string][]byte),
+	}
+}
+
+func (m *MockJoinGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*JoinGroupRequest)
+	resp := &JoinGroupResponse{
+		Version:       req.Version,
+		ThrottleTime:  m.ThrottleTime,
+		Err:           m.Err,
+		GenerationId:  m.GenerationId,
+		GroupProtocol: m.GroupProtocol,
+		LeaderId:      m.LeaderId,
+		MemberId:      m.MemberId,
+		Members:       m.Members,
+	}
+	return resp
+}
+
+func (m *MockJoinGroupResponse) SetThrottleTime(t int32) *MockJoinGroupResponse {
+	m.ThrottleTime = t
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetError(kerr KError) *MockJoinGroupResponse {
+	m.Err = kerr
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetGenerationId(id int32) *MockJoinGroupResponse {
+	m.GenerationId = id
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetGroupProtocol(proto string) *MockJoinGroupResponse {
+	m.GroupProtocol = proto
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetLeaderId(id string) *MockJoinGroupResponse {
+	m.LeaderId = id
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetMemberId(id string) *MockJoinGroupResponse {
+	m.MemberId = id
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMetadata) *MockJoinGroupResponse {
+	bin, err := encode(meta, nil)
+	if err != nil {
+		panic(fmt.Sprintf("error encoding member metadata: %v", err))
+	}
+	m.Members[id] = bin
+	return m
+}
+
+type MockLeaveGroupResponse struct {
+	t TestReporter
+
+	Err KError
+}
+
+func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse {
+	return &MockLeaveGroupResponse{t: t}
+}
+
+func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	resp := &LeaveGroupResponse{
+		Err: m.Err,
+	}
+	return resp
+}
+
+func (m *MockLeaveGroupResponse) SetError(kerr KError) *MockLeaveGroupResponse {
+	m.Err = kerr
+	return m
+}
+
+type MockSyncGroupResponse struct {
+	t TestReporter
+
+	Err              KError
+	MemberAssignment []byte
+}
+
+func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse {
+	return &MockSyncGroupResponse{t: t}
+}
+
+func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	resp := &SyncGroupResponse{
+		Err:              m.Err,
+		MemberAssignment: m.MemberAssignment,
+	}
+	return resp
+}
+
+func (m *MockSyncGroupResponse) SetError(kerr KError) *MockSyncGroupResponse {
+	m.Err = kerr
+	return m
+}
+
+func (m *MockSyncGroupResponse) SetMemberAssignment(assignment *ConsumerGroupMemberAssignment) *MockSyncGroupResponse {
+	bin, err := encode(assignment, nil)
+	if err != nil {
+		panic(fmt.Sprintf("error encoding member assignment: %v", err))
+	}
+	m.MemberAssignment = bin
+	return m
+}
+
+type MockHeartbeatResponse struct {
+	t TestReporter
+
+	Err KError
+}
+
+func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse {
+	return &MockHeartbeatResponse{t: t}
+}
+
+func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	resp := &HeartbeatResponse{}
+	return resp
+}
+
+func (m *MockHeartbeatResponse) SetError(kerr KError) *MockHeartbeatResponse {
+	m.Err = kerr
+	return m
+}
+
+type MockDescribeLogDirsResponse struct {
+	t       TestReporter
+	logDirs []DescribeLogDirsResponseDirMetadata
+}
+
+func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse {
+	return &MockDescribeLogDirsResponse{t: t}
+}
+
+func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse {
+	var topics []DescribeLogDirsResponseTopic
+	for topic := range topicPartitions {
+		var partitions []DescribeLogDirsResponsePartition
+		for i := 0; i < topicPartitions[topic]; i++ {
+			partitions = append(partitions, DescribeLogDirsResponsePartition{
+				PartitionID: int32(i),
+				IsTemporary: false,
+				OffsetLag:   int64(0),
+				Size:        int64(1234),
+			})
+		}
+		topics = append(topics, DescribeLogDirsResponseTopic{
+			Topic:      topic,
+			Partitions: partitions,
+		})
+	}
+	logDir := DescribeLogDirsResponseDirMetadata{
+		ErrorCode: ErrNoError,
+		Path:      logDirPath,
+		Topics:    topics,
+	}
+	m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir}
+	return m
+}
+
+func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	resp := &DescribeLogDirsResponse{
+		LogDirs: m.logDirs,
+	}
+	return resp
+}
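
All mock responses now return `encoderWithHeader`, so the broker can pick the right response header per API. The sketch below shows the usual test wiring through `SetHandlerByMap`, whose keys are request type names; `SetBroker` and `SetLeader` are assumed to be the `MockMetadataResponse` builder methods defined elsewhere in this file, and the topic/group names are placeholders.

```go
package sarama_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

// TestMockBrokerSketch is a minimal, hypothetical example of driving the mock
// broker: one metadata handler is registered, then a real client connects to
// the mock broker's address.
func TestMockBrokerSketch(t *testing.T) {
	mb := sarama.NewMockBroker(t, 1)
	defer mb.Close()

	mb.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(mb.Addr(), mb.BrokerID()).
			SetLeader("my-topic", 0, mb.BrokerID()),
	})

	client, err := sarama.NewClient([]string{mb.Addr()}, sarama.NewConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()
}
```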
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
index 5732ed9..9931cad 100644
--- a/vendor/github.com/Shopify/sarama/offset_commit_request.go
+++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go
@@ -170,6 +170,10 @@
 	return r.Version
 }
 
+func (r *OffsetCommitRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go
index e842298..342260e 100644
--- a/vendor/github.com/Shopify/sarama/offset_commit_response.go
+++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go
@@ -94,6 +94,10 @@
 	return r.Version
 }
 
+func (r *OffsetCommitResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
index 6860824..7e147eb 100644
--- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
@@ -3,60 +3,155 @@
 type OffsetFetchRequest struct {
 	Version       int16
 	ConsumerGroup string
+	RequireStable bool // requires v7+
 	partitions    map[string][]int32
 }
 
 func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
-	if r.Version < 0 || r.Version > 5 {
+	if r.Version < 0 || r.Version > 7 {
 		return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
 	}
 
-	if err = pe.putString(r.ConsumerGroup); err != nil {
+	isFlexible := r.Version >= 6
+
+	if isFlexible {
+		err = pe.putCompactString(r.ConsumerGroup)
+	} else {
+		err = pe.putString(r.ConsumerGroup)
+	}
+	if err != nil {
 		return err
 	}
 
-	if r.Version >= 2 && r.partitions == nil {
-		pe.putInt32(-1)
-	} else {
-		if err = pe.putArrayLength(len(r.partitions)); err != nil {
-			return err
+	if isFlexible {
+		if r.partitions == nil {
+			pe.putUVarint(0)
+		} else {
+			pe.putCompactArrayLength(len(r.partitions))
 		}
-		for topic, partitions := range r.partitions {
-			if err = pe.putString(topic); err != nil {
-				return err
-			}
-			if err = pe.putInt32Array(partitions); err != nil {
+	} else {
+		if r.partitions == nil && r.Version >= 2 {
+			pe.putInt32(-1)
+		} else {
+			if err = pe.putArrayLength(len(r.partitions)); err != nil {
 				return err
 			}
 		}
 	}
+
+	for topic, partitions := range r.partitions {
+		if isFlexible {
+			err = pe.putCompactString(topic)
+		} else {
+			err = pe.putString(topic)
+		}
+		if err != nil {
+			return err
+		}
+
+		//
+
+		if isFlexible {
+			err = pe.putCompactInt32Array(partitions)
+		} else {
+			err = pe.putInt32Array(partitions)
+		}
+		if err != nil {
+			return err
+		}
+
+		if isFlexible {
+			pe.putEmptyTaggedFieldArray()
+		}
+	}
+
+	if r.RequireStable && r.Version < 7 {
+		return PacketEncodingError{"requireStable is not supported. use version 7 or later"}
+	}
+
+	if r.Version >= 7 {
+		pe.putBool(r.RequireStable)
+	}
+
+	if isFlexible {
+		pe.putEmptyTaggedFieldArray()
+	}
+
 	return nil
 }
 
 func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
 	r.Version = version
-	if r.ConsumerGroup, err = pd.getString(); err != nil {
-		return err
+	isFlexible := r.Version >= 6
+	if isFlexible {
+		r.ConsumerGroup, err = pd.getCompactString()
+	} else {
+		r.ConsumerGroup, err = pd.getString()
 	}
-	partitionCount, err := pd.getArrayLength()
 	if err != nil {
 		return err
 	}
+
+	var partitionCount int
+
+	if isFlexible {
+		partitionCount, err = pd.getCompactArrayLength()
+	} else {
+		partitionCount, err = pd.getArrayLength()
+	}
+	if err != nil {
+		return err
+	}
+
 	if (partitionCount == 0 && version < 2) || partitionCount < 0 {
 		return nil
 	}
-	r.partitions = make(map[string][]int32)
+
+	r.partitions = make(map[string][]int32, partitionCount)
 	for i := 0; i < partitionCount; i++ {
-		topic, err := pd.getString()
+		var topic string
+		if isFlexible {
+			topic, err = pd.getCompactString()
+		} else {
+			topic, err = pd.getString()
+		}
 		if err != nil {
 			return err
 		}
-		partitions, err := pd.getInt32Array()
+
+		var partitions []int32
+		if isFlexible {
+			partitions, err = pd.getCompactInt32Array()
+		} else {
+			partitions, err = pd.getInt32Array()
+		}
 		if err != nil {
 			return err
 		}
+		if isFlexible {
+			_, err = pd.getEmptyTaggedFieldArray()
+			if err != nil {
+				return err
+			}
+		}
+
 		r.partitions[topic] = partitions
 	}
+
+	if r.Version >= 7 {
+		r.RequireStable, err = pd.getBool()
+		if err != nil {
+			return err
+		}
+	}
+
+	if isFlexible {
+		_, err = pd.getEmptyTaggedFieldArray()
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
@@ -68,6 +163,14 @@
 	return r.Version
 }
 
+func (r *OffsetFetchRequest) headerVersion() int16 {
+	if r.Version >= 6 {
+		return 2
+	}
+
+	return 1
+}
+
 func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
@@ -80,6 +183,10 @@
 		return V2_0_0_0
 	case 5:
 		return V2_1_0_0
+	case 6:
+		return V2_4_0_0
+	case 7:
+		return V2_5_0_0
 	default:
 		return MinVersion
 	}
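
`OffsetFetchRequest` now understands v6 (flexible encoding: compact strings, compact arrays, tagged fields) and v7 (`RequireStable`). A short sketch of building such a request directly; `AddPartition` is assumed to be the helper defined in this file outside the hunks shown, and the group/topic names are placeholders.

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// Builds a v7 OffsetFetchRequest. v6+ switches the wire format to the flexible
// layout, and only v7+ carries RequireStable (setting it on an older version
// would be rejected at encode time).
func main() {
	req := &sarama.OffsetFetchRequest{
		Version:       7,
		ConsumerGroup: "my-group",
		RequireStable: true,
	}
	req.AddPartition("my-topic", 0)
	fmt.Println("request version:", req.Version)
}
```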
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
index 9e25702..1944922 100644
--- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
@@ -8,6 +8,8 @@
 }
 
 func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	isFlexible := version >= 6
+
 	b.Offset, err = pd.getInt64()
 	if err != nil {
 		return err
@@ -20,7 +22,11 @@
 		}
 	}
 
-	b.Metadata, err = pd.getString()
+	if isFlexible {
+		b.Metadata, err = pd.getCompactString()
+	} else {
+		b.Metadata, err = pd.getString()
+	}
 	if err != nil {
 		return err
 	}
@@ -31,23 +37,37 @@
 	}
 	b.Err = KError(tmp)
 
+	if isFlexible {
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
 func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	isFlexible := version >= 6
 	pe.putInt64(b.Offset)
 
 	if version >= 5 {
 		pe.putInt32(b.LeaderEpoch)
 	}
-
-	err = pe.putString(b.Metadata)
+	if isFlexible {
+		err = pe.putCompactString(b.Metadata)
+	} else {
+		err = pe.putString(b.Metadata)
+	}
 	if err != nil {
 		return err
 	}
 
 	pe.putInt16(int16(b.Err))
 
+	if isFlexible {
+		pe.putEmptyTaggedFieldArray()
+	}
+
 	return nil
 }
 
@@ -58,19 +78,37 @@
 	Err            KError
 }
 
-func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
+func (r *OffsetFetchResponse) encode(pe packetEncoder) (err error) {
+	isFlexible := r.Version >= 6
+
 	if r.Version >= 3 {
 		pe.putInt32(r.ThrottleTimeMs)
 	}
-
-	if err := pe.putArrayLength(len(r.Blocks)); err != nil {
+	if isFlexible {
+		pe.putCompactArrayLength(len(r.Blocks))
+	} else {
+		err = pe.putArrayLength(len(r.Blocks))
+	}
+	if err != nil {
 		return err
 	}
+
 	for topic, partitions := range r.Blocks {
-		if err := pe.putString(topic); err != nil {
+		if isFlexible {
+			err = pe.putCompactString(topic)
+		} else {
+			err = pe.putString(topic)
+		}
+		if err != nil {
 			return err
 		}
-		if err := pe.putArrayLength(len(partitions)); err != nil {
+
+		if isFlexible {
+			pe.putCompactArrayLength(len(partitions))
+		} else {
+			err = pe.putArrayLength(len(partitions))
+		}
+		if err != nil {
 			return err
 		}
 		for partition, block := range partitions {
@@ -79,15 +117,22 @@
 				return err
 			}
 		}
+		if isFlexible {
+			pe.putEmptyTaggedFieldArray()
+		}
 	}
 	if r.Version >= 2 {
 		pe.putInt16(int16(r.Err))
 	}
+	if isFlexible {
+		pe.putEmptyTaggedFieldArray()
+	}
 	return nil
 }
 
 func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
 	r.Version = version
+	isFlexible := version >= 6
 
 	if version >= 3 {
 		r.ThrottleTimeMs, err = pd.getInt32()
@@ -96,7 +141,12 @@
 		}
 	}
 
-	numTopics, err := pd.getArrayLength()
+	var numTopics int
+	if isFlexible {
+		numTopics, err = pd.getCompactArrayLength()
+	} else {
+		numTopics, err = pd.getArrayLength()
+	}
 	if err != nil {
 		return err
 	}
@@ -104,22 +154,30 @@
 	if numTopics > 0 {
 		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
 		for i := 0; i < numTopics; i++ {
-			name, err := pd.getString()
+			var name string
+			if isFlexible {
+				name, err = pd.getCompactString()
+			} else {
+				name, err = pd.getString()
+			}
 			if err != nil {
 				return err
 			}
 
-			numBlocks, err := pd.getArrayLength()
+			var numBlocks int
+			if isFlexible {
+				numBlocks, err = pd.getCompactArrayLength()
+			} else {
+				numBlocks, err = pd.getArrayLength()
+			}
 			if err != nil {
 				return err
 			}
 
-			if numBlocks == 0 {
-				r.Blocks[name] = nil
-				continue
+			r.Blocks[name] = nil
+			if numBlocks > 0 {
+				r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
 			}
-			r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
-
 			for j := 0; j < numBlocks; j++ {
 				id, err := pd.getInt32()
 				if err != nil {
@@ -131,8 +189,15 @@
 				if err != nil {
 					return err
 				}
+
 				r.Blocks[name][id] = block
 			}
+
+			if isFlexible {
+				if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+					return err
+				}
+			}
 		}
 	}
 
@@ -144,6 +209,12 @@
 		r.Err = KError(kerr)
 	}
 
+	if isFlexible {
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
@@ -155,6 +226,14 @@
 	return r.Version
 }
 
+func (r *OffsetFetchResponse) headerVersion() int16 {
+	if r.Version >= 6 {
+		return 1
+	}
+
+	return 0
+}
+
 func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
@@ -167,6 +246,10 @@
 		return V2_0_0_0
 	case 5:
 		return V2_1_0_0
+	case 6:
+		return V2_4_0_0
+	case 7:
+		return V2_5_0_0
 	default:
 		return MinVersion
 	}
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
index 923972f..4f480a0 100644
--- a/vendor/github.com/Shopify/sarama/offset_manager.go
+++ b/vendor/github.com/Shopify/sarama/offset_manager.go
@@ -19,6 +19,10 @@
 	// will otherwise leak memory. You must call this after all the
 	// PartitionOffsetManagers are closed.
 	Close() error
+
+	// Commit commits the offsets. This method can be used if AutoCommit.Enable is
+	// set to false.
+	Commit()
 }
 
 type offsetManager struct {
@@ -58,7 +62,6 @@
 		client: client,
 		conf:   conf,
 		group:  group,
-		ticker: time.NewTicker(conf.Consumer.Offsets.CommitInterval),
 		poms:   make(map[string]map[int32]*partitionOffsetManager),
 
 		memberID:   memberID,
@@ -67,7 +70,10 @@
 		closing: make(chan none),
 		closed:  make(chan none),
 	}
-	go withRecover(om.mainLoop)
+	if conf.Consumer.Offsets.AutoCommit.Enable {
+		om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval)
+		go withRecover(om.mainLoop)
+	}
 
 	return om, nil
 }
@@ -99,16 +105,20 @@
 	om.closeOnce.Do(func() {
 		// exit the mainLoop
 		close(om.closing)
-		<-om.closed
+		if om.conf.Consumer.Offsets.AutoCommit.Enable {
+			<-om.closed
+		}
 
 		// mark all POMs as closed
 		om.asyncClosePOMs()
 
 		// flush one last time
-		for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
-			om.flushToBroker()
-			if om.releasePOMs(false) == 0 {
-				break
+		if om.conf.Consumer.Offsets.AutoCommit.Enable {
+			for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
+				om.flushToBroker()
+				if om.releasePOMs(false) == 0 {
+					break
+				}
 			}
 		}
 
@@ -225,14 +235,18 @@
 	for {
 		select {
 		case <-om.ticker.C:
-			om.flushToBroker()
-			om.releasePOMs(false)
+			om.Commit()
 		case <-om.closing:
 			return
 		}
 	}
 }
 
+func (om *offsetManager) Commit() {
+	om.flushToBroker()
+	om.releasePOMs(false)
+}
+
 func (om *offsetManager) flushToBroker() {
 	req := om.constructRequest()
 	if req == nil {
@@ -275,7 +289,6 @@
 			ConsumerID:              om.memberID,
 			ConsumerGroupGeneration: om.generation,
 		}
-
 	}
 
 	om.pomsLock.RLock()
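
With `Consumer.Offsets.AutoCommit.Enable` set to false the offset manager no longer starts its ticker loop, so offsets are flushed only when the application calls the new `Commit()` method. A minimal sketch, assuming a local broker and placeholder group/topic names.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// Manual offset committing: with auto-commit disabled there is no background
// flush, so the application stages offsets with MarkOffset and then calls
// Commit() itself.
func main() {
	cfg := sarama.NewConfig()
	cfg.Consumer.Offsets.AutoCommit.Enable = false // field added by this change

	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	om, err := sarama.NewOffsetManagerFromClient("my-group", client)
	if err != nil {
		log.Fatal(err)
	}
	defer om.Close()

	pom, err := om.ManagePartition("my-topic", 0)
	if err != nil {
		log.Fatal(err)
	}
	pom.MarkOffset(42, "") // stage an offset
	om.Commit()            // flush it to the broker explicitly
	_ = pom.Close()
}
```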
diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go
index 326c372..4c9ce4d 100644
--- a/vendor/github.com/Shopify/sarama/offset_request.go
+++ b/vendor/github.com/Shopify/sarama/offset_request.go
@@ -6,7 +6,7 @@
 }
 
 func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
-	pe.putInt64(int64(b.time))
+	pe.putInt64(b.time)
 	if version == 0 {
 		pe.putInt32(b.maxOffsets)
 	}
@@ -28,6 +28,7 @@
 
 type OffsetRequest struct {
 	Version        int16
+	IsolationLevel IsolationLevel
 	replicaID      int32
 	isReplicaIDSet bool
 	blocks         map[string]map[int32]*offsetRequestBlock
@@ -41,6 +42,10 @@
 		pe.putInt32(-1)
 	}
 
+	if r.Version >= 2 {
+		pe.putBool(r.IsolationLevel == ReadCommitted)
+	}
+
 	err := pe.putArrayLength(len(r.blocks))
 	if err != nil {
 		return err
@@ -75,6 +80,18 @@
 		r.SetReplicaID(replicaID)
 	}
 
+	if r.Version >= 2 {
+		tmp, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+
+		r.IsolationLevel = ReadUncommitted
+		if tmp {
+			r.IsolationLevel = ReadCommitted
+		}
+	}
+
 	blockCount, err := pd.getArrayLength()
 	if err != nil {
 		return err
@@ -116,10 +133,16 @@
 	return r.Version
 }
 
+func (r *OffsetRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *OffsetRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
 		return V0_10_1_0
+	case 2:
+		return V0_11_0_0
 	default:
 		return MinVersion
 	}
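
`OffsetRequest` v2 encodes an isolation level as a bool on the wire (true meaning `ReadCommitted`), so list-offsets calls can ignore offsets beyond the last stable offset. A tiny sketch that only touches the exported fields added here; the version choice is illustrative.

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// Constructs a v2 list-offsets request with read-committed isolation.
// OffsetRequest v2 requires Kafka 0.11+.
func main() {
	req := &sarama.OffsetRequest{
		Version:        2,
		IsolationLevel: sarama.ReadCommitted,
	}
	fmt.Println(req.Version, req.IsolationLevel == sarama.ReadCommitted)
}
```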
diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go
index 8b2193f..69349ef 100644
--- a/vendor/github.com/Shopify/sarama/offset_response.go
+++ b/vendor/github.com/Shopify/sarama/offset_response.go
@@ -50,11 +50,19 @@
 }
 
 type OffsetResponse struct {
-	Version int16
-	Blocks  map[string]map[int32]*OffsetResponseBlock
+	Version        int16
+	ThrottleTimeMs int32
+	Blocks         map[string]map[int32]*OffsetResponseBlock
 }
 
 func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
+	if version >= 2 {
+		r.ThrottleTimeMs, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
 	numTopics, err := pd.getArrayLength()
 	if err != nil {
 		return err
@@ -120,6 +128,10 @@
 
 */
 func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 2 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+
 	if err = pe.putArrayLength(len(r.Blocks)); err != nil {
 		return err
 	}
@@ -150,10 +162,16 @@
 	return r.Version
 }
 
+func (r *OffsetResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *OffsetResponse) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
 		return V0_10_1_0
+	case 2:
+		return V0_11_0_0
 	default:
 		return MinVersion
 	}
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
index 9be854c..184bc26 100644
--- a/vendor/github.com/Shopify/sarama/packet_decoder.go
+++ b/vendor/github.com/Shopify/sarama/packet_decoder.go
@@ -10,15 +10,22 @@
 	getInt32() (int32, error)
 	getInt64() (int64, error)
 	getVarint() (int64, error)
+	getUVarint() (uint64, error)
 	getArrayLength() (int, error)
+	getCompactArrayLength() (int, error)
 	getBool() (bool, error)
+	getEmptyTaggedFieldArray() (int, error)
 
 	// Collections
 	getBytes() ([]byte, error)
 	getVarintBytes() ([]byte, error)
+	getCompactBytes() ([]byte, error)
 	getRawBytes(length int) ([]byte, error)
 	getString() (string, error)
 	getNullableString() (*string, error)
+	getCompactString() (string, error)
+	getCompactNullableString() (*string, error)
+	getCompactInt32Array() ([]int32, error)
 	getInt32Array() ([]int32, error)
 	getInt64Array() ([]int64, error)
 	getStringArray() ([]string, error)
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
index 67b8dae..aea53ca 100644
--- a/vendor/github.com/Shopify/sarama/packet_encoder.go
+++ b/vendor/github.com/Shopify/sarama/packet_encoder.go
@@ -12,18 +12,26 @@
 	putInt32(in int32)
 	putInt64(in int64)
 	putVarint(in int64)
+	putUVarint(in uint64)
+	putCompactArrayLength(in int)
 	putArrayLength(in int) error
 	putBool(in bool)
 
 	// Collections
 	putBytes(in []byte) error
 	putVarintBytes(in []byte) error
+	putCompactBytes(in []byte) error
 	putRawBytes(in []byte) error
+	putCompactString(in string) error
+	putNullableCompactString(in *string) error
 	putString(in string) error
 	putNullableString(in *string) error
 	putStringArray(in []string) error
+	putCompactInt32Array(in []int32) error
+	putNullableCompactInt32Array(in []int32) error
 	putInt32Array(in []int32) error
 	putInt64Array(in []int64) error
+	putEmptyTaggedFieldArray()
 
 	// Provide the current offset to record the batch size metric
 	offset() int
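
The new `putCompact*`/`getCompact*` and uvarint methods implement the flexible-version wire format: lengths are written as unsigned varints offset by one (so 0 is free to mean null), and each flexible struct ends with a tagged-field count, 0 when empty. A standalone sketch of that convention using only `encoding/binary`; the function names are illustrative, not sarama APIs.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeCompactString follows the convention behind putCompactString /
// putCompactArrayLength: the length is written as an unsigned varint of
// len+1, then the raw bytes.
func encodeCompactString(s string) []byte {
	buf := make([]byte, binary.MaxVarintLen64+len(s))
	n := binary.PutUvarint(buf, uint64(len(s))+1)
	n += copy(buf[n:], s)
	return buf[:n]
}

// decodeCompactString is the matching sketch of getCompactString; it returns
// the string and the remaining bytes.
func decodeCompactString(b []byte) (string, []byte) {
	l, n := binary.Uvarint(b)
	length := int(l) - 1
	return string(b[n : n+length]), b[n+length:]
}

func main() {
	enc := encodeCompactString("my-group")
	s, rest := decodeCompactString(enc)
	fmt.Printf("%q, %d trailing bytes\n", s, len(rest))
	// An empty tagged-field section (putEmptyTaggedFieldArray) is simply the
	// single varint 0 appended after the struct's regular fields.
}
```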
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
index 6a708e7..a66e11e 100644
--- a/vendor/github.com/Shopify/sarama/partitioner.go
+++ b/vendor/github.com/Shopify/sarama/partitioner.go
@@ -42,7 +42,7 @@
 
 type manualPartitioner struct{}
 
-// HashPartitionOption lets you modify default values of the partitioner
+// HashPartitionerOption lets you modify default values of the partitioner
 type HashPartitionerOption func(*hashPartitioner)
 
 // WithAbsFirst means that the partitioner handles absolute values
diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go
index b633cd1..0d01374 100644
--- a/vendor/github.com/Shopify/sarama/prep_encoder.go
+++ b/vendor/github.com/Shopify/sarama/prep_encoder.go
@@ -2,6 +2,7 @@
 
 import (
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"math"
 
@@ -36,6 +37,11 @@
 	pe.length += binary.PutVarint(buf[:], in)
 }
 
+func (pe *prepEncoder) putUVarint(in uint64) {
+	var buf [binary.MaxVarintLen64]byte
+	pe.length += binary.PutUvarint(buf[:], in)
+}
+
 func (pe *prepEncoder) putArrayLength(in int) error {
 	if in > math.MaxInt32 {
 		return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
@@ -44,6 +50,10 @@
 	return nil
 }
 
+func (pe *prepEncoder) putCompactArrayLength(in int) {
+	pe.putUVarint(uint64(in + 1))
+}
+
 func (pe *prepEncoder) putBool(in bool) {
 	pe.length++
 }
@@ -67,6 +77,25 @@
 	return pe.putRawBytes(in)
 }
 
+func (pe *prepEncoder) putCompactBytes(in []byte) error {
+	pe.putUVarint(uint64(len(in) + 1))
+	return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putCompactString(in string) error {
+	pe.putCompactArrayLength(len(in))
+	return pe.putRawBytes([]byte(in))
+}
+
+func (pe *prepEncoder) putNullableCompactString(in *string) error {
+	if in == nil {
+		pe.putUVarint(0)
+		return nil
+	} else {
+		return pe.putCompactString(*in)
+	}
+}
+
 func (pe *prepEncoder) putRawBytes(in []byte) error {
 	if len(in) > math.MaxInt32 {
 		return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
@@ -107,6 +136,27 @@
 	return nil
 }
 
+func (pe *prepEncoder) putCompactInt32Array(in []int32) error {
+	if in == nil {
+		return errors.New("expected int32 array to be non null")
+	}
+
+	pe.putUVarint(uint64(len(in)) + 1)
+	pe.length += 4 * len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putNullableCompactInt32Array(in []int32) error {
+	if in == nil {
+		pe.putUVarint(0)
+		return nil
+	}
+
+	pe.putUVarint(uint64(len(in)) + 1)
+	pe.length += 4 * len(in)
+	return nil
+}
+
 func (pe *prepEncoder) putInt32Array(in []int32) error {
 	err := pe.putArrayLength(len(in))
 	if err != nil {
@@ -125,6 +175,10 @@
 	return nil
 }
 
+func (pe *prepEncoder) putEmptyTaggedFieldArray() {
+	pe.putUVarint(0)
+}
+
 func (pe *prepEncoder) offset() int {
 	return pe.length
 }
diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go
index 0c755d0..0034651 100644
--- a/vendor/github.com/Shopify/sarama/produce_request.go
+++ b/vendor/github.com/Shopify/sarama/produce_request.go
@@ -206,6 +206,10 @@
 	return r.Version
 }
 
+func (r *ProduceRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *ProduceRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
@@ -214,6 +218,8 @@
 		return V0_10_0_0
 	case 3:
 		return V0_11_0_0
+	case 7:
+		return V2_1_0_0
 	default:
 		return MinVersion
 	}
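
`ProduceRequest` v7 is now mapped to Kafka 2.1.0, which is the version `produce_set.go` (later in this diff) selects when zstd compression is configured. A minimal config sketch; the broker version and producer settings shown are assumptions for illustration.

```go
package main

import "github.com/Shopify/sarama"

// Producer settings that cause buildRequest (in produce_set.go) to emit a v7
// ProduceRequest: zstd compression plus a cluster version of at least 2.1.0.
func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0
	cfg.Producer.Compression = sarama.CompressionZSTD
	cfg.Producer.Return.Successes = true
	_ = cfg
}
```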
diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go
index 4c5cd35..edf9787 100644
--- a/vendor/github.com/Shopify/sarama/produce_response.go
+++ b/vendor/github.com/Shopify/sarama/produce_response.go
@@ -5,11 +5,27 @@
 	"time"
 )
 
+// Protocol, http://kafka.apache.org/protocol.html
+// v1
+// v2 = v3 = v4
+// v5 = v6 = v7
+// Produce Response (Version: 7) => [responses] throttle_time_ms
+//   responses => topic [partition_responses]
+//     topic => STRING
+//     partition_responses => partition error_code base_offset log_append_time log_start_offset
+//       partition => INT32
+//       error_code => INT16
+//       base_offset => INT64
+//       log_append_time => INT64
+//       log_start_offset => INT64
+//   throttle_time_ms => INT32
+
+// partition_responses in protocol
 type ProduceResponseBlock struct {
-	Err    KError
-	Offset int64
-	// only provided if Version >= 2 and the broker is configured with `LogAppendTime`
-	Timestamp time.Time
+	Err         KError    // v0, error_code
+	Offset      int64     // v0, base_offset
+	Timestamp   time.Time // v2, log_append_time, and the broker is configured with `LogAppendTime`
+	StartOffset int64     // v5, log_start_offset
 }
 
 func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
@@ -32,6 +48,13 @@
 		}
 	}
 
+	if version >= 5 {
+		b.StartOffset, err = pd.getInt64()
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
@@ -49,13 +72,17 @@
 		pe.putInt64(timestamp)
 	}
 
+	if version >= 5 {
+		pe.putInt64(b.StartOffset)
+	}
+
 	return nil
 }
 
 type ProduceResponse struct {
-	Blocks       map[string]map[int32]*ProduceResponseBlock
+	Blocks       map[string]map[int32]*ProduceResponseBlock // v0, responses
 	Version      int16
-	ThrottleTime time.Duration // only provided if Version >= 1
+	ThrottleTime time.Duration // v1, throttle_time_ms
 }
 
 func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
@@ -129,6 +156,7 @@
 			}
 		}
 	}
+
 	if r.Version >= 1 {
 		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
 	}
@@ -143,17 +171,12 @@
 	return r.Version
 }
 
+func (r *ProduceResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *ProduceResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_9_0_0
-	case 2:
-		return V0_10_0_0
-	case 3:
-		return V0_11_0_0
-	default:
-		return MinVersion
-	}
+	return MinVersion
 }
 
 func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
index bba0f7e..9c70f81 100644
--- a/vendor/github.com/Shopify/sarama/produce_set.go
+++ b/vendor/github.com/Shopify/sarama/produce_set.go
@@ -13,17 +13,22 @@
 }
 
 type produceSet struct {
-	parent *asyncProducer
-	msgs   map[string]map[int32]*partitionSet
+	parent        *asyncProducer
+	msgs          map[string]map[int32]*partitionSet
+	producerID    int64
+	producerEpoch int16
 
 	bufferBytes int
 	bufferCount int
 }
 
 func newProduceSet(parent *asyncProducer) *produceSet {
+	pid, epoch := parent.txnmgr.getProducerID()
 	return &produceSet{
-		msgs:   make(map[string]map[int32]*partitionSet),
-		parent: parent,
+		msgs:          make(map[string]map[int32]*partitionSet),
+		parent:        parent,
+		producerID:    pid,
+		producerEpoch: epoch,
 	}
 }
 
@@ -44,9 +49,10 @@
 	}
 
 	timestamp := msg.Timestamp
-	if msg.Timestamp.IsZero() {
+	if timestamp.IsZero() {
 		timestamp = time.Now()
 	}
+	timestamp = timestamp.Truncate(time.Millisecond)
 
 	partitions := ps.msgs[msg.Topic]
 	if partitions == nil {
@@ -64,8 +70,8 @@
 				Version:          2,
 				Codec:            ps.parent.conf.Producer.Compression,
 				CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
-				ProducerID:       ps.parent.txnmgr.producerID,
-				ProducerEpoch:    ps.parent.txnmgr.producerEpoch,
+				ProducerID:       ps.producerID,
+				ProducerEpoch:    ps.producerEpoch,
 			}
 			if ps.parent.conf.Producer.Idempotent {
 				batch.FirstSequence = msg.sequenceNumber
@@ -77,12 +83,17 @@
 		}
 		partitions[msg.Partition] = set
 	}
-	set.msgs = append(set.msgs, msg)
 
 	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
 		if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
 			return errors.New("assertion failed: message out of sequence added to a batch")
 		}
+	}
+
+	// Past this point we can't return an error, because we've already added the message to the set.
+	set.msgs = append(set.msgs, msg)
+
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
 		// We are being conservative here to avoid having to prep encode the record
 		size += maximumRecordOverhead
 		rec := &Record{
@@ -128,6 +139,10 @@
 		req.Version = 3
 	}
 
+	if ps.parent.conf.Producer.Compression == CompressionZSTD && ps.parent.conf.Version.IsAtLeast(V2_1_0_0) {
+		req.Version = 7
+	}
+
 	for topic, partitionSets := range ps.msgs {
 		for partition, set := range partitionSets {
 			if req.Version >= 3 {
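
`produceSet` now snapshots the producer id/epoch once per set and truncates message timestamps to millisecond precision before batching, matching the resolution Kafka records actually store. The truncation itself is just `Truncate(time.Millisecond)`, as in this tiny illustration with an arbitrary timestamp:

```go
package main

import (
	"fmt"
	"time"
)

// Kafka record timestamps carry millisecond resolution, so sub-millisecond
// precision is dropped before the message joins a batch.
func main() {
	ts := time.Date(2021, 3, 1, 12, 0, 0, 123456789, time.UTC)
	fmt.Println(ts.Truncate(time.Millisecond)) // 2021-03-01 12:00:00.123 +0000 UTC
}
```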
diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go
index 085cbb3..2482c63 100644
--- a/vendor/github.com/Shopify/sarama/real_decoder.go
+++ b/vendor/github.com/Shopify/sarama/real_decoder.go
@@ -5,13 +5,15 @@
 	"math"
 )
 
-var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
-var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
-var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"}
-var errInvalidStringLength = PacketDecodingError{"invalid string length"}
-var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
-var errVarintOverflow = PacketDecodingError{"varint overflow"}
-var errInvalidBool = PacketDecodingError{"invalid bool"}
+var (
+	errInvalidArrayLength      = PacketDecodingError{"invalid array length"}
+	errInvalidByteSliceLength  = PacketDecodingError{"invalid byteslice length"}
+	errInvalidStringLength     = PacketDecodingError{"invalid string length"}
+	errVarintOverflow          = PacketDecodingError{"varint overflow"}
+	errUVarintOverflow         = PacketDecodingError{"uvarint overflow"}
+	errInvalidBool             = PacketDecodingError{"invalid bool"}
+	errUnsupportedTaggedFields = PacketDecodingError{"non-empty tagged fields are not supported yet"}
+)
 
 type realDecoder struct {
 	raw   []byte
@@ -75,6 +77,22 @@
 	return tmp, nil
 }
 
+func (rd *realDecoder) getUVarint() (uint64, error) {
+	tmp, n := binary.Uvarint(rd.raw[rd.off:])
+	if n == 0 {
+		rd.off = len(rd.raw)
+		return 0, ErrInsufficientData
+	}
+
+	if n < 0 {
+		rd.off -= n
+		return 0, errUVarintOverflow
+	}
+
+	rd.off += n
+	return tmp, nil
+}
+
 func (rd *realDecoder) getArrayLength() (int, error) {
 	if rd.remaining() < 4 {
 		rd.off = len(rd.raw)
@@ -91,6 +109,19 @@
 	return tmp, nil
 }
 
+func (rd *realDecoder) getCompactArrayLength() (int, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return 0, err
+	}
+
+	if n == 0 {
+		return 0, nil
+	}
+
+	return int(n) - 1, nil
+}
+
 func (rd *realDecoder) getBool() (bool, error) {
 	b, err := rd.getInt8()
 	if err != nil || b == 0 {
@@ -102,6 +133,19 @@
 	return true, nil
 }
 
+func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) {
+	tagCount, err := rd.getUVarint()
+	if err != nil {
+		return 0, err
+	}
+
+	if tagCount != 0 {
+		return 0, errUnsupportedTaggedFields
+	}
+
+	return 0, nil
+}
+
 // collections
 
 func (rd *realDecoder) getBytes() ([]byte, error) {
@@ -128,6 +172,16 @@
 	return rd.getRawBytes(int(tmp))
 }
 
+func (rd *realDecoder) getCompactBytes() ([]byte, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	length := int(n - 1)
+	return rd.getRawBytes(length)
+}
+
 func (rd *realDecoder) getStringLength() (int, error) {
 	length, err := rd.getInt16()
 	if err != nil {
@@ -169,6 +223,57 @@
 	return &tmpStr, err
 }
 
+func (rd *realDecoder) getCompactString() (string, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return "", err
+	}
+
+	length := int(n - 1)
+
+	tmpStr := string(rd.raw[rd.off : rd.off+length])
+	rd.off += length
+	return tmpStr, nil
+}
+
+func (rd *realDecoder) getCompactNullableString() (*string, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	length := int(n - 1)
+
+	if length < 0 {
+		return nil, err
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+length])
+	rd.off += length
+	return &tmpStr, err
+}
+
+func (rd *realDecoder) getCompactInt32Array() ([]int32, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	if n == 0 {
+		return nil, nil
+	}
+
+	arrayLength := int(n) - 1
+
+	ret := make([]int32, arrayLength)
+
+	for i := range ret {
+		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+		rd.off += 4
+	}
+	return ret, nil
+}
+
 func (rd *realDecoder) getInt32Array() ([]int32, error) {
 	if rd.remaining() < 4 {
 		rd.off = len(rd.raw)
diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go
index 3c75387..c07204c 100644
--- a/vendor/github.com/Shopify/sarama/real_encoder.go
+++ b/vendor/github.com/Shopify/sarama/real_encoder.go
@@ -2,6 +2,7 @@
 
 import (
 	"encoding/binary"
+	"errors"
 
 	"github.com/rcrowley/go-metrics"
 )
@@ -39,11 +40,20 @@
 	re.off += binary.PutVarint(re.raw[re.off:], in)
 }
 
+func (re *realEncoder) putUVarint(in uint64) {
+	re.off += binary.PutUvarint(re.raw[re.off:], in)
+}
+
 func (re *realEncoder) putArrayLength(in int) error {
 	re.putInt32(int32(in))
 	return nil
 }
 
+func (re *realEncoder) putCompactArrayLength(in int) {
+	// 0 represents a null array, so +1 has to be added
+	re.putUVarint(uint64(in + 1))
+}
+
 func (re *realEncoder) putBool(in bool) {
 	if in {
 		re.putInt8(1)
@@ -78,6 +88,24 @@
 	return re.putRawBytes(in)
 }
 
+func (re *realEncoder) putCompactBytes(in []byte) error {
+	re.putUVarint(uint64(len(in) + 1))
+	return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putCompactString(in string) error {
+	re.putCompactArrayLength(len(in))
+	return re.putRawBytes([]byte(in))
+}
+
+func (re *realEncoder) putNullableCompactString(in *string) error {
+	if in == nil {
+		re.putInt8(0)
+		return nil
+	}
+	return re.putCompactString(*in)
+}
+
 func (re *realEncoder) putString(in string) error {
 	re.putInt16(int16(len(in)))
 	copy(re.raw[re.off:], in)
@@ -108,6 +136,31 @@
 	return nil
 }
 
+func (re *realEncoder) putCompactInt32Array(in []int32) error {
+	if in == nil {
+		return errors.New("expected int32 array to be non null")
+	}
+	// 0 represents a null array, so +1 has to be added
+	re.putUVarint(uint64(len(in)) + 1)
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
+func (re *realEncoder) putNullableCompactInt32Array(in []int32) error {
+	if in == nil {
+		re.putUVarint(0)
+		return nil
+	}
+	// 0 represents a null array, so +1 has to be added
+	re.putUVarint(uint64(len(in)) + 1)
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
 func (re *realEncoder) putInt32Array(in []int32) error {
 	err := re.putArrayLength(len(in))
 	if err != nil {
@@ -130,6 +183,10 @@
 	return nil
 }
 
+func (re *realEncoder) putEmptyTaggedFieldArray() {
+	re.putUVarint(0)
+}
+
 func (re *realEncoder) offset() int {
 	return re.off
 }
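The encoder side mirrors that convention: putCompactArrayLength and putCompactBytes write uvarint(len+1), while putNullableCompactString writes a single 0 byte for nil. A minimal stdlib-only sketch of the same idea (putCompactNullableBytes is a hypothetical name, not part of sarama's API):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// putCompactNullableBytes appends a compact nullable byte slice to dst:
// nil is written as a single uvarint 0, otherwise uvarint(len+1) followed by
// the raw bytes. Illustrative only.
func putCompactNullableBytes(dst, in []byte) []byte {
	var tmp [binary.MaxVarintLen64]byte
	if in == nil {
		n := binary.PutUvarint(tmp[:], 0)
		return append(dst, tmp[:n]...)
	}
	n := binary.PutUvarint(tmp[:], uint64(len(in)+1))
	dst = append(dst, tmp[:n]...)
	return append(dst, in...)
}

func main() {
	fmt.Printf("%x\n", putCompactNullableBytes(nil, nil))           // 00
	fmt.Printf("%x\n", putCompactNullableBytes(nil, []byte("onu"))) // 046f6e75
}
```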
diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go
index cdccfe3..a3fe8c0 100644
--- a/vendor/github.com/Shopify/sarama/record.go
+++ b/vendor/github.com/Shopify/sarama/record.go
@@ -11,7 +11,7 @@
 	maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1
 )
 
-//RecordHeader stores key and value for a record header
+// RecordHeader stores key and value for a record header
 type RecordHeader struct {
 	Key   []byte
 	Value []byte
@@ -35,7 +35,7 @@
 	return nil
 }
 
-//Record is kafka record type
+// Record is kafka record type
 type Record struct {
 	Headers []*RecordHeader
 
diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go
index 98160c7..f4c5e95 100644
--- a/vendor/github.com/Shopify/sarama/records.go
+++ b/vendor/github.com/Shopify/sarama/records.go
@@ -8,7 +8,6 @@
 	defaultRecords
 
 	magicOffset = 16
-	magicLength = 1
 )
 
 // Records implements a union type containing either a RecordBatch or a legacy MessageSet.
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
index 5ed8ca4..d899df5 100644
--- a/vendor/github.com/Shopify/sarama/request.go
+++ b/vendor/github.com/Shopify/sarama/request.go
@@ -11,6 +11,7 @@
 	versionedDecoder
 	key() int16
 	version() int16
+	headerVersion() int16
 	requiredVersion() KafkaVersion
 }
 
@@ -26,12 +27,19 @@
 	pe.putInt16(r.body.version())
 	pe.putInt32(r.correlationID)
 
-	err := pe.putString(r.clientID)
-	if err != nil {
-		return err
+	if r.body.headerVersion() >= 1 {
+		err := pe.putString(r.clientID)
+		if err != nil {
+			return err
+		}
 	}
 
-	err = r.body.encode(pe)
+	if r.body.headerVersion() >= 2 {
+		// we don't use tag headers at the moment so we just put an array length of 0
+		pe.putUVarint(0)
+	}
+
+	err := r.body.encode(pe)
 	if err != nil {
 		return err
 	}
@@ -65,6 +73,14 @@
 		return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
 	}
 
+	if r.body.headerVersion() >= 2 {
+		// tagged field
+		_, err = pd.getUVarint()
+		if err != nil {
+			return err
+		}
+	}
+
 	return r.body.decode(pd, version)
 }
 
@@ -105,7 +121,7 @@
 	case 0:
 		return &ProduceRequest{}
 	case 1:
-		return &FetchRequest{}
+		return &FetchRequest{Version: version}
 	case 2:
 		return &OffsetRequest{Version: version}
 	case 3:
@@ -113,7 +129,7 @@
 	case 8:
 		return &OffsetCommitRequest{Version: version}
 	case 9:
-		return &OffsetFetchRequest{}
+		return &OffsetFetchRequest{Version: version}
 	case 10:
 		return &FindCoordinatorRequest{}
 	case 11:
@@ -158,12 +174,24 @@
 		return &DescribeConfigsRequest{}
 	case 33:
 		return &AlterConfigsRequest{}
+	case 35:
+		return &DescribeLogDirsRequest{}
 	case 36:
 		return &SaslAuthenticateRequest{}
 	case 37:
 		return &CreatePartitionsRequest{}
 	case 42:
 		return &DeleteGroupsRequest{}
+	case 44:
+		return &IncrementalAlterConfigsRequest{}
+	case 45:
+		return &AlterPartitionReassignmentsRequest{}
+	case 46:
+		return &ListPartitionReassignmentsRequest{}
+	case 50:
+		return &DescribeUserScramCredentialsRequest{}
+	case 51:
+		return &AlterUserScramCredentialsRequest{}
 	}
 	return nil
 }
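With the new headerVersion hook, request serialization becomes version-aware: header versions 1 and 2 both carry the client id as a classic int16-length string, and version 2 additionally appends an empty tagged-field section encoded as a single uvarint 0. A rough sketch of that layout, with an arbitrary api key and correlation id (this is not sarama's encoder):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// appendRequestHeaderV2 lays out a Kafka request header as described above for
// headerVersion >= 2: api key, api version, correlation id, a classic
// int16-length client id, then an empty tagged-field section (uvarint 0).
// Illustrative sketch only.
func appendRequestHeaderV2(dst []byte, apiKey, apiVersion int16, correlationID int32, clientID string) []byte {
	var b [4]byte

	binary.BigEndian.PutUint16(b[:2], uint16(apiKey))
	dst = append(dst, b[:2]...)

	binary.BigEndian.PutUint16(b[:2], uint16(apiVersion))
	dst = append(dst, b[:2]...)

	binary.BigEndian.PutUint32(b[:], uint32(correlationID))
	dst = append(dst, b[:]...)

	// Client id stays a non-compact (int16-length) string even in header v2.
	binary.BigEndian.PutUint16(b[:2], uint16(len(clientID)))
	dst = append(dst, b[:2]...)
	dst = append(dst, clientID...)

	// Header v2 appends an empty tagged-field array: a single uvarint 0.
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], 0)
	return append(dst, tmp[:n]...)
}

func main() {
	// Values here are arbitrary examples.
	hdr := appendRequestHeaderV2(nil, 18, 3, 7, "voltha-openolt-adapter")
	fmt.Printf("% x\n", hdr)
}
```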
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
index 7a75918..fbcef0b 100644
--- a/vendor/github.com/Shopify/sarama/response_header.go
+++ b/vendor/github.com/Shopify/sarama/response_header.go
@@ -2,15 +2,17 @@
 
 import "fmt"
 
-const responseLengthSize = 4
-const correlationIDSize = 4
+const (
+	responseLengthSize = 4
+	correlationIDSize  = 4
+)
 
 type responseHeader struct {
 	length        int32
 	correlationID int32
 }
 
-func (r *responseHeader) decode(pd packetDecoder) (err error) {
+func (r *responseHeader) decode(pd packetDecoder, version int16) (err error) {
 	r.length, err = pd.getInt32()
 	if err != nil {
 		return err
@@ -20,5 +22,12 @@
 	}
 
 	r.correlationID, err = pd.getInt32()
+
+	if version >= 1 {
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
 	return err
 }
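Response headers follow the same pattern on the read path: version 0 is just the length plus correlation id, while version 1 also carries a tagged-field count that getEmptyTaggedFieldArray requires to be zero. A self-contained sketch of that decode (decodeResponseHeader is a hypothetical helper):

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// decodeResponseHeader mirrors the version-aware decode above: 4 bytes length,
// 4 bytes correlation id, and for header version >= 1 a tagged-field count
// that this sketch, like the code above, requires to be zero.
func decodeResponseHeader(buf []byte, version int16) (length int32, correlationID int32, err error) {
	if len(buf) < 8 {
		return 0, 0, errors.New("response header too short")
	}
	length = int32(binary.BigEndian.Uint32(buf[0:4]))
	correlationID = int32(binary.BigEndian.Uint32(buf[4:8]))

	if version >= 1 {
		tagCount, n := binary.Uvarint(buf[8:])
		if n <= 0 {
			return 0, 0, errors.New("invalid tagged field count")
		}
		if tagCount != 0 {
			return 0, 0, errors.New("tagged fields are not supported")
		}
	}
	return length, correlationID, nil
}

func main() {
	// A hypothetical v1 response header: length=6, correlationID=42, no tagged fields.
	buf := []byte{0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2a, 0x00}
	l, c, err := decodeResponseHeader(buf, 1)
	fmt.Println(l, c, err) // 6 42 <nil>
}
```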
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
index 1e0277a..48f362d 100644
--- a/vendor/github.com/Shopify/sarama/sarama.go
+++ b/vendor/github.com/Shopify/sarama/sarama.go
@@ -39,6 +39,10 @@
 	| response-rate-for-broker-<broker-id>         | meter      | Responses/second received from a given broker                 |
 	| response-size                                | histogram  | Distribution of the response size in bytes for all brokers    |
 	| response-size-for-broker-<broker-id>         | histogram  | Distribution of the response size in bytes for a given broker |
+	| requests-in-flight                           | counter    | The current number of in-flight requests awaiting a response  |
+	|                                              |            | for all brokers                                               |
+	| requests-in-flight-for-broker-<broker-id>    | counter    | The current number of in-flight requests awaiting a response  |
+	|                                              |            | for a given broker                                            |
 	+----------------------------------------------+------------+---------------------------------------------------------------+
 
 Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics.
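The new requests-in-flight counters live in the same go-metrics registry as the existing broker metrics. As a hedged illustration of how such a counter behaves (the registry here is created locally for the example; in practice sarama manages it through its configuration):

```go
package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	// Local registry for illustration; sarama normally owns its own registry.
	registry := metrics.NewRegistry()

	// Counter name matches the metric documented above.
	inFlight := metrics.GetOrRegisterCounter("requests-in-flight", registry)

	inFlight.Inc(1) // a request is written to the broker
	inFlight.Dec(1) // its response arrives

	fmt.Println(inFlight.Count()) // 0
}
```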
diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
index 54c8b09..90504df 100644
--- a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
+++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
@@ -24,6 +24,10 @@
 	return 0
 }
 
+func (r *SaslAuthenticateRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion {
 	return V1_0_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
index 0038c3f..3ef57b5 100644
--- a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
+++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
@@ -39,6 +39,10 @@
 	return 0
 }
 
+func (r *SaslAuthenticateResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion {
 	return V1_0_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
index fe5ba05..74dc307 100644
--- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
@@ -29,6 +29,10 @@
 	return r.Version
 }
 
+func (r *SaslHandshakeRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
 	return V0_10_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
index ef290d4..69dfc31 100644
--- a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
@@ -33,6 +33,10 @@
 	return 0
 }
 
+func (r *SaslHandshakeResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
 	return V0_10_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/scram_formatter.go b/vendor/github.com/Shopify/sarama/scram_formatter.go
new file mode 100644
index 0000000..2af9e4a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/scram_formatter.go
@@ -0,0 +1,78 @@
+package sarama
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"crypto/sha512"
+	"hash"
+)
+
+// ScramFormatter implementation
+// @see: https://github.com/apache/kafka/blob/99b9b3e84f4e98c3f07714e1de6a139a004cbc5b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramFormatter.java#L93
+type scramFormatter struct {
+	mechanism ScramMechanismType
+}
+
+func (s scramFormatter) mac(key []byte) (hash.Hash, error) {
+	var m hash.Hash
+
+	switch s.mechanism {
+	case SCRAM_MECHANISM_SHA_256:
+		m = hmac.New(sha256.New, key)
+
+	case SCRAM_MECHANISM_SHA_512:
+		m = hmac.New(sha512.New, key)
+	default:
+		return nil, ErrUnknownScramMechanism
+	}
+
+	return m, nil
+}
+
+func (s scramFormatter) hmac(key []byte, extra []byte) ([]byte, error) {
+	mac, err := s.mac(key)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, err := mac.Write(extra); err != nil {
+		return nil, err
+	}
+	return mac.Sum(nil), nil
+}
+
+func (s scramFormatter) xor(result []byte, second []byte) {
+	for i := 0; i < len(result); i++ {
+		result[i] = result[i] ^ second[i]
+	}
+}
+
+func (s scramFormatter) saltedPassword(password []byte, salt []byte, iterations int) ([]byte, error) {
+	mac, err := s.mac(password)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, err := mac.Write(salt); err != nil {
+		return nil, err
+	}
+	if _, err := mac.Write([]byte{0, 0, 0, 1}); err != nil {
+		return nil, err
+	}
+
+	u1 := mac.Sum(nil)
+	prev := u1
+	result := u1
+
+	for i := 2; i <= iterations; i++ {
+		ui, err := s.hmac(password, prev)
+		if err != nil {
+			return nil, err
+		}
+
+		s.xor(result, ui)
+		prev = ui
+	}
+
+	return result, nil
+}
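saltedPassword above implements SCRAM's Hi() function: U1 = HMAC(password, salt || INT(1)), Un = HMAC(password, Un-1), and the result is the XOR of all Un. A standalone sketch of the same computation with HMAC-SHA256 (the password, salt, and iteration count are illustrative only):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hi computes SCRAM's Hi(password, salt, iterations) with HMAC-SHA256,
// mirroring saltedPassword above: U1 = HMAC(password, salt||INT(1)),
// Un = HMAC(password, Un-1), result = U1 XOR U2 XOR ... XOR Un.
func hi(password, salt []byte, iterations int) []byte {
	mac := hmac.New(sha256.New, password)
	mac.Write(salt)
	mac.Write([]byte{0, 0, 0, 1})
	u := mac.Sum(nil)

	result := make([]byte, len(u))
	copy(result, u)

	prev := u
	for i := 2; i <= iterations; i++ {
		m := hmac.New(sha256.New, password)
		m.Write(prev)
		ui := m.Sum(nil)
		for j := range result {
			result[j] ^= ui[j]
		}
		prev = ui
	}
	return result
}

func main() {
	salted := hi([]byte("admin-secret"), []byte("random-salt"), 4096)
	fmt.Println(hex.EncodeToString(salted))
}
```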
diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go
new file mode 100644
index 0000000..161233f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go
@@ -0,0 +1,124 @@
+package sarama
+
+type topicPartitionAssignment struct {
+	Topic     string
+	Partition int32
+}
+
+type StickyAssignorUserData interface {
+	partitions() []topicPartitionAssignment
+	hasGeneration() bool
+	generation() int
+}
+
+// StickyAssignorUserDataV0 holds topic partition information for an assignment
+type StickyAssignorUserDataV0 struct {
+	Topics map[string][]int32
+
+	topicPartitions []topicPartitionAssignment
+}
+
+func (m *StickyAssignorUserDataV0) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(m.Topics)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range m.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (m *StickyAssignorUserDataV0) decode(pd packetDecoder) (err error) {
+	var topicLen int
+	if topicLen, err = pd.getArrayLength(); err != nil {
+		return
+	}
+
+	m.Topics = make(map[string][]int32, topicLen)
+	for i := 0; i < topicLen; i++ {
+		var topic string
+		if topic, err = pd.getString(); err != nil {
+			return
+		}
+		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+			return
+		}
+	}
+	m.topicPartitions = populateTopicPartitions(m.Topics)
+	return nil
+}
+
+func (m *StickyAssignorUserDataV0) partitions() []topicPartitionAssignment { return m.topicPartitions }
+func (m *StickyAssignorUserDataV0) hasGeneration() bool                    { return false }
+func (m *StickyAssignorUserDataV0) generation() int                        { return defaultGeneration }
+
+// StickyAssignorUserDataV1 holds topic partition information for an assignment
+type StickyAssignorUserDataV1 struct {
+	Topics     map[string][]int32
+	Generation int32
+
+	topicPartitions []topicPartitionAssignment
+}
+
+func (m *StickyAssignorUserDataV1) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(m.Topics)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range m.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	pe.putInt32(m.Generation)
+	return nil
+}
+
+func (m *StickyAssignorUserDataV1) decode(pd packetDecoder) (err error) {
+	var topicLen int
+	if topicLen, err = pd.getArrayLength(); err != nil {
+		return
+	}
+
+	m.Topics = make(map[string][]int32, topicLen)
+	for i := 0; i < topicLen; i++ {
+		var topic string
+		if topic, err = pd.getString(); err != nil {
+			return
+		}
+		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+			return
+		}
+	}
+
+	m.Generation, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+	m.topicPartitions = populateTopicPartitions(m.Topics)
+	return nil
+}
+
+func (m *StickyAssignorUserDataV1) partitions() []topicPartitionAssignment { return m.topicPartitions }
+func (m *StickyAssignorUserDataV1) hasGeneration() bool                    { return true }
+func (m *StickyAssignorUserDataV1) generation() int                        { return int(m.Generation) }
+
+func populateTopicPartitions(topics map[string][]int32) []topicPartitionAssignment {
+	topicPartitions := make([]topicPartitionAssignment, 0)
+	for topic, partitions := range topics {
+		for _, partition := range partitions {
+			topicPartitions = append(topicPartitions, topicPartitionAssignment{Topic: topic, Partition: partition})
+		}
+	}
+	return topicPartitions
+}
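populateTopicPartitions simply flattens the topic-to-partitions map into one (topic, partition) pair per assignment, which is the shape the sticky assignor compares across generations. A tiny sketch of the same transformation (flatten is a hypothetical name):

```go
package main

import "fmt"

type topicPartitionAssignment struct {
	Topic     string
	Partition int32
}

// flatten mirrors populateTopicPartitions above: it expands a
// topic -> partitions map into one entry per (topic, partition) pair.
func flatten(topics map[string][]int32) []topicPartitionAssignment {
	out := make([]topicPartitionAssignment, 0)
	for topic, partitions := range topics {
		for _, p := range partitions {
			out = append(out, topicPartitionAssignment{Topic: topic, Partition: p})
		}
	}
	return out
}

func main() {
	assignments := flatten(map[string][]int32{
		"voltha.events": {0, 1, 2},
	})
	fmt.Println(assignments) // [{voltha.events 0} {voltha.events 1} {voltha.events 2}]
}
```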
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
index fe20708..ac6ecb1 100644
--- a/vendor/github.com/Shopify/sarama/sync_group_request.go
+++ b/vendor/github.com/Shopify/sarama/sync_group_request.go
@@ -77,6 +77,10 @@
 	return 0
 }
 
+func (r *SyncGroupRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go
index 194b382..af019c4 100644
--- a/vendor/github.com/Shopify/sarama/sync_group_response.go
+++ b/vendor/github.com/Shopify/sarama/sync_group_response.go
@@ -36,6 +36,10 @@
 	return 0
 }
 
+func (r *SyncGroupResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
index 71e95b8..c4043a3 100644
--- a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
+++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
@@ -91,6 +91,10 @@
 	return 0
 }
 
+func (a *TxnOffsetCommitRequest) headerVersion() int16 {
+	return 1
+}
+
 func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
index 6c980f4..94d8029 100644
--- a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
+++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
@@ -78,6 +78,10 @@
 	return 0
 }
 
+func (a *TxnOffsetCommitResponse) headerVersion() int16 {
+	return 0
+}
+
 func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
index 7c815cd..1859d29 100644
--- a/vendor/github.com/Shopify/sarama/utils.go
+++ b/vendor/github.com/Shopify/sarama/utils.go
@@ -26,9 +26,7 @@
 
 func dupInt32Slice(input []int32) []int32 {
 	ret := make([]int32, 0, len(input))
-	for _, val := range input {
-		ret = append(ret, val)
-	}
+	ret = append(ret, input...)
 	return ret
 }
 
@@ -161,6 +159,11 @@
 	V2_1_0_0  = newKafkaVersion(2, 1, 0, 0)
 	V2_2_0_0  = newKafkaVersion(2, 2, 0, 0)
 	V2_3_0_0  = newKafkaVersion(2, 3, 0, 0)
+	V2_4_0_0  = newKafkaVersion(2, 4, 0, 0)
+	V2_5_0_0  = newKafkaVersion(2, 5, 0, 0)
+	V2_6_0_0  = newKafkaVersion(2, 6, 0, 0)
+	V2_7_0_0  = newKafkaVersion(2, 7, 0, 0)
+	V2_8_0_0  = newKafkaVersion(2, 8, 0, 0)
 
 	SupportedVersions = []KafkaVersion{
 		V0_8_2_0,
@@ -185,15 +188,21 @@
 		V2_1_0_0,
 		V2_2_0_0,
 		V2_3_0_0,
+		V2_4_0_0,
+		V2_5_0_0,
+		V2_6_0_0,
+		V2_7_0_0,
+		V2_8_0_0,
 	}
-	MinVersion = V0_8_2_0
-	MaxVersion = V2_3_0_0
+	MinVersion     = V0_8_2_0
+	MaxVersion     = V2_8_0_0
+	DefaultVersion = V1_0_0_0
 )
 
-//ParseKafkaVersion parses and returns kafka version or error from a string
+// ParseKafkaVersion parses and returns kafka version or error from a string
 func ParseKafkaVersion(s string) (KafkaVersion, error) {
 	if len(s) < 5 {
-		return MinVersion, fmt.Errorf("invalid version `%s`", s)
+		return DefaultVersion, fmt.Errorf("invalid version `%s`", s)
 	}
 	var major, minor, veryMinor, patch uint
 	var err error
@@ -203,7 +212,7 @@
 		err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
 	}
 	if err != nil {
-		return MinVersion, err
+		return DefaultVersion, err
 	}
 	return newKafkaVersion(major, minor, veryMinor, patch), nil
 }
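With the new version constants, MaxVersion moves to V2_8_0_0 and parse failures now fall back to DefaultVersion (V1_0_0_0) rather than MinVersion. A minimal usage sketch of the exported ParseKafkaVersion:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// The version string would typically come from adapter configuration.
	version, err := sarama.ParseKafkaVersion("2.8.0")
	if err != nil {
		// On a malformed string the returned value is sarama's default version.
		log.Printf("using default Kafka version: %v", err)
	}

	cfg := sarama.NewConfig()
	cfg.Version = version
	fmt.Println(cfg.Version) // 2.8.0
}
```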
diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/Shopify/sarama/zstd.go
new file mode 100644
index 0000000..e23bfc4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/zstd.go
@@ -0,0 +1,18 @@
+package sarama
+
+import (
+	"github.com/klauspost/compress/zstd"
+)
+
+var (
+	zstdDec, _ = zstd.NewReader(nil)
+	zstdEnc, _ = zstd.NewWriter(nil, zstd.WithZeroFrames(true))
+)
+
+func zstdDecompress(dst, src []byte) ([]byte, error) {
+	return zstdDec.DecodeAll(src, dst)
+}
+
+func zstdCompress(dst, src []byte) ([]byte, error) {
+	return zstdEnc.EncodeAll(src, dst), nil
+}
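The new zstd.go replaces the cgo-only DataDog backend with the pure-Go klauspost/compress implementation, reusing one shared Encoder/Decoder via EncodeAll/DecodeAll. A small round-trip sketch using the same calls:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Shared encoder/decoder, matching how the new zstd.go reuses them.
	enc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true))
	dec, _ := zstd.NewReader(nil)

	payload := bytes.Repeat([]byte("voltha"), 100)

	compressed := enc.EncodeAll(payload, nil)
	restored, err := dec.DecodeAll(compressed, nil)

	fmt.Println(len(payload), len(compressed), bytes.Equal(payload, restored), err)
}
```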
diff --git a/vendor/github.com/Shopify/sarama/zstd_cgo.go b/vendor/github.com/Shopify/sarama/zstd_cgo.go
deleted file mode 100644
index f5ccb31..0000000
--- a/vendor/github.com/Shopify/sarama/zstd_cgo.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build cgo
-
-package sarama
-
-import "github.com/DataDog/zstd"
-
-func zstdDecompress(dst, src []byte) ([]byte, error) {
-	return zstd.Decompress(dst, src)
-}
-
-func zstdCompressLevel(dst, src []byte, level int) ([]byte, error) {
-	return zstd.CompressLevel(dst, src, level)
-}
diff --git a/vendor/github.com/Shopify/sarama/zstd_fallback.go b/vendor/github.com/Shopify/sarama/zstd_fallback.go
deleted file mode 100644
index 381a56b..0000000
--- a/vendor/github.com/Shopify/sarama/zstd_fallback.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build !cgo
-
-package sarama
-
-import (
-	"errors"
-)
-
-var errZstdCgo = errors.New("zstd compression requires building with cgo enabled")
-
-func zstdDecompress(dst, src []byte) ([]byte, error) {
-	return nil, errZstdCgo
-}
-
-func zstdCompressLevel(dst, src []byte, level int) ([]byte, error) {
-	return nil, errZstdCgo
-}
diff --git a/vendor/github.com/buraksezer/consistent/.gitignore b/vendor/github.com/buraksezer/consistent/.gitignore
deleted file mode 100644
index a1338d6..0000000
--- a/vendor/github.com/buraksezer/consistent/.gitignore
+++ /dev/null
@@ -1,14 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
-.glide/
diff --git a/vendor/github.com/buraksezer/consistent/.travis.yml b/vendor/github.com/buraksezer/consistent/.travis.yml
deleted file mode 100644
index 4f2ee4d..0000000
--- a/vendor/github.com/buraksezer/consistent/.travis.yml
+++ /dev/null
@@ -1 +0,0 @@
-language: go
diff --git a/vendor/github.com/buraksezer/consistent/LICENSE b/vendor/github.com/buraksezer/consistent/LICENSE
deleted file mode 100644
index e470334..0000000
--- a/vendor/github.com/buraksezer/consistent/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2018 Burak Sezer
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/buraksezer/consistent/README.md b/vendor/github.com/buraksezer/consistent/README.md
deleted file mode 100644
index bde53d1..0000000
--- a/vendor/github.com/buraksezer/consistent/README.md
+++ /dev/null
@@ -1,235 +0,0 @@
-consistent
-==========
-[![GoDoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/buraksezer/consistent) [![Build Status](https://travis-ci.org/buraksezer/consistent.svg?branch=master)](https://travis-ci.org/buraksezer/consistent) [![Coverage](http://gocover.io/_badge/github.com/buraksezer/consistent)](http://gocover.io/github.com/buraksezer/consistent) [![Go Report Card](https://goreportcard.com/badge/github.com/buraksezer/consistent)](https://goreportcard.com/report/github.com/buraksezer/consistent) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go)  
-
-
-This library provides a consistent hashing function which simultaneously achieves both uniformity and consistency. 
-
-For detailed information about the concept, you should take a look at the following resources:
-
-* [Consistent Hashing with Bounded Loads on Google Research Blog](https://research.googleblog.com/2017/04/consistent-hashing-with-bounded-loads.html)
-* [Improving load balancing with a new consistent-hashing algorithm on Vimeo Engineering Blog](https://medium.com/vimeo-engineering-blog/improving-load-balancing-with-a-new-consistent-hashing-algorithm-9f1bd75709ed)
-* [Consistent Hashing with Bounded Loads paper on arXiv](https://arxiv.org/abs/1608.01350)
-
-Table of Contents
-----------------
-
-- [Overview](#overview)
-- [Install](#install)
-- [Configuration](#configuration)
-- [Usage](#usage)
-- [Benchmarks](#benchmarks)
-- [Examples](#examples)
-
-Overview
---------
-
-In this package's context, the keys are distributed among partitions and partitions are distributed among members as well. 
-
-When you create a new consistent instance or call `Add/Remove`:
-
-* The member's name is hashed and inserted into the hash ring,
-* Average load is calculated by the algorithm defined in the paper,
-* Partitions are distributed among members by hashing partition IDs and none of them exceed the average load.
-
-Average load cannot be exceeded, so if all members are already at their maximum load when a new member is added, the library panics.
-
-When you want to locate a key by calling `LocateKey`:
-
-* The key(byte slice) is hashed,
-* The result of the hash is mod by the number of partitions,
-* The result of this modulo - `MOD(hash result, partition count)` - is the partition in which the key will be located,
-* Owner of the partition is already determined before calling `LocateKey`. So it returns the partition owner immediately.
-
-No memory is allocated by `consistent` except hashing when you want to locate a key.
-
-Note that the number of partitions cannot be changed after creation. 
-
-Install
--------
-
-With a correctly configured Go environment:
-
-```
-go get github.com/buraksezer/consistent
-```
-
-You will find some useful usage samples in [examples](https://github.com/buraksezer/consistent/tree/master/_examples) folder.
-
-Configuration
--------------
-
-```go
-type Config struct {
-	// Hasher is responsible for generating unsigned, 64 bit hash of provided byte slice.
-	Hasher Hasher
-
-	// Keys are distributed among partitions. Prime numbers are good to
-	// distribute keys uniformly. Select a big PartitionCount if you have
-	// too many keys.
-	PartitionCount int
-
-	// Members are replicated on the consistent hash ring. This number controls
-	// how many times each member is replicated on the ring.
-	ReplicationFactor int
-
-	// Load is used to calculate average load. See the code, the paper and Google's 
-	// blog post to learn about it.
-	Load float64
-}
-```
-
-Any hash algorithm that implements the Hasher interface can be used as the hasher. Please take a look at the *Examples* section for an example.
-
-Usage
------
-
-`LocateKey` function finds a member in the cluster for your key:
-```go
-// With a properly configured and initialized consistent instance
-key := []byte("my-key")
-member := c.LocateKey(key)
-```
-It returns a thread-safe copy of the member you added before.
-
-The second most frequently used function is `GetClosestN`. 
-
-```go
-// With a properly configured and initialized consistent instance
-
-key := []byte("my-key")
-members, err := c.GetClosestN(key, 2)
-```
-
-This may be useful to find backup nodes to store your key.
-
-Benchmarks
-----------
-On an early 2015 Macbook:
-
-```
-BenchmarkAddRemove-4     	  100000	     22006 ns/op
-BenchmarkLocateKey-4     	 5000000	       252 ns/op
-BenchmarkGetClosestN-4   	  500000	      2974 ns/op
-```
-
-Examples
---------
-
-The most basic use of the consistent package looks like the following. For a detailed list of functions, [visit godoc.org.](https://godoc.org/github.com/buraksezer/consistent)
-More sample code can be found under [_examples](https://github.com/buraksezer/consistent/tree/master/_examples).
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/buraksezer/consistent"
-	"github.com/cespare/xxhash"
-)
-
-// In your code, you probably have a custom data type 
-// for your cluster members. Just add a String function to implement 
-// consistent.Member interface.
-type myMember string
-
-func (m myMember) String() string {
-	return string(m)
-}
-
-// consistent package doesn't provide a default hashing function. 
-// You should provide a proper one to distribute keys/members uniformly.
-type hasher struct{}
-
-func (h hasher) Sum64(data []byte) uint64 {
-	// you should use a proper hash function for uniformity.
-	return xxhash.Sum64(data)
-}
-
-func main() {
-	// Create a new consistent instance
-	cfg := consistent.Config{
-		PartitionCount:    7,
-		ReplicationFactor: 20,
-		Load:              1.25,
-		Hasher:            hasher{},
-	}
-	c := consistent.New(nil, cfg)
-
-	// Add some members to the consistent hash table.
-	// Add function calculates average load and distributes partitions over members
-	node1 := myMember("node1.olric.com")
-	c.Add(node1)
-
-	node2 := myMember("node2.olric.com")
-	c.Add(node2)
-
-	key := []byte("my-key")
-	// calculates partition id for the given key
-	// partID := hash(key) % partitionCount
-	// the partitions are already distributed among members by Add function.
-	owner := c.LocateKey(key)
-	fmt.Println(owner.String())
-	// Prints node2.olric.com
-}
-```
-
-Another useful example is `_examples/relocation_percentage.go`. It creates a `consistent` object with 8 members and distributes partitions among them. It then adds a 9th member;
-here is the result with a proper configuration and hash function:
-
-```
-bloom:consistent burak$ go run _examples/relocation_percentage.go
-partID: 218 moved to node2.olric from node0.olric
-partID: 173 moved to node9.olric from node3.olric
-partID: 225 moved to node7.olric from node0.olric
-partID:  85 moved to node9.olric from node7.olric
-partID: 220 moved to node5.olric from node0.olric
-partID:  33 moved to node9.olric from node5.olric
-partID: 254 moved to node9.olric from node4.olric
-partID:  71 moved to node9.olric from node3.olric
-partID: 236 moved to node9.olric from node2.olric
-partID: 118 moved to node9.olric from node3.olric
-partID: 233 moved to node3.olric from node0.olric
-partID:  50 moved to node9.olric from node4.olric
-partID: 252 moved to node9.olric from node2.olric
-partID: 121 moved to node9.olric from node2.olric
-partID: 259 moved to node9.olric from node4.olric
-partID:  92 moved to node9.olric from node7.olric
-partID: 152 moved to node9.olric from node3.olric
-partID: 105 moved to node9.olric from node2.olric
-
-6% of the partitions are relocated
-```
-
-The moved partition count is highly dependent on your configuration and the quality of your hash function. You should modify the configuration to find an optimal set of parameters
-for your system.
-
-`_examples/load_distribution.go` is also useful for understanding load distribution. It creates a `consistent` object with 8 members and locates 1M keys. It also calculates the average
-load, which cannot be exceeded by any member. Here is the result:
-
-```
-Maximum key count for a member should be around this:  147602
-member: node2.olric, key count: 100362
-member: node5.olric, key count: 99448
-member: node0.olric, key count: 147735
-member: node3.olric, key count: 103455
-member: node6.olric, key count: 147069
-member: node1.olric, key count: 121566
-member: node4.olric, key count: 147932
-member: node7.olric, key count: 132433
-```
-
-Average load can be calculated by using the following formula:
-
-```
-load := (consistent.AverageLoad() * float64(keyCount)) / float64(config.PartitionCount)
-```
-
-Contributions
--------------
-Please don't hesitate to fork the project and send a pull request or just e-mail me to ask questions and share ideas.
-
-License
--------
-MIT License - see LICENSE for more details.
diff --git a/vendor/github.com/buraksezer/consistent/consistent.go b/vendor/github.com/buraksezer/consistent/consistent.go
deleted file mode 100644
index a1446d6..0000000
--- a/vendor/github.com/buraksezer/consistent/consistent.go
+++ /dev/null
@@ -1,362 +0,0 @@
-// Copyright (c) 2018 Burak Sezer
-// All rights reserved.
-//
-// This code is licensed under the MIT License.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files(the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions :
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package consistent provides a consistent hashing function with bounded loads.
-// For more information about the underlying algorithm, please take a look at
-// https://research.googleblog.com/2017/04/consistent-hashing-with-bounded-loads.html
-//
-// Example Use:
-// 	cfg := consistent.Config{
-// 		PartitionCount:    71,
-// 		ReplicationFactor: 20,
-// 		Load:              1.25,
-// 		Hasher:            hasher{},
-//	}
-//
-//      // Create a new consistent object
-//      // You may call this with a list of members
-//      // instead of adding them one by one.
-//	c := consistent.New(members, cfg)
-//
-//      // myMember struct just needs to implement a String method.
-//      // New/Add/Remove distributes partitions among members using the algorithm
-//      // defined on Google Research Blog.
-//	c.Add(myMember)
-//
-//	key := []byte("my-key")
-//      // LocateKey hashes the key and calculates partition ID with
-//      // this modulo operation: MOD(hash result, partition count)
-//      // The owner of the partition is already calculated by New/Add/Remove.
-//      LocateKey just returns the member that is responsible for the key.
-//	member := c.LocateKey(key)
-//
-package consistent
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"math"
-	"sort"
-	"sync"
-)
-
-var (
-	//ErrInsufficientMemberCount represents an error which means there are not enough members to complete the task.
-	ErrInsufficientMemberCount = errors.New("insufficient member count")
-
-	// ErrMemberNotFound represents an error which means requested member could not be found in consistent hash ring.
-	ErrMemberNotFound = errors.New("member could not be found in ring")
-)
-
-// Hasher is responsible for generating unsigned, 64 bit hash of provided byte slice.
-// Hasher should minimize collisions (generating same hash for different byte slice)
-// and while performance is also important fast functions are preferable (i.e.
-// you can use FarmHash family).
-type Hasher interface {
-	Sum64([]byte) uint64
-}
-
-// Member interface represents a member in consistent hash ring.
-type Member interface {
-	String() string
-}
-
-// Config represents a structure to control consistent package.
-type Config struct {
-	// Hasher is responsible for generating unsigned, 64 bit hash of provided byte slice.
-	Hasher Hasher
-
-	// Keys are distributed among partitions. Prime numbers are good to
-	// distribute keys uniformly. Select a big PartitionCount if you have
-	// too many keys.
-	PartitionCount int
-
-	// Members are replicated on the consistent hash ring. This number controls
-	// how many times each member is replicated on the ring.
-	ReplicationFactor int
-
-	// Load is used to calculate average load. See the code, the paper and Google's blog post to learn about it.
-	Load float64
-}
-
-// Consistent holds the information about the members of the consistent hash circle.
-type Consistent struct {
-	mu sync.RWMutex
-
-	config         Config
-	hasher         Hasher
-	sortedSet      []uint64
-	partitionCount uint64
-	loads          map[string]float64
-	members        map[string]*Member
-	partitions     map[int]*Member
-	ring           map[uint64]*Member
-}
-
-// New creates and returns a new Consistent object.
-func New(members []Member, config Config) *Consistent {
-	c := &Consistent{
-		config:         config,
-		members:        make(map[string]*Member),
-		partitionCount: uint64(config.PartitionCount),
-		ring:           make(map[uint64]*Member),
-	}
-	if config.Hasher == nil {
-		panic("Hasher cannot be nil")
-	}
-	// TODO: Check configuration here
-	c.hasher = config.Hasher
-	for _, member := range members {
-		c.add(member)
-	}
-	if members != nil {
-		c.distributePartitions()
-	}
-	return c
-}
-
-// GetMembers returns a thread-safe copy of members.
-func (c *Consistent) GetMembers() []Member {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	// Create a thread-safe copy of member list.
-	members := make([]Member, 0, len(c.members))
-	for _, member := range c.members {
-		members = append(members, *member)
-	}
-	return members
-}
-
-// AverageLoad exposes the current average load.
-func (c *Consistent) AverageLoad() float64 {
-	avgLoad := float64(c.partitionCount/uint64(len(c.members))) * c.config.Load
-	return math.Ceil(avgLoad)
-}
-
-func (c *Consistent) distributeWithLoad(partID, idx int, partitions map[int]*Member, loads map[string]float64) {
-	avgLoad := c.AverageLoad()
-	var count int
-	for {
-		count++
-		if count >= len(c.sortedSet) {
-			// User needs to decrease partition count, increase member count or increase load factor.
-			panic("not enough room to distribute partitions")
-		}
-		i := c.sortedSet[idx]
-		member := *c.ring[i]
-		load := loads[member.String()]
-		if load+1 <= avgLoad {
-			partitions[partID] = &member
-			loads[member.String()]++
-			return
-		}
-		idx++
-		if idx >= len(c.sortedSet) {
-			idx = 0
-		}
-	}
-}
-
-func (c *Consistent) distributePartitions() {
-	loads := make(map[string]float64)
-	partitions := make(map[int]*Member)
-
-	bs := make([]byte, 8)
-	for partID := uint64(0); partID < c.partitionCount; partID++ {
-		binary.LittleEndian.PutUint64(bs, partID)
-		key := c.hasher.Sum64(bs)
-		idx := sort.Search(len(c.sortedSet), func(i int) bool {
-			return c.sortedSet[i] >= key
-		})
-		if idx >= len(c.sortedSet) {
-			idx = 0
-		}
-		c.distributeWithLoad(int(partID), idx, partitions, loads)
-	}
-	c.partitions = partitions
-	c.loads = loads
-}
-
-func (c *Consistent) add(member Member) {
-	for i := 0; i < c.config.ReplicationFactor; i++ {
-		key := []byte(fmt.Sprintf("%s%d", member.String(), i))
-		h := c.hasher.Sum64(key)
-		c.ring[h] = &member
-		c.sortedSet = append(c.sortedSet, h)
-	}
-	// sort hashes ascendingly
-	sort.Slice(c.sortedSet, func(i int, j int) bool {
-		return c.sortedSet[i] < c.sortedSet[j]
-	})
-	// Storing member at this map is useful to find backup members of a partition.
-	c.members[member.String()] = &member
-}
-
-// Add adds a new member to the consistent hash circle.
-func (c *Consistent) Add(member Member) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	if _, ok := c.members[member.String()]; ok {
-		// We already have this member. Quit immediately.
-		return
-	}
-	c.add(member)
-	c.distributePartitions()
-}
-
-func (c *Consistent) delSlice(val uint64) {
-	for i := 0; i < len(c.sortedSet); i++ {
-		if c.sortedSet[i] == val {
-			c.sortedSet = append(c.sortedSet[:i], c.sortedSet[i+1:]...)
-			break
-		}
-	}
-}
-
-// Remove removes a member from the consistent hash circle.
-func (c *Consistent) Remove(name string) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	if _, ok := c.members[name]; !ok {
-		// There is no member with that name. Quit immediately.
-		return
-	}
-
-	for i := 0; i < c.config.ReplicationFactor; i++ {
-		key := []byte(fmt.Sprintf("%s%d", name, i))
-		h := c.hasher.Sum64(key)
-		delete(c.ring, h)
-		c.delSlice(h)
-	}
-	delete(c.members, name)
-	if len(c.members) == 0 {
-		// consistent hash ring is empty now. Reset the partition table.
-		c.partitions = make(map[int]*Member)
-		return
-	}
-	c.distributePartitions()
-}
-
-// LoadDistribution exposes load distribution of members.
-func (c *Consistent) LoadDistribution() map[string]float64 {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	// Create a thread-safe copy
-	res := make(map[string]float64)
-	for member, load := range c.loads {
-		res[member] = load
-	}
-	return res
-}
-
-// FindPartitionID returns partition id for given key.
-func (c *Consistent) FindPartitionID(key []byte) int {
-	hkey := c.hasher.Sum64(key)
-	return int(hkey % c.partitionCount)
-}
-
-// GetPartitionOwner returns the owner of the given partition.
-func (c *Consistent) GetPartitionOwner(partID int) Member {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	member, ok := c.partitions[partID]
-	if !ok {
-		return nil
-	}
-	// Create a thread-safe copy of member and return it.
-	return *member
-}
-
-// LocateKey finds a home for given key
-func (c *Consistent) LocateKey(key []byte) Member {
-	partID := c.FindPartitionID(key)
-	return c.GetPartitionOwner(partID)
-}
-
-func (c *Consistent) getClosestN(partID, count int) ([]Member, error) {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	res := []Member{}
-	if count > len(c.members) {
-		return res, ErrInsufficientMemberCount
-	}
-
-	var ownerKey uint64
-	owner := c.GetPartitionOwner(partID)
-	// Hash and sort all the names.
-	keys := []uint64{}
-	kmems := make(map[uint64]*Member)
-	for name, member := range c.members {
-		key := c.hasher.Sum64([]byte(name))
-		if name == owner.String() {
-			ownerKey = key
-		}
-		keys = append(keys, key)
-		kmems[key] = member
-	}
-	sort.Slice(keys, func(i, j int) bool {
-		return keys[i] < keys[j]
-	})
-
-	// Find the key owner
-	idx := 0
-	for idx < len(keys) {
-		if keys[idx] == ownerKey {
-			key := keys[idx]
-			res = append(res, *kmems[key])
-			break
-		}
-		idx++
-	}
-
-	// Find the closest(replica owners) members.
-	for len(res) < count {
-		idx++
-		if idx >= len(keys) {
-			idx = 0
-		}
-		key := keys[idx]
-		res = append(res, *kmems[key])
-	}
-	return res, nil
-}
-
-// GetClosestN returns the closest N member to a key in the hash ring.
-// This may be useful to find members for replication.
-func (c *Consistent) GetClosestN(key []byte, count int) ([]Member, error) {
-	partID := c.FindPartitionID(key)
-	return c.getClosestN(partID, count)
-}
-
-// GetClosestNForPartition returns the closest N member for given partition.
-// This may be useful to find members for replication.
-func (c *Consistent) GetClosestNForPartition(partID, count int) ([]Member, error) {
-	return c.getClosestN(partID, count)
-}
diff --git a/vendor/github.com/cespare/xxhash/README.md b/vendor/github.com/cespare/xxhash/README.md
deleted file mode 100644
index 0982fd2..0000000
--- a/vendor/github.com/cespare/xxhash/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# xxhash
-
-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
-high-quality hashing algorithm that is much faster than anything in the Go
-standard library.
-
-The API is very small, taking its cue from the other hashing packages in the
-standard library:
-
-    $ go doc github.com/cespare/xxhash
-    package xxhash // import "github.com/cespare/xxhash"
-
-    Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
-    at http://cyan4973.github.io/xxHash/.
-
-    func New() hash.Hash64
-    func Sum64(b []byte) uint64
-    func Sum64String(s string) uint64
-
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
-
-## Benchmarks
-
-Here are some quick benchmarks comparing the pure-Go and assembly
-implementations of Sum64 against another popular Go XXH64 implementation,
-[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash):
-
-| input size | OneOfOne | cespare (purego) | cespare |
-| --- | --- | --- | --- |
-| 5 B   |  416 MB/s | 720 MB/s |  872 MB/s  |
-| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s  |
-| 4 KB  | 12727 MB/s | 12999 MB/s | 13026 MB/s |
-| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s  |
-
-These numbers were generated with:
-
-```
-$ go test -benchtime 10s -bench '/OneOfOne,'
-$ go test -tags purego -benchtime 10s -bench '/xxhash,'
-$ go test -benchtime 10s -bench '/xxhash,'
-```
-
-## Projects using this package
-
-- [InfluxDB](https://github.com/influxdata/influxdb)
-- [Prometheus](https://github.com/prometheus/prometheus)
diff --git a/vendor/github.com/cespare/xxhash/go.mod b/vendor/github.com/cespare/xxhash/go.mod
deleted file mode 100644
index 10605a6..0000000
--- a/vendor/github.com/cespare/xxhash/go.mod
+++ /dev/null
@@ -1,6 +0,0 @@
-module github.com/cespare/xxhash
-
-require (
-	github.com/OneOfOne/xxhash v1.2.2
-	github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72
-)
diff --git a/vendor/github.com/cespare/xxhash/go.sum b/vendor/github.com/cespare/xxhash/go.sum
deleted file mode 100644
index f6b5542..0000000
--- a/vendor/github.com/cespare/xxhash/go.sum
+++ /dev/null
@@ -1,4 +0,0 @@
-github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
diff --git a/vendor/github.com/cespare/xxhash/rotate.go b/vendor/github.com/cespare/xxhash/rotate.go
deleted file mode 100644
index f3eac5e..0000000
--- a/vendor/github.com/cespare/xxhash/rotate.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build !go1.9
-
-package xxhash
-
-// TODO(caleb): After Go 1.10 comes out, remove this fallback code.
-
-func rol1(x uint64) uint64  { return (x << 1) | (x >> (64 - 1)) }
-func rol7(x uint64) uint64  { return (x << 7) | (x >> (64 - 7)) }
-func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
-func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
-func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
-func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
-func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
-func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }
diff --git a/vendor/github.com/cespare/xxhash/rotate19.go b/vendor/github.com/cespare/xxhash/rotate19.go
deleted file mode 100644
index b99612b..0000000
--- a/vendor/github.com/cespare/xxhash/rotate19.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build go1.9
-
-package xxhash
-
-import "math/bits"
-
-func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
-func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
-func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
-func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
-func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
-func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
-func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
-func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/cespare/xxhash/xxhash.go b/vendor/github.com/cespare/xxhash/xxhash.go
deleted file mode 100644
index f896bd2..0000000
--- a/vendor/github.com/cespare/xxhash/xxhash.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
-// at http://cyan4973.github.io/xxHash/.
-package xxhash
-
-import (
-	"encoding/binary"
-	"hash"
-)
-
-const (
-	prime1 uint64 = 11400714785074694791
-	prime2 uint64 = 14029467366897019727
-	prime3 uint64 = 1609587929392839161
-	prime4 uint64 = 9650029242287828579
-	prime5 uint64 = 2870177450012600261
-)
-
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
-	prime1v = prime1
-	prime2v = prime2
-	prime3v = prime3
-	prime4v = prime4
-	prime5v = prime5
-)
-
-type xxh struct {
-	v1    uint64
-	v2    uint64
-	v3    uint64
-	v4    uint64
-	total int
-	mem   [32]byte
-	n     int // how much of mem is used
-}
-
-// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm.
-func New() hash.Hash64 {
-	var x xxh
-	x.Reset()
-	return &x
-}
-
-func (x *xxh) Reset() {
-	x.n = 0
-	x.total = 0
-	x.v1 = prime1v + prime2
-	x.v2 = prime2
-	x.v3 = 0
-	x.v4 = -prime1v
-}
-
-func (x *xxh) Size() int      { return 8 }
-func (x *xxh) BlockSize() int { return 32 }
-
-// Write adds more data to x. It always returns len(b), nil.
-func (x *xxh) Write(b []byte) (n int, err error) {
-	n = len(b)
-	x.total += len(b)
-
-	if x.n+len(b) < 32 {
-		// This new data doesn't even fill the current block.
-		copy(x.mem[x.n:], b)
-		x.n += len(b)
-		return
-	}
-
-	if x.n > 0 {
-		// Finish off the partial block.
-		copy(x.mem[x.n:], b)
-		x.v1 = round(x.v1, u64(x.mem[0:8]))
-		x.v2 = round(x.v2, u64(x.mem[8:16]))
-		x.v3 = round(x.v3, u64(x.mem[16:24]))
-		x.v4 = round(x.v4, u64(x.mem[24:32]))
-		b = b[32-x.n:]
-		x.n = 0
-	}
-
-	if len(b) >= 32 {
-		// One or more full blocks left.
-		b = writeBlocks(x, b)
-	}
-
-	// Store any remaining partial block.
-	copy(x.mem[:], b)
-	x.n = len(b)
-
-	return
-}
-
-func (x *xxh) Sum(b []byte) []byte {
-	s := x.Sum64()
-	return append(
-		b,
-		byte(s>>56),
-		byte(s>>48),
-		byte(s>>40),
-		byte(s>>32),
-		byte(s>>24),
-		byte(s>>16),
-		byte(s>>8),
-		byte(s),
-	)
-}
-
-func (x *xxh) Sum64() uint64 {
-	var h uint64
-
-	if x.total >= 32 {
-		v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
-		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
-		h = mergeRound(h, v1)
-		h = mergeRound(h, v2)
-		h = mergeRound(h, v3)
-		h = mergeRound(h, v4)
-	} else {
-		h = x.v3 + prime5
-	}
-
-	h += uint64(x.total)
-
-	i, end := 0, x.n
-	for ; i+8 <= end; i += 8 {
-		k1 := round(0, u64(x.mem[i:i+8]))
-		h ^= k1
-		h = rol27(h)*prime1 + prime4
-	}
-	if i+4 <= end {
-		h ^= uint64(u32(x.mem[i:i+4])) * prime1
-		h = rol23(h)*prime2 + prime3
-		i += 4
-	}
-	for i < end {
-		h ^= uint64(x.mem[i]) * prime5
-		h = rol11(h) * prime1
-		i++
-	}
-
-	h ^= h >> 33
-	h *= prime2
-	h ^= h >> 29
-	h *= prime3
-	h ^= h >> 32
-
-	return h
-}
-
-func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
-func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
-
-func round(acc, input uint64) uint64 {
-	acc += input * prime2
-	acc = rol31(acc)
-	acc *= prime1
-	return acc
-}
-
-func mergeRound(acc, val uint64) uint64 {
-	val = round(0, val)
-	acc ^= val
-	acc = acc*prime1 + prime4
-	return acc
-}
diff --git a/vendor/github.com/cespare/xxhash/xxhash_safe.go b/vendor/github.com/cespare/xxhash/xxhash_safe.go
deleted file mode 100644
index dfa15ab..0000000
--- a/vendor/github.com/cespare/xxhash/xxhash_safe.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build appengine
-
-// This file contains the safe implementations of otherwise unsafe-using code.
-
-package xxhash
-
-// Sum64String computes the 64-bit xxHash digest of s.
-func Sum64String(s string) uint64 {
-	return Sum64([]byte(s))
-}
diff --git a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go
deleted file mode 100644
index d2b64e8..0000000
--- a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build !appengine
-
-// This file encapsulates usage of unsafe.
-// xxhash_safe.go contains the safe implementations.
-
-package xxhash
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-// Sum64String computes the 64-bit xxHash digest of s.
-// It may be faster than Sum64([]byte(s)) by avoiding a copy.
-//
-// TODO(caleb): Consider removing this if an optimization is ever added to make
-// it unnecessary: https://golang.org/issue/2205.
-//
-// TODO(caleb): We still have a function call; we could instead write Go/asm
-// copies of Sum64 for strings to squeeze out a bit more speed.
-func Sum64String(s string) uint64 {
-	// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
-	// for some discussion about this unsafe conversion.
-	var b []byte
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-	bh.Len = len(s)
-	bh.Cap = len(s)
-	return Sum64(b)
-}
diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE
new file mode 100644
index 0000000..b39ddfa
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
new file mode 100644
index 0000000..c5faf00
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
@@ -0,0 +1,972 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: auth.proto
+
+package authpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Permission_Type int32
+
+const (
+	READ      Permission_Type = 0
+	WRITE     Permission_Type = 1
+	READWRITE Permission_Type = 2
+)
+
+var Permission_Type_name = map[int32]string{
+	0: "READ",
+	1: "WRITE",
+	2: "READWRITE",
+}
+
+var Permission_Type_value = map[string]int32{
+	"READ":      0,
+	"WRITE":     1,
+	"READWRITE": 2,
+}
+
+func (x Permission_Type) String() string {
+	return proto.EnumName(Permission_Type_name, int32(x))
+}
+
+func (Permission_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_8bbd6f3875b0e874, []int{1, 0}
+}
+
+// User is a single entry in the bucket authUsers
+type User struct {
+	Name                 []byte   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Password             []byte   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	Roles                []string `protobuf:"bytes,3,rep,name=roles,proto3" json:"roles,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *User) Reset()         { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage()    {}
+func (*User) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8bbd6f3875b0e874, []int{0}
+}
+func (m *User) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_User.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *User) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_User.Merge(m, src)
+}
+func (m *User) XXX_Size() int {
+	return m.Size()
+}
+func (m *User) XXX_DiscardUnknown() {
+	xxx_messageInfo_User.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_User proto.InternalMessageInfo
+
+// Permission is a single entity
+type Permission struct {
+	PermType             Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
+	Key                  []byte          `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	RangeEnd             []byte          `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *Permission) Reset()         { *m = Permission{} }
+func (m *Permission) String() string { return proto.CompactTextString(m) }
+func (*Permission) ProtoMessage()    {}
+func (*Permission) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8bbd6f3875b0e874, []int{1}
+}
+func (m *Permission) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Permission.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Permission) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Permission.Merge(m, src)
+}
+func (m *Permission) XXX_Size() int {
+	return m.Size()
+}
+func (m *Permission) XXX_DiscardUnknown() {
+	xxx_messageInfo_Permission.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Permission proto.InternalMessageInfo
+
+// Role is a single entry in the bucket authRoles
+type Role struct {
+	Name                 []byte        `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	KeyPermission        []*Permission `protobuf:"bytes,2,rep,name=keyPermission,proto3" json:"keyPermission,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *Role) Reset()         { *m = Role{} }
+func (m *Role) String() string { return proto.CompactTextString(m) }
+func (*Role) ProtoMessage()    {}
+func (*Role) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8bbd6f3875b0e874, []int{2}
+}
+func (m *Role) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Role.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Role) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Role.Merge(m, src)
+}
+func (m *Role) XXX_Size() int {
+	return m.Size()
+}
+func (m *Role) XXX_DiscardUnknown() {
+	xxx_messageInfo_Role.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Role proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
+	proto.RegisterType((*User)(nil), "authpb.User")
+	proto.RegisterType((*Permission)(nil), "authpb.Permission")
+	proto.RegisterType((*Role)(nil), "authpb.Role")
+}
+
+func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) }
+
+var fileDescriptor_8bbd6f3875b0e874 = []byte{
+	// 288 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
+	0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
+	0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
+	0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
+	0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
+	0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
+	0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
+	0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
+	0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
+	0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
+	0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
+	0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
+	0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
+	0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
+	0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
+	0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
+	0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
+	0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
+}
+
+func (m *User) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *User) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Roles) > 0 {
+		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Roles[iNdEx])
+			copy(dAtA[i:], m.Roles[iNdEx])
+			i = encodeVarintAuth(dAtA, i, uint64(len(m.Roles[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Permission) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Permission) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.PermType != 0 {
+		i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Role) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Role) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.KeyPermission) > 0 {
+		for iNdEx := len(m.KeyPermission) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.KeyPermission[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintAuth(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
+	offset -= sovAuth(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *User) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	if len(m.Roles) > 0 {
+		for _, s := range m.Roles {
+			l = len(s)
+			n += 1 + l + sovAuth(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Permission) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.PermType != 0 {
+		n += 1 + sovAuth(uint64(m.PermType))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Role) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	if len(m.KeyPermission) > 0 {
+		for _, e := range m.KeyPermission {
+			l = e.Size()
+			n += 1 + l + sovAuth(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovAuth(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozAuth(x uint64) (n int) {
+	return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *User) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAuth
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: User: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+			if m.Name == nil {
+				m.Name = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
+			if m.Password == nil {
+				m.Password = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAuth(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Permission) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAuth
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Permission: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
+			}
+			m.PermType = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PermType |= Permission_Type(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAuth(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Role) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAuth
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Role: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+			if m.Name == nil {
+				m.Name = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.KeyPermission = append(m.KeyPermission, &Permission{})
+			if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAuth(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipAuth(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowAuth
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthAuth
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthAuth
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowAuth
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipAuth(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthAuth
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowAuth   = fmt.Errorf("proto: integer overflow")
+)
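
For orientation, a minimal round-trip through the generated types above might look like the following sketch (values are made up; it assumes the vendored import path github.com/coreos/etcd/auth/authpb):

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/auth/authpb"
)

func main() {
	// Encode a User with the gogo-generated Marshal defined above.
	u := &authpb.User{
		Name:     []byte("alice"), // made-up values for illustration
		Password: []byte("hashed-secret"),
		Roles:    []string{"root", "guest"},
	}
	data, err := u.Marshal()
	if err != nil {
		panic(err)
	}

	// Decode it back with the generated Unmarshal.
	var decoded authpb.User
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Printf("decoded %q with roles %v (%d bytes on the wire)\n",
		decoded.Name, decoded.Roles, len(data))
}
```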
diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto
new file mode 100644
index 0000000..001d334
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto
@@ -0,0 +1,37 @@
+syntax = "proto3";
+package authpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+// User is a single entry in the bucket authUsers
+message User {
+  bytes name = 1;
+  bytes password = 2;
+  repeated string roles = 3;
+}
+
+// Permission is a single entity
+message Permission {
+  enum Type {
+    READ = 0;
+    WRITE = 1;
+    READWRITE = 2;
+  }
+  Type permType = 1;
+
+  bytes key = 2;
+  bytes range_end = 3;
+}
+
+// Role is a single entry in the bucket authRoles
+message Role {
+  bytes name = 1;
+
+  repeated Permission keyPermission = 2;
+}
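
Because goproto_enum_prefix_all is disabled, the Permission.Type values surface in Go as authpb.READ / authpb.WRITE / authpb.READWRITE. Continuing the previous sketch's imports, building the Role message defined here could look like this (the names and key range are illustrative only):

```go
// Illustrative only; role name and key range are made up.
role := &authpb.Role{
	Name: []byte("olt-operator"),
	KeyPermission: []*authpb.Permission{
		{
			PermType: authpb.READWRITE, // no enum prefix, per the gogoproto option
			Key:      []byte("service/olt/"),
			RangeEnd: []byte("service/olt0"), // half-open range end, etcd convention
		},
	},
}
fmt.Printf("role %s grants %d permission(s)\n", role.Name, len(role.KeyPermission))
```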
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go
new file mode 100644
index 0000000..9306385
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go
@@ -0,0 +1,293 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package balancer implements client balancer.
+package balancer
+
+import (
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/clientv3/balancer/connectivity"
+	"github.com/coreos/etcd/clientv3/balancer/picker"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc/balancer"
+	grpcconnectivity "google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/resolver"
+	_ "google.golang.org/grpc/resolver/dns"         // register DNS resolver
+	_ "google.golang.org/grpc/resolver/passthrough" // register passthrough resolver
+)
+
+// Config defines balancer configurations.
+type Config struct {
+	// Policy configures balancer policy.
+	Policy picker.Policy
+
+	// Picker implements gRPC picker.
+	// Leave empty if "Policy" field is not custom.
+	// TODO: currently custom policy is not supported.
+	// Picker picker.Picker
+
+	// Name defines an additional name for balancer.
+	// Useful for balancer testing to avoid register conflicts.
+	// If empty, defaults to policy name.
+	Name string
+
+	// Logger configures balancer logging.
+	// If nil, logs are discarded.
+	Logger *zap.Logger
+}
+
+// RegisterBuilder creates and registers a builder. Since this function calls balancer.Register, it
+// must be invoked at initialization time.
+func RegisterBuilder(cfg Config) {
+	bb := &builder{cfg}
+	balancer.Register(bb)
+
+	bb.cfg.Logger.Debug(
+		"registered balancer",
+		zap.String("policy", bb.cfg.Policy.String()),
+		zap.String("name", bb.cfg.Name),
+	)
+}
+
+type builder struct {
+	cfg Config
+}
+
+// Build is called initially when creating "ccBalancerWrapper".
+// "grpc.Dial" creates the client connection that this balancer is attached to.
+// Resolved addresses are then handled via "HandleResolvedAddrs".
+func (b *builder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+	bb := &baseBalancer{
+		id:     strconv.FormatInt(time.Now().UnixNano(), 36),
+		policy: b.cfg.Policy,
+		name:   b.cfg.Name,
+		lg:     b.cfg.Logger,
+
+		addrToSc: make(map[resolver.Address]balancer.SubConn),
+		scToAddr: make(map[balancer.SubConn]resolver.Address),
+		scToSt:   make(map[balancer.SubConn]grpcconnectivity.State),
+
+		currentConn:          nil,
+		connectivityRecorder: connectivity.New(b.cfg.Logger),
+
+		// the initial picker always returns "ErrNoSubConnAvailable"
+		picker: picker.NewErr(balancer.ErrNoSubConnAvailable),
+	}
+
+	// TODO: support multiple connections
+	bb.mu.Lock()
+	bb.currentConn = cc
+	bb.mu.Unlock()
+
+	bb.lg.Info(
+		"built balancer",
+		zap.String("balancer-id", bb.id),
+		zap.String("policy", bb.policy.String()),
+		zap.String("resolver-target", cc.Target()),
+	)
+	return bb
+}
+
+// Name implements "grpc/balancer.Builder" interface.
+func (b *builder) Name() string { return b.cfg.Name }
+
+// Balancer defines client balancer interface.
+type Balancer interface {
+	// Balancer operates on the specified client connection. The client initiates a gRPC
+	// connection with "grpc.Dial(addr, grpc.WithBalancerName)", and the resolved
+	// addresses are then passed to "grpc/balancer.Balancer.HandleResolvedAddrs".
+	// For each resolved address, the balancer calls "balancer.ClientConn.NewSubConn".
+	// "grpc/balancer.Balancer.HandleSubConnStateChange" is called whenever the connectivity
+	// state changes, so failover logic is required in that method.
+	balancer.Balancer
+
+	// Picker calls "Pick" for every client request.
+	picker.Picker
+}
+
+type baseBalancer struct {
+	id     string
+	policy picker.Policy
+	name   string
+	lg     *zap.Logger
+
+	mu sync.RWMutex
+
+	addrToSc map[resolver.Address]balancer.SubConn
+	scToAddr map[balancer.SubConn]resolver.Address
+	scToSt   map[balancer.SubConn]grpcconnectivity.State
+
+	currentConn          balancer.ClientConn
+	connectivityRecorder connectivity.Recorder
+
+	picker picker.Picker
+}
+
+// HandleResolvedAddrs implements "grpc/balancer.Balancer" interface.
+// gRPC sends initial or updated resolved addresses from "Build".
+func (bb *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+	if err != nil {
+		bb.lg.Warn("HandleResolvedAddrs called with error", zap.String("balancer-id", bb.id), zap.Error(err))
+		return
+	}
+	bb.lg.Info("resolved",
+		zap.String("picker", bb.picker.String()),
+		zap.String("balancer-id", bb.id),
+		zap.Strings("addresses", addrsToStrings(addrs)),
+	)
+
+	bb.mu.Lock()
+	defer bb.mu.Unlock()
+
+	resolved := make(map[resolver.Address]struct{})
+	for _, addr := range addrs {
+		resolved[addr] = struct{}{}
+		if _, ok := bb.addrToSc[addr]; !ok {
+			sc, err := bb.currentConn.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
+			if err != nil {
+				bb.lg.Warn("NewSubConn failed", zap.String("picker", bb.picker.String()), zap.String("balancer-id", bb.id), zap.Error(err), zap.String("address", addr.Addr))
+				continue
+			}
+			bb.lg.Info("created subconn", zap.String("address", addr.Addr))
+			bb.addrToSc[addr] = sc
+			bb.scToAddr[sc] = addr
+			bb.scToSt[sc] = grpcconnectivity.Idle
+			sc.Connect()
+		}
+	}
+
+	for addr, sc := range bb.addrToSc {
+		if _, ok := resolved[addr]; !ok {
+			// was removed by resolver or failed to create subconn
+			bb.currentConn.RemoveSubConn(sc)
+			delete(bb.addrToSc, addr)
+
+			bb.lg.Info(
+				"removed subconn",
+				zap.String("picker", bb.picker.String()),
+				zap.String("balancer-id", bb.id),
+				zap.String("address", addr.Addr),
+				zap.String("subconn", scToString(sc)),
+			)
+
+			// Keep the state of this sc in bb.scToSt until sc's state becomes Shutdown.
+			// The entry will be deleted in HandleSubConnStateChange.
+			// (DO NOT) delete(bb.scToAddr, sc)
+			// (DO NOT) delete(bb.scToSt, sc)
+		}
+	}
+}
+
+// HandleSubConnStateChange implements "grpc/balancer.Balancer" interface.
+func (bb *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s grpcconnectivity.State) {
+	bb.mu.Lock()
+	defer bb.mu.Unlock()
+
+	old, ok := bb.scToSt[sc]
+	if !ok {
+		bb.lg.Warn(
+			"state change for an unknown subconn",
+			zap.String("picker", bb.picker.String()),
+			zap.String("balancer-id", bb.id),
+			zap.String("subconn", scToString(sc)),
+			zap.Int("subconn-size", len(bb.scToAddr)),
+			zap.String("state", s.String()),
+		)
+		return
+	}
+
+	bb.lg.Info(
+		"state changed",
+		zap.String("picker", bb.picker.String()),
+		zap.String("balancer-id", bb.id),
+		zap.Bool("connected", s == grpcconnectivity.Ready),
+		zap.String("subconn", scToString(sc)),
+		zap.Int("subconn-size", len(bb.scToAddr)),
+		zap.String("address", bb.scToAddr[sc].Addr),
+		zap.String("old-state", old.String()),
+		zap.String("new-state", s.String()),
+	)
+
+	bb.scToSt[sc] = s
+	switch s {
+	case grpcconnectivity.Idle:
+		sc.Connect()
+	case grpcconnectivity.Shutdown:
+		// When an address was removed by the resolver, the balancer called RemoveSubConn
+		// but kept the sc's state in scToSt. Remove the state for this sc here.
+		delete(bb.scToAddr, sc)
+		delete(bb.scToSt, sc)
+	}
+
+	oldAggrState := bb.connectivityRecorder.GetCurrentState()
+	bb.connectivityRecorder.RecordTransition(old, s)
+
+	// Update balancer picker when one of the following happens:
+	//  - this sc became ready from not-ready
+	//  - this sc became not-ready from ready
+	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
+	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
+	if (s == grpcconnectivity.Ready) != (old == grpcconnectivity.Ready) ||
+		(bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure) != (oldAggrState == grpcconnectivity.TransientFailure) {
+		bb.updatePicker()
+	}
+
+	bb.currentConn.UpdateBalancerState(bb.connectivityRecorder.GetCurrentState(), bb.picker)
+}
+
+func (bb *baseBalancer) updatePicker() {
+	if bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure {
+		bb.picker = picker.NewErr(balancer.ErrTransientFailure)
+		bb.lg.Info(
+			"updated picker to transient error picker",
+			zap.String("picker", bb.picker.String()),
+			zap.String("balancer-id", bb.id),
+			zap.String("policy", bb.policy.String()),
+		)
+		return
+	}
+
+	// only pass ready subconns to picker
+	scToAddr := make(map[balancer.SubConn]resolver.Address)
+	for addr, sc := range bb.addrToSc {
+		if st, ok := bb.scToSt[sc]; ok && st == grpcconnectivity.Ready {
+			scToAddr[sc] = addr
+		}
+	}
+
+	bb.picker = picker.New(picker.Config{
+		Policy:                   bb.policy,
+		Logger:                   bb.lg,
+		SubConnToResolverAddress: scToAddr,
+	})
+	bb.lg.Info(
+		"updated picker",
+		zap.String("picker", bb.picker.String()),
+		zap.String("balancer-id", bb.id),
+		zap.String("policy", bb.policy.String()),
+		zap.Strings("subconn-ready", scsToStrings(scToAddr)),
+		zap.Int("subconn-size", len(scToAddr)),
+	)
+}
+
+// Close implements "grpc/balancer.Balancer" interface.
+// Close is a nop because base balancer doesn't have internal state to clean up,
+// and it doesn't need to call RemoveSubConn for the SubConns.
+func (bb *baseBalancer) Close() {
+	// TODO
+}
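
A sketch of how a client might register and select this balancer; the balancer name and endpoint address are placeholders, and grpc.WithBalancerName reflects the gRPC API generation this vendored code targets:

```go
package main

import (
	"github.com/coreos/etcd/clientv3/balancer"
	"github.com/coreos/etcd/clientv3/balancer/picker"

	"go.uber.org/zap"
	"google.golang.org/grpc"
)

const balancerName = "example-rr-balancer" // hypothetical name

func init() {
	// Register before any grpc.Dial so the named balancer is known to gRPC.
	// RegisterBuilder logs through cfg.Logger, so pass a non-nil logger.
	balancer.RegisterBuilder(balancer.Config{
		Policy: picker.RoundrobinBalanced,
		Name:   balancerName,
		Logger: zap.NewNop(),
	})
}

func main() {
	// Dial a placeholder etcd address, letting the registered balancer
	// manage sub-connections and pick among the ready ones.
	conn, err := grpc.Dial(
		"127.0.0.1:2379",
		grpc.WithInsecure(),
		grpc.WithBalancerName(balancerName),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```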
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go b/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go
new file mode 100644
index 0000000..4c4ad36
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go
@@ -0,0 +1,93 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package connectivity implements client connectivity operations.
+package connectivity
+
+import (
+	"sync"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc/connectivity"
+)
+
+// Recorder records gRPC connectivity.
+type Recorder interface {
+	GetCurrentState() connectivity.State
+	RecordTransition(oldState, newState connectivity.State)
+}
+
+// New returns a new Recorder.
+func New(lg *zap.Logger) Recorder {
+	return &recorder{lg: lg}
+}
+
+// recorder takes the connectivity states of multiple SubConns
+// and returns one aggregated connectivity state.
+// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
+type recorder struct {
+	lg *zap.Logger
+
+	mu sync.RWMutex
+
+	cur connectivity.State
+
+	numReady            uint64 // Number of addrConns in ready state.
+	numConnecting       uint64 // Number of addrConns in connecting state.
+	numTransientFailure uint64 // Number of addrConns in transientFailure.
+}
+
+func (rc *recorder) GetCurrentState() (state connectivity.State) {
+	rc.mu.RLock()
+	defer rc.mu.RUnlock()
+	return rc.cur
+}
+
+// RecordTransition records a state change of a SubConn and, based on that,
+// re-evaluates what the aggregated state should be.
+//
+//  - If at least one SubConn in Ready, the aggregated state is Ready;
+//  - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
+//  - Else the aggregated state is TransientFailure.
+//
+// Idle and Shutdown are not considered.
+//
+// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
+func (rc *recorder) RecordTransition(oldState, newState connectivity.State) {
+	rc.mu.Lock()
+	defer rc.mu.Unlock()
+
+	for idx, state := range []connectivity.State{oldState, newState} {
+		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+		switch state {
+		case connectivity.Ready:
+			rc.numReady += updateVal
+		case connectivity.Connecting:
+			rc.numConnecting += updateVal
+		case connectivity.TransientFailure:
+			rc.numTransientFailure += updateVal
+		default:
+			rc.lg.Warn("connectivity recorder received unknown state", zap.String("connectivity-state", state.String()))
+		}
+	}
+
+	switch { // must be exclusive, no overlap
+	case rc.numReady > 0:
+		rc.cur = connectivity.Ready
+	case rc.numConnecting > 0:
+		rc.cur = connectivity.Connecting
+	default:
+		rc.cur = connectivity.TransientFailure
+	}
+}
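
A short sketch of the aggregation rule described above, driving the recorder through a typical SubConn lifecycle (Idle does not move the counters, so only Connecting/Ready/TransientFailure affect the aggregate):

```go
package main

import (
	"fmt"

	epconn "github.com/coreos/etcd/clientv3/balancer/connectivity"

	"go.uber.org/zap"
	grpcconn "google.golang.org/grpc/connectivity"
)

func main() {
	rec := epconn.New(zap.NewNop())

	// A single SubConn comes up: Idle -> Connecting -> Ready.
	rec.RecordTransition(grpcconn.Idle, grpcconn.Connecting)
	fmt.Println(rec.GetCurrentState()) // CONNECTING

	rec.RecordTransition(grpcconn.Connecting, grpcconn.Ready)
	fmt.Println(rec.GetCurrentState()) // READY

	// The same SubConn fails; with nothing Ready or Connecting left,
	// the aggregate degrades to TransientFailure.
	rec.RecordTransition(grpcconn.Ready, grpcconn.TransientFailure)
	fmt.Println(rec.GetCurrentState()) // TRANSIENT_FAILURE
}
```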
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go
new file mode 100644
index 0000000..35dabf5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package picker defines/implements client balancer picker policy.
+package picker
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go
new file mode 100644
index 0000000..9e04378
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go
@@ -0,0 +1,39 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package picker
+
+import (
+	"context"
+
+	"google.golang.org/grpc/balancer"
+)
+
+// NewErr returns a picker that always returns err on "Pick".
+func NewErr(err error) Picker {
+	return &errPicker{p: Error, err: err}
+}
+
+type errPicker struct {
+	p   Policy
+	err error
+}
+
+func (ep *errPicker) String() string {
+	return ep.p.String()
+}
+
+func (ep *errPicker) Pick(context.Context, balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	return nil, nil, ep.err
+}
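
A quick sketch of the error picker's behaviour: every Pick returns the configured error, which is how the balancer signals "no sub-connection yet" back to gRPC before any endpoint becomes ready:

```go
package main

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/clientv3/balancer/picker"

	"google.golang.org/grpc/balancer"
)

func main() {
	p := picker.NewErr(balancer.ErrNoSubConnAvailable)

	// Pick never succeeds; gRPC retries until a real picker is installed.
	_, _, err := p.Pick(context.Background(), balancer.PickOptions{})
	fmt.Println(p.String(), "->", err) // picker-error -> no SubConn is available
}
```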
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go
new file mode 100644
index 0000000..bd1a5d2
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go
@@ -0,0 +1,91 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package picker
+
+import (
+	"fmt"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/resolver"
+)
+
+// Picker defines balancer Picker methods.
+type Picker interface {
+	balancer.Picker
+	String() string
+}
+
+// Config defines picker configuration.
+type Config struct {
+	// Policy specifies etcd clientv3's built in balancer policy.
+	Policy Policy
+
+	// Logger defines picker logging object.
+	Logger *zap.Logger
+
+	// SubConnToResolverAddress maps each gRPC sub-connection to an address.
+	// Basically, it is a list of addresses that the Picker can pick from.
+	SubConnToResolverAddress map[balancer.SubConn]resolver.Address
+}
+
+// Policy defines balancer picker policy.
+type Policy uint8
+
+const (
+	// Error is error picker policy.
+	Error Policy = iota
+
+	// RoundrobinBalanced balances loads over multiple endpoints
+	// and implements failover in roundrobin fashion.
+	RoundrobinBalanced
+
+	// Custom defines custom balancer picker.
+	// TODO: custom picker is not supported yet.
+	Custom
+)
+
+func (p Policy) String() string {
+	switch p {
+	case Error:
+		return "picker-error"
+
+	case RoundrobinBalanced:
+		return "picker-roundrobin-balanced"
+
+	case Custom:
+		panic("'custom' picker policy is not supported yet")
+
+	default:
+		panic(fmt.Errorf("invalid balancer picker policy (%d)", p))
+	}
+}
+
+// New creates a new Picker.
+func New(cfg Config) Picker {
+	switch cfg.Policy {
+	case Error:
+		panic("'error' picker policy is not supported here; use 'picker.NewErr'")
+
+	case RoundrobinBalanced:
+		return newRoundrobinBalanced(cfg)
+
+	case Custom:
+		panic("'custom' picker policy is not supported yet")
+
+	default:
+		panic(fmt.Errorf("invalid balancer picker policy (%d)", cfg.Policy))
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go
new file mode 100644
index 0000000..1b8b285
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go
@@ -0,0 +1,95 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package picker
+
+import (
+	"context"
+	"sync"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/resolver"
+)
+
+// newRoundrobinBalanced returns a new roundrobin balanced picker.
+func newRoundrobinBalanced(cfg Config) Picker {
+	scs := make([]balancer.SubConn, 0, len(cfg.SubConnToResolverAddress))
+	for sc := range cfg.SubConnToResolverAddress {
+		scs = append(scs, sc)
+	}
+	return &rrBalanced{
+		p:        RoundrobinBalanced,
+		lg:       cfg.Logger,
+		scs:      scs,
+		scToAddr: cfg.SubConnToResolverAddress,
+	}
+}
+
+type rrBalanced struct {
+	p Policy
+
+	lg *zap.Logger
+
+	mu       sync.RWMutex
+	next     int
+	scs      []balancer.SubConn
+	scToAddr map[balancer.SubConn]resolver.Address
+}
+
+func (rb *rrBalanced) String() string { return rb.p.String() }
+
+// Pick is called for every client request.
+func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	rb.mu.RLock()
+	n := len(rb.scs)
+	rb.mu.RUnlock()
+	if n == 0 {
+		return nil, nil, balancer.ErrNoSubConnAvailable
+	}
+
+	rb.mu.Lock()
+	cur := rb.next
+	sc := rb.scs[cur]
+	picked := rb.scToAddr[sc].Addr
+	rb.next = (rb.next + 1) % len(rb.scs)
+	rb.mu.Unlock()
+
+	rb.lg.Debug(
+		"picked",
+		zap.String("picker", rb.p.String()),
+		zap.String("address", picked),
+		zap.Int("subconn-index", cur),
+		zap.Int("subconn-size", n),
+	)
+
+	doneFunc := func(info balancer.DoneInfo) {
+		// TODO: error handling?
+		fss := []zapcore.Field{
+			zap.Error(info.Err),
+			zap.String("picker", rb.p.String()),
+			zap.String("address", picked),
+			zap.Bool("success", info.Err == nil),
+			zap.Bool("bytes-sent", info.BytesSent),
+			zap.Bool("bytes-received", info.BytesReceived),
+		}
+		if info.Err == nil {
+			rb.lg.Debug("balancer done", fss...)
+		} else {
+			rb.lg.Warn("balancer failed", fss...)
+		}
+	}
+	return sc, doneFunc, nil
+}
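
A sketch of the round-robin rotation via picker.New; balancer.SubConn is an interface, so a tiny stub stands in for real gRPC sub-connections here (stubSubConn, its method set, and the addresses are assumptions made for illustration):

```go
package main

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/clientv3/balancer/picker"

	"go.uber.org/zap"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// stubSubConn satisfies balancer.SubConn for illustration only.
type stubSubConn struct{ id int }

func (stubSubConn) UpdateAddresses([]resolver.Address) {}
func (stubSubConn) Connect()                           {}

func main() {
	scToAddr := map[balancer.SubConn]resolver.Address{
		stubSubConn{1}: {Addr: "10.0.0.1:2379"}, // placeholder addresses
		stubSubConn{2}: {Addr: "10.0.0.2:2379"},
	}

	p := picker.New(picker.Config{
		Policy:                   picker.RoundrobinBalanced,
		Logger:                   zap.NewNop(),
		SubConnToResolverAddress: scToAddr,
	})

	// Successive Picks rotate through the ready sub-connections.
	for i := 0; i < 4; i++ {
		sc, done, err := p.Pick(context.Background(), balancer.PickOptions{})
		if err != nil {
			panic(err)
		}
		fmt.Println("picked", scToAddr[sc].Addr)
		done(balancer.DoneInfo{}) // report completion back to the picker
	}
}
```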
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go b/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
new file mode 100644
index 0000000..864b5df
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
@@ -0,0 +1,247 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package endpoint resolves etcd endpoints using grpc targets of the form 'endpoint://<id>/<endpoint>'.
+package endpoint
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/url"
+	"strings"
+	"sync"
+
+	"google.golang.org/grpc/resolver"
+)
+
+const scheme = "endpoint"
+
+var (
+	targetPrefix = fmt.Sprintf("%s://", scheme)
+
+	bldr *builder
+)
+
+func init() {
+	bldr = &builder{
+		resolverGroups: make(map[string]*ResolverGroup),
+	}
+	resolver.Register(bldr)
+}
+
+type builder struct {
+	mu             sync.RWMutex
+	resolverGroups map[string]*ResolverGroup
+}
+
+// NewResolverGroup creates a new ResolverGroup with the given id.
+func NewResolverGroup(id string) (*ResolverGroup, error) {
+	return bldr.newResolverGroup(id)
+}
+
+// ResolverGroup keeps all endpoints of resolvers using a common endpoint://<id>/ target
+// up-to-date.
+type ResolverGroup struct {
+	mu        sync.RWMutex
+	id        string
+	endpoints []string
+	resolvers []*Resolver
+}
+
+func (e *ResolverGroup) addResolver(r *Resolver) {
+	e.mu.Lock()
+	addrs := epsToAddrs(e.endpoints...)
+	e.resolvers = append(e.resolvers, r)
+	e.mu.Unlock()
+	r.cc.NewAddress(addrs)
+}
+
+func (e *ResolverGroup) removeResolver(r *Resolver) {
+	e.mu.Lock()
+	for i, er := range e.resolvers {
+		if er == r {
+			e.resolvers = append(e.resolvers[:i], e.resolvers[i+1:]...)
+			break
+		}
+	}
+	e.mu.Unlock()
+}
+
+// SetEndpoints updates the endpoints for the ResolverGroup. All registered resolvers are
+// updated immediately with the new endpoints.
+func (e *ResolverGroup) SetEndpoints(endpoints []string) {
+	addrs := epsToAddrs(endpoints...)
+	e.mu.Lock()
+	e.endpoints = endpoints
+	for _, r := range e.resolvers {
+		r.cc.NewAddress(addrs)
+	}
+	e.mu.Unlock()
+}
+
+// Target constructs an endpoint target using the endpoint id of the ResolverGroup.
+func (e *ResolverGroup) Target(endpoint string) string {
+	return Target(e.id, endpoint)
+}
+
+// Target constructs an endpoint resolver target.
+func Target(id, endpoint string) string {
+	return fmt.Sprintf("%s://%s/%s", scheme, id, endpoint)
+}
+
+// IsTarget checks whether a given target string is an endpoint resolver target.
+func IsTarget(target string) bool {
+	return strings.HasPrefix(target, "endpoint://")
+}
+
+func (e *ResolverGroup) Close() {
+	bldr.close(e.id)
+}
+
+// Build creates or reuses an etcd resolver for the etcd cluster name identified by the authority part of the target.
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+	if len(target.Authority) < 1 {
+		return nil, fmt.Errorf("'etcd' target scheme requires non-empty authority identifying etcd cluster being routed to")
+	}
+	id := target.Authority
+	es, err := b.getResolverGroup(id)
+	if err != nil {
+		return nil, fmt.Errorf("failed to build resolver: %v", err)
+	}
+	r := &Resolver{
+		endpointID: id,
+		cc:         cc,
+	}
+	es.addResolver(r)
+	return r, nil
+}
+
+func (b *builder) newResolverGroup(id string) (*ResolverGroup, error) {
+	b.mu.RLock()
+	_, ok := b.resolverGroups[id]
+	b.mu.RUnlock()
+	if ok {
+		return nil, fmt.Errorf("Endpoint already exists for id: %s", id)
+	}
+
+	es := &ResolverGroup{id: id}
+	b.mu.Lock()
+	b.resolverGroups[id] = es
+	b.mu.Unlock()
+	return es, nil
+}
+
+func (b *builder) getResolverGroup(id string) (*ResolverGroup, error) {
+	b.mu.RLock()
+	es, ok := b.resolverGroups[id]
+	b.mu.RUnlock()
+	if !ok {
+		return nil, fmt.Errorf("ResolverGroup not found for id: %s", id)
+	}
+	return es, nil
+}
+
+func (b *builder) close(id string) {
+	b.mu.Lock()
+	delete(b.resolverGroups, id)
+	b.mu.Unlock()
+}
+
+func (b *builder) Scheme() string {
+	return scheme
+}
+
+// Resolver provides a resolver for a single etcd cluster, identified by name.
+type Resolver struct {
+	endpointID string
+	cc         resolver.ClientConn
+	sync.RWMutex
+}
+
+// TODO: use balancer.epsToAddrs
+func epsToAddrs(eps ...string) (addrs []resolver.Address) {
+	addrs = make([]resolver.Address, 0, len(eps))
+	for _, ep := range eps {
+		addrs = append(addrs, resolver.Address{Addr: ep})
+	}
+	return addrs
+}
+
+func (*Resolver) ResolveNow(o resolver.ResolveNowOption) {}
+
+func (r *Resolver) Close() {
+	es, err := bldr.getResolverGroup(r.endpointID)
+	if err != nil {
+		return
+	}
+	es.removeResolver(r)
+}
+
+// ParseEndpoint parses an endpoint of the form
+// ((http|https)://<host>|(unix|unixs)://<path>)
+// and returns a protocol ('tcp' or 'unix'),
+// host (or filepath if a unix socket),
+// scheme (http, https, unix, unixs).
+func ParseEndpoint(endpoint string) (proto string, host string, scheme string) {
+	proto = "tcp"
+	host = endpoint
+	url, uerr := url.Parse(endpoint)
+	if uerr != nil || !strings.Contains(endpoint, "://") {
+		return proto, host, scheme
+	}
+	scheme = url.Scheme
+
+	// strip scheme:// prefix since grpc dials by host
+	host = url.Host
+	switch url.Scheme {
+	case "http", "https":
+	case "unix", "unixs":
+		proto = "unix"
+		host = url.Host + url.Path
+	default:
+		proto, host = "", ""
+	}
+	return proto, host, scheme
+}
+
+// ParseTarget parses an endpoint://<id>/<endpoint> string and returns the parsed id and endpoint.
+// If the target is malformed, an error is returned.
+func ParseTarget(target string) (string, string, error) {
+	noPrefix := strings.TrimPrefix(target, targetPrefix)
+	if noPrefix == target {
+		return "", "", fmt.Errorf("malformed target, %s prefix is required: %s", targetPrefix, target)
+	}
+	parts := strings.SplitN(noPrefix, "/", 2)
+	if len(parts) != 2 {
+		return "", "", fmt.Errorf("malformed target, expected %s://<id>/<endpoint>, but got %s", scheme, target)
+	}
+	return parts[0], parts[1], nil
+}
+
+// Dialer dials an endpoint using net.Dialer.
+// Context cancelation and timeout are supported.
+func Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
+	proto, host, _ := ParseEndpoint(dialEp)
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+	dialer := &net.Dialer{}
+	if deadline, ok := ctx.Deadline(); ok {
+		dialer.Deadline = deadline
+	}
+	return dialer.DialContext(ctx, proto, host)
+}
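
The helpers above cover the whole life cycle of an `endpoint://<id>/<endpoint>` target: `Target` builds one, `ParseTarget` splits it back into id and endpoint, and `ParseEndpoint` maps the endpoint onto what `net.Dialer` needs. A minimal sketch of how they compose, with made-up cluster id and endpoint values:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
)

func main() {
	// Build a resolver target for an illustrative cluster id and endpoint.
	target := endpoint.Target("c1", "https://10.0.0.1:2379")
	fmt.Println(target) // endpoint://c1/https://10.0.0.1:2379

	// ParseTarget recovers the id and the raw endpoint from the target.
	id, ep, err := endpoint.ParseTarget(target)
	if err != nil {
		panic(err)
	}
	fmt.Println(id, ep) // c1 https://10.0.0.1:2379

	// ParseEndpoint maps the endpoint onto dialer arguments:
	// proto "tcp", host "10.0.0.1:2379", scheme "https".
	proto, host, scheme := endpoint.ParseEndpoint(ep)
	fmt.Println(proto, host, scheme)
}
```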
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go b/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go
new file mode 100644
index 0000000..48eb875
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package balancer
+
+import (
+	"fmt"
+	"net/url"
+	"sort"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/resolver"
+)
+
+func scToString(sc balancer.SubConn) string {
+	return fmt.Sprintf("%p", sc)
+}
+
+func scsToStrings(scs map[balancer.SubConn]resolver.Address) (ss []string) {
+	ss = make([]string, 0, len(scs))
+	for sc, a := range scs {
+		ss = append(ss, fmt.Sprintf("%s (%s)", a.Addr, scToString(sc)))
+	}
+	sort.Strings(ss)
+	return ss
+}
+
+func addrsToStrings(addrs []resolver.Address) (ss []string) {
+	ss = make([]string, len(addrs))
+	for i := range addrs {
+		ss[i] = addrs[i].Addr
+	}
+	sort.Strings(ss)
+	return ss
+}
+
+func epsToAddrs(eps ...string) (addrs []resolver.Address) {
+	addrs = make([]resolver.Address, 0, len(eps))
+	for _, ep := range eps {
+		u, err := url.Parse(ep)
+		if err != nil {
+			addrs = append(addrs, resolver.Address{Addr: ep, Type: resolver.Backend})
+			continue
+		}
+		addrs = append(addrs, resolver.Address{Addr: u.Host, Type: resolver.Backend})
+	}
+	return addrs
+}
+
+var genN = new(uint32)
+
+func genName() string {
+	now := time.Now().UnixNano()
+	return fmt.Sprintf("%X%X", now, atomic.AddUint32(genN, 1))
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go b/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go
new file mode 100644
index 0000000..2dc2012
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go
@@ -0,0 +1,173 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package credentials implements gRPC credential interface with etcd specific logic.
+// e.g., client handshake with custom authority parameter
+package credentials
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+	"sync"
+
+	"github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	grpccredentials "google.golang.org/grpc/credentials"
+)
+
+// Config defines gRPC credential configuration.
+type Config struct {
+	TLSConfig *tls.Config
+}
+
+// Bundle defines gRPC credential interface.
+type Bundle interface {
+	grpccredentials.Bundle
+	UpdateAuthToken(token string)
+}
+
+// NewBundle constructs a new gRPC credential bundle.
+func NewBundle(cfg Config) Bundle {
+	return &bundle{
+		tc: newTransportCredential(cfg.TLSConfig),
+		rc: newPerRPCCredential(),
+	}
+}
+
+// bundle implements "grpccredentials.Bundle" interface.
+type bundle struct {
+	tc *transportCredential
+	rc *perRPCCredential
+}
+
+func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials {
+	return b.tc
+}
+
+func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
+	return b.rc
+}
+
+func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) {
+	// no-op
+	return nil, nil
+}
+
+// transportCredential implements "grpccredentials.TransportCredentials" interface.
+// transportCredential wraps TransportCredentials to track which
+// addresses are dialed for which endpoints, and then sets the authority when checking the endpoint's cert to the
+// hostname or IP of the dialed endpoint.
+// This is a workaround for a gRPC load balancer issue. gRPC uses the dialed target's service name as the authority when
+// checking all endpoint certs, which does not work for etcd servers using their hostname or IP as the Subject Alternative Name
+// in their TLS certs.
+// To enable, include both WithTransportCredentials(creds) and WithContextDialer(creds.Dialer)
+// when dialing.
+type transportCredential struct {
+	gtc grpccredentials.TransportCredentials
+	mu  sync.Mutex
+	// addrToEndpoint maps from the connection addresses that are dialed to the hostname or IP of the
+	// endpoint provided to the dialer when dialing
+	addrToEndpoint map[string]string
+}
+
+func newTransportCredential(cfg *tls.Config) *transportCredential {
+	return &transportCredential{
+		gtc:            grpccredentials.NewTLS(cfg),
+		addrToEndpoint: map[string]string{},
+	}
+}
+
+func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
+	// Set the authority when checking the endpoint's cert to the hostname or IP of the dialed endpoint
+	tc.mu.Lock()
+	dialEp, ok := tc.addrToEndpoint[rawConn.RemoteAddr().String()]
+	tc.mu.Unlock()
+	if ok {
+		_, host, _ := endpoint.ParseEndpoint(dialEp)
+		authority = host
+	}
+	return tc.gtc.ClientHandshake(ctx, authority, rawConn)
+}
+
+// isIP returns true if the given string is an IP address.
+func isIP(ep string) bool {
+	return net.ParseIP(ep) != nil
+}
+
+func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
+	return tc.gtc.ServerHandshake(rawConn)
+}
+
+func (tc *transportCredential) Info() grpccredentials.ProtocolInfo {
+	return tc.gtc.Info()
+}
+
+func (tc *transportCredential) Clone() grpccredentials.TransportCredentials {
+	copy := map[string]string{}
+	tc.mu.Lock()
+	for k, v := range tc.addrToEndpoint {
+		copy[k] = v
+	}
+	tc.mu.Unlock()
+	return &transportCredential{
+		gtc:            tc.gtc.Clone(),
+		addrToEndpoint: copy,
+	}
+}
+
+func (tc *transportCredential) OverrideServerName(serverNameOverride string) error {
+	return tc.gtc.OverrideServerName(serverNameOverride)
+}
+
+func (tc *transportCredential) Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
+	// Keep track of which addresses are dialed for which endpoints
+	conn, err := endpoint.Dialer(ctx, dialEp)
+	if conn != nil {
+		tc.mu.Lock()
+		tc.addrToEndpoint[conn.RemoteAddr().String()] = dialEp
+		tc.mu.Unlock()
+	}
+	return conn, err
+}
+
+// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface.
+type perRPCCredential struct {
+	authToken   string
+	authTokenMu sync.RWMutex
+}
+
+func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} }
+
+func (rc *perRPCCredential) RequireTransportSecurity() bool { return false }
+
+func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
+	rc.authTokenMu.RLock()
+	authToken := rc.authToken
+	rc.authTokenMu.RUnlock()
+	return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil
+}
+
+func (b *bundle) UpdateAuthToken(token string) {
+	if b.rc == nil {
+		return
+	}
+	b.rc.UpdateAuthToken(token)
+}
+
+func (rc *perRPCCredential) UpdateAuthToken(token string) {
+	rc.authTokenMu.Lock()
+	rc.authToken = token
+	rc.authTokenMu.Unlock()
+}
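
The comment on `transportCredential` above is the key to this file: the same bundle must supply both the transport credentials and the dialer so that `ClientHandshake` can look up which endpoint a raw connection was dialed for and use that host as the TLS authority. A hedged sketch of how a caller might wire the bundle into `grpc.Dial` (the helper name is hypothetical, and the dialer pairing is performed inside clientv3 itself, since the concrete `Dialer` method is not part of the exported `Bundle` interface):

```go
package main

import (
	"crypto/tls"

	"github.com/coreos/etcd/clientv3/credentials"
	"google.golang.org/grpc"
)

// newAuthenticatedConn is a hypothetical helper, not part of this package.
func newAuthenticatedConn(target string, tlsCfg *tls.Config) (*grpc.ClientConn, credentials.Bundle, error) {
	bundle := credentials.NewBundle(credentials.Config{TLSConfig: tlsCfg})
	conn, err := grpc.Dial(target,
		// TLS handshakes go through the bundle's transportCredential.
		grpc.WithTransportCredentials(bundle.TransportCredentials()),
		// Every RPC attaches the current auth token as the "token" metadata field.
		grpc.WithPerRPCCredentials(bundle.PerRPCCredentials()),
	)
	if err != nil {
		return nil, nil, err
	}
	// After (re)authentication, refresh the token; existing connections
	// pick it up on the next RPC without re-dialing.
	bundle.UpdateAuthToken("example-token")
	return conn, bundle, nil
}
```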
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
new file mode 100644
index 0000000..f72c6a6
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction.
+package rpctypes
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
new file mode 100644
index 0000000..bc1ad7b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
@@ -0,0 +1,217 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpctypes
+
+import (
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// server-side error
+var (
+	ErrGRPCEmptyKey      = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
+	ErrGRPCKeyNotFound   = status.New(codes.InvalidArgument, "etcdserver: key not found").Err()
+	ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err()
+	ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err()
+	ErrGRPCTooManyOps    = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err()
+	ErrGRPCDuplicateKey  = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err()
+	ErrGRPCCompacted     = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()
+	ErrGRPCFutureRev     = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err()
+	ErrGRPCNoSpace       = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err()
+
+	ErrGRPCLeaseNotFound    = status.New(codes.NotFound, "etcdserver: requested lease not found").Err()
+	ErrGRPCLeaseExist       = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
+	ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
+
+	ErrGRPCMemberExist            = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
+	ErrGRPCPeerURLExist           = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
+	ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
+	ErrGRPCMemberBadURLs          = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err()
+	ErrGRPCMemberNotFound         = status.New(codes.NotFound, "etcdserver: member not found").Err()
+
+	ErrGRPCRequestTooLarge        = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err()
+	ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
+
+	ErrGRPCRootUserNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err()
+	ErrGRPCRootRoleNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err()
+	ErrGRPCUserAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err()
+	ErrGRPCUserEmpty            = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err()
+	ErrGRPCUserNotFound         = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err()
+	ErrGRPCRoleAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err()
+	ErrGRPCRoleNotFound         = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
+	ErrGRPCAuthFailed           = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
+	ErrGRPCPermissionDenied     = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
+	ErrGRPCRoleNotGranted       = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
+	ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
+	ErrGRPCAuthNotEnabled       = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
+	ErrGRPCInvalidAuthToken     = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
+	ErrGRPCInvalidAuthMgmt      = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
+
+	ErrGRPCNoLeader                   = status.New(codes.Unavailable, "etcdserver: no leader").Err()
+	ErrGRPCNotLeader                  = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
+	ErrGRPCLeaderChanged              = status.New(codes.Unavailable, "etcdserver: leader changed").Err()
+	ErrGRPCNotCapable                 = status.New(codes.Unavailable, "etcdserver: not capable").Err()
+	ErrGRPCStopped                    = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
+	ErrGRPCTimeout                    = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
+	ErrGRPCTimeoutDueToLeaderFail     = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
+	ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
+	ErrGRPCUnhealthy                  = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
+	ErrGRPCCorrupt                    = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
+
+	errStringToError = map[string]error{
+		ErrorDesc(ErrGRPCEmptyKey):      ErrGRPCEmptyKey,
+		ErrorDesc(ErrGRPCKeyNotFound):   ErrGRPCKeyNotFound,
+		ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
+		ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
+
+		ErrorDesc(ErrGRPCTooManyOps):   ErrGRPCTooManyOps,
+		ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
+		ErrorDesc(ErrGRPCCompacted):    ErrGRPCCompacted,
+		ErrorDesc(ErrGRPCFutureRev):    ErrGRPCFutureRev,
+		ErrorDesc(ErrGRPCNoSpace):      ErrGRPCNoSpace,
+
+		ErrorDesc(ErrGRPCLeaseNotFound):    ErrGRPCLeaseNotFound,
+		ErrorDesc(ErrGRPCLeaseExist):       ErrGRPCLeaseExist,
+		ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge,
+
+		ErrorDesc(ErrGRPCMemberExist):            ErrGRPCMemberExist,
+		ErrorDesc(ErrGRPCPeerURLExist):           ErrGRPCPeerURLExist,
+		ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
+		ErrorDesc(ErrGRPCMemberBadURLs):          ErrGRPCMemberBadURLs,
+		ErrorDesc(ErrGRPCMemberNotFound):         ErrGRPCMemberNotFound,
+
+		ErrorDesc(ErrGRPCRequestTooLarge):        ErrGRPCRequestTooLarge,
+		ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
+
+		ErrorDesc(ErrGRPCRootUserNotExist):     ErrGRPCRootUserNotExist,
+		ErrorDesc(ErrGRPCRootRoleNotExist):     ErrGRPCRootRoleNotExist,
+		ErrorDesc(ErrGRPCUserAlreadyExist):     ErrGRPCUserAlreadyExist,
+		ErrorDesc(ErrGRPCUserEmpty):            ErrGRPCUserEmpty,
+		ErrorDesc(ErrGRPCUserNotFound):         ErrGRPCUserNotFound,
+		ErrorDesc(ErrGRPCRoleAlreadyExist):     ErrGRPCRoleAlreadyExist,
+		ErrorDesc(ErrGRPCRoleNotFound):         ErrGRPCRoleNotFound,
+		ErrorDesc(ErrGRPCAuthFailed):           ErrGRPCAuthFailed,
+		ErrorDesc(ErrGRPCPermissionDenied):     ErrGRPCPermissionDenied,
+		ErrorDesc(ErrGRPCRoleNotGranted):       ErrGRPCRoleNotGranted,
+		ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
+		ErrorDesc(ErrGRPCAuthNotEnabled):       ErrGRPCAuthNotEnabled,
+		ErrorDesc(ErrGRPCInvalidAuthToken):     ErrGRPCInvalidAuthToken,
+		ErrorDesc(ErrGRPCInvalidAuthMgmt):      ErrGRPCInvalidAuthMgmt,
+
+		ErrorDesc(ErrGRPCNoLeader):                   ErrGRPCNoLeader,
+		ErrorDesc(ErrGRPCNotLeader):                  ErrGRPCNotLeader,
+		ErrorDesc(ErrGRPCNotCapable):                 ErrGRPCNotCapable,
+		ErrorDesc(ErrGRPCStopped):                    ErrGRPCStopped,
+		ErrorDesc(ErrGRPCTimeout):                    ErrGRPCTimeout,
+		ErrorDesc(ErrGRPCTimeoutDueToLeaderFail):     ErrGRPCTimeoutDueToLeaderFail,
+		ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
+		ErrorDesc(ErrGRPCUnhealthy):                  ErrGRPCUnhealthy,
+		ErrorDesc(ErrGRPCCorrupt):                    ErrGRPCCorrupt,
+	}
+)
+
+// client-side error
+var (
+	ErrEmptyKey      = Error(ErrGRPCEmptyKey)
+	ErrKeyNotFound   = Error(ErrGRPCKeyNotFound)
+	ErrValueProvided = Error(ErrGRPCValueProvided)
+	ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
+	ErrTooManyOps    = Error(ErrGRPCTooManyOps)
+	ErrDuplicateKey  = Error(ErrGRPCDuplicateKey)
+	ErrCompacted     = Error(ErrGRPCCompacted)
+	ErrFutureRev     = Error(ErrGRPCFutureRev)
+	ErrNoSpace       = Error(ErrGRPCNoSpace)
+
+	ErrLeaseNotFound    = Error(ErrGRPCLeaseNotFound)
+	ErrLeaseExist       = Error(ErrGRPCLeaseExist)
+	ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge)
+
+	ErrMemberExist            = Error(ErrGRPCMemberExist)
+	ErrPeerURLExist           = Error(ErrGRPCPeerURLExist)
+	ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
+	ErrMemberBadURLs          = Error(ErrGRPCMemberBadURLs)
+	ErrMemberNotFound         = Error(ErrGRPCMemberNotFound)
+
+	ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
+	ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
+
+	ErrRootUserNotExist     = Error(ErrGRPCRootUserNotExist)
+	ErrRootRoleNotExist     = Error(ErrGRPCRootRoleNotExist)
+	ErrUserAlreadyExist     = Error(ErrGRPCUserAlreadyExist)
+	ErrUserEmpty            = Error(ErrGRPCUserEmpty)
+	ErrUserNotFound         = Error(ErrGRPCUserNotFound)
+	ErrRoleAlreadyExist     = Error(ErrGRPCRoleAlreadyExist)
+	ErrRoleNotFound         = Error(ErrGRPCRoleNotFound)
+	ErrAuthFailed           = Error(ErrGRPCAuthFailed)
+	ErrPermissionDenied     = Error(ErrGRPCPermissionDenied)
+	ErrRoleNotGranted       = Error(ErrGRPCRoleNotGranted)
+	ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
+	ErrAuthNotEnabled       = Error(ErrGRPCAuthNotEnabled)
+	ErrInvalidAuthToken     = Error(ErrGRPCInvalidAuthToken)
+	ErrInvalidAuthMgmt      = Error(ErrGRPCInvalidAuthMgmt)
+
+	ErrNoLeader                   = Error(ErrGRPCNoLeader)
+	ErrNotLeader                  = Error(ErrGRPCNotLeader)
+	ErrLeaderChanged              = Error(ErrGRPCLeaderChanged)
+	ErrNotCapable                 = Error(ErrGRPCNotCapable)
+	ErrStopped                    = Error(ErrGRPCStopped)
+	ErrTimeout                    = Error(ErrGRPCTimeout)
+	ErrTimeoutDueToLeaderFail     = Error(ErrGRPCTimeoutDueToLeaderFail)
+	ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
+	ErrUnhealthy                  = Error(ErrGRPCUnhealthy)
+	ErrCorrupt                    = Error(ErrGRPCCorrupt)
+)
+
+// EtcdError defines gRPC server errors.
+// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
+type EtcdError struct {
+	code codes.Code
+	desc string
+}
+
+// Code returns grpc/codes.Code.
+// TODO: define clientv3/codes.Code.
+func (e EtcdError) Code() codes.Code {
+	return e.code
+}
+
+func (e EtcdError) Error() string {
+	return e.desc
+}
+
+func Error(err error) error {
+	if err == nil {
+		return nil
+	}
+	verr, ok := errStringToError[ErrorDesc(err)]
+	if !ok { // not gRPC error
+		return err
+	}
+	ev, ok := status.FromError(verr)
+	var desc string
+	if ok {
+		desc = ev.Message()
+	} else {
+		desc = verr.Error()
+	}
+	return EtcdError{code: ev.Code(), desc: desc}
+}
+
+func ErrorDesc(err error) string {
+	if s, ok := status.FromError(err); ok {
+		return s.Message()
+	}
+	return err.Error()
+}
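
`Error` is what lets client code keep comparing against sentinel values: it looks the gRPC status message up in `errStringToError` and returns the matching `EtcdError`, which compares equal to the client-side variables above. A minimal sketch (the status error here is constructed by hand purely for illustration; in practice it comes back from an etcd RPC):

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Simulate a server response; the message must match the server-side
	// definition exactly for the errStringToError lookup to succeed.
	rpcErr := status.New(codes.OutOfRange,
		"etcdserver: mvcc: required revision has been compacted").Err()

	// Error maps the gRPC error onto the client-side sentinel, so a plain
	// equality check against rpctypes.ErrCompacted works.
	if rpctypes.Error(rpcErr) == rpctypes.ErrCompacted {
		fmt.Println("compacted:", rpctypes.ErrorDesc(rpcErr))
	}
}
```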
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go
new file mode 100644
index 0000000..90b8b83
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go
@@ -0,0 +1,22 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpctypes
+
+var (
+	MetadataRequireLeaderKey = "hasleader"
+	MetadataHasLeader        = "true"
+
+	MetadataClientAPIVersionKey = "client-api-version"
+)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
new file mode 100644
index 0000000..8f8ac60
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
@@ -0,0 +1,20 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpctypes
+
+var (
+	TokenFieldNameGRPC    = "token"
+	TokenFieldNameSwagger = "authorization"
+)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
new file mode 100644
index 0000000..12b6763
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
@@ -0,0 +1,1034 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: etcdserver.proto
+
+package etcdserverpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Request struct {
+	ID                   uint64   `protobuf:"varint,1,opt,name=ID" json:"ID"`
+	Method               string   `protobuf:"bytes,2,opt,name=Method" json:"Method"`
+	Path                 string   `protobuf:"bytes,3,opt,name=Path" json:"Path"`
+	Val                  string   `protobuf:"bytes,4,opt,name=Val" json:"Val"`
+	Dir                  bool     `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
+	PrevValue            string   `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
+	PrevIndex            uint64   `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
+	PrevExist            *bool    `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
+	Expiration           int64    `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
+	Wait                 bool     `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
+	Since                uint64   `protobuf:"varint,11,opt,name=Since" json:"Since"`
+	Recursive            bool     `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
+	Sorted               bool     `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
+	Quorum               bool     `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
+	Time                 int64    `protobuf:"varint,15,opt,name=Time" json:"Time"`
+	Stream               bool     `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
+	Refresh              *bool    `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Request) Reset()         { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage()    {}
+func (*Request) Descriptor() ([]byte, []int) {
+	return fileDescriptor_09ffbeb3bebbce7e, []int{0}
+}
+func (m *Request) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Request.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Request.Merge(m, src)
+}
+func (m *Request) XXX_Size() int {
+	return m.Size()
+}
+func (m *Request) XXX_DiscardUnknown() {
+	xxx_messageInfo_Request.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Request proto.InternalMessageInfo
+
+type Metadata struct {
+	NodeID               uint64   `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
+	ClusterID            uint64   `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Metadata) Reset()         { *m = Metadata{} }
+func (m *Metadata) String() string { return proto.CompactTextString(m) }
+func (*Metadata) ProtoMessage()    {}
+func (*Metadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_09ffbeb3bebbce7e, []int{1}
+}
+func (m *Metadata) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Metadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Metadata.Merge(m, src)
+}
+func (m *Metadata) XXX_Size() int {
+	return m.Size()
+}
+func (m *Metadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_Metadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metadata proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
+	proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
+}
+
+func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) }
+
+var fileDescriptor_09ffbeb3bebbce7e = []byte{
+	// 380 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
+	0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb,
+	0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58,
+	0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f,
+	0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79,
+	0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d,
+	0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a,
+	0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89,
+	0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93,
+	0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe,
+	0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c,
+	0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70,
+	0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab,
+	0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11,
+	0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7,
+	0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89,
+	0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82,
+	0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6,
+	0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63,
+	0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6,
+	0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff,
+	0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea,
+	0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f,
+	0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00,
+}
+
+func (m *Request) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Request) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Refresh != nil {
+		i--
+		if *m.Refresh {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x88
+	}
+	i--
+	if m.Stream {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0x80
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
+	i--
+	dAtA[i] = 0x78
+	i--
+	if m.Quorum {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x70
+	i--
+	if m.Sorted {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x68
+	i--
+	if m.Recursive {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x60
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
+	i--
+	dAtA[i] = 0x58
+	i--
+	if m.Wait {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x50
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
+	i--
+	dAtA[i] = 0x48
+	if m.PrevExist != nil {
+		i--
+		if *m.PrevExist {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x40
+	}
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
+	i--
+	dAtA[i] = 0x38
+	i -= len(m.PrevValue)
+	copy(dAtA[i:], m.PrevValue)
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
+	i--
+	dAtA[i] = 0x32
+	i--
+	if m.Dir {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x28
+	i -= len(m.Val)
+	copy(dAtA[i:], m.Val)
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Method)
+	copy(dAtA[i:], m.Method)
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
+	i--
+	dAtA[i] = 0x12
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *Metadata) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
+	offset -= sovEtcdserver(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *Request) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovEtcdserver(uint64(m.ID))
+	l = len(m.Method)
+	n += 1 + l + sovEtcdserver(uint64(l))
+	l = len(m.Path)
+	n += 1 + l + sovEtcdserver(uint64(l))
+	l = len(m.Val)
+	n += 1 + l + sovEtcdserver(uint64(l))
+	n += 2
+	l = len(m.PrevValue)
+	n += 1 + l + sovEtcdserver(uint64(l))
+	n += 1 + sovEtcdserver(uint64(m.PrevIndex))
+	if m.PrevExist != nil {
+		n += 2
+	}
+	n += 1 + sovEtcdserver(uint64(m.Expiration))
+	n += 2
+	n += 1 + sovEtcdserver(uint64(m.Since))
+	n += 2
+	n += 2
+	n += 2
+	n += 1 + sovEtcdserver(uint64(m.Time))
+	n += 3
+	if m.Refresh != nil {
+		n += 3
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Metadata) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovEtcdserver(uint64(m.NodeID))
+	n += 1 + sovEtcdserver(uint64(m.ClusterID))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovEtcdserver(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozEtcdserver(x uint64) (n int) {
+	return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Request) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowEtcdserver
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Request: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Method = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Val = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Dir = bool(v != 0)
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PrevValue = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType)
+			}
+			m.PrevIndex = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PrevIndex |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.PrevExist = &b
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType)
+			}
+			m.Expiration = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Expiration |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Wait = bool(v != 0)
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType)
+			}
+			m.Since = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Since |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 12:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Recursive = bool(v != 0)
+		case 13:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Sorted = bool(v != 0)
+		case 14:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Quorum = bool(v != 0)
+		case 15:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
+			}
+			m.Time = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Time |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 16:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stream = bool(v != 0)
+		case 17:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Refresh = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipEtcdserver(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Metadata) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowEtcdserver
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+			}
+			m.NodeID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.NodeID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType)
+			}
+			m.ClusterID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ClusterID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipEtcdserver(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipEtcdserver(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowEtcdserver
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthEtcdserver
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthEtcdserver
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowEtcdserver
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipEtcdserver(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthEtcdserver
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowEtcdserver   = fmt.Errorf("proto: integer overflow")
+)
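
The generated marshalling above fills a pre-sized buffer from the end backwards, and `encodeVarintEtcdserver`/`sovEtcdserver` are the standard protobuf base-128 varint write and size helpers. A small standalone sketch of the same varint scheme, written forward for readability (not the generated code itself):

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors sovEtcdserver: the number of varint bytes needed to hold x.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// putVarint emits seven payload bits per byte, with the 0x80 continuation
// bit set on every byte except the last.
func putVarint(dst []byte, x uint64) []byte {
	for x >= 1<<7 {
		dst = append(dst, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(dst, byte(x))
}

func main() {
	for _, v := range []uint64{1, 127, 128, 300} {
		fmt.Printf("%3d -> % x (size %d)\n", v, putVarint(nil, v), sov(v))
	}
	// 300 -> ac 02: low 7 bits 0x2c with continuation bit -> 0xac, then 0x02.
}
```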
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto
new file mode 100644
index 0000000..25e0aca
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto
@@ -0,0 +1,34 @@
+syntax = "proto2";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message Request {
+	optional uint64 ID         =  1 [(gogoproto.nullable) = false];
+	optional string Method     =  2 [(gogoproto.nullable) = false];
+	optional string Path       =  3 [(gogoproto.nullable) = false];
+	optional string Val        =  4 [(gogoproto.nullable) = false];
+	optional bool   Dir        =  5 [(gogoproto.nullable) = false];
+	optional string PrevValue  =  6 [(gogoproto.nullable) = false];
+	optional uint64 PrevIndex  =  7 [(gogoproto.nullable) = false];
+	optional bool   PrevExist  =  8 [(gogoproto.nullable) = true];
+	optional int64  Expiration =  9 [(gogoproto.nullable) = false];
+	optional bool   Wait       = 10 [(gogoproto.nullable) = false];
+	optional uint64 Since      = 11 [(gogoproto.nullable) = false];
+	optional bool   Recursive  = 12 [(gogoproto.nullable) = false];
+	optional bool   Sorted     = 13 [(gogoproto.nullable) = false];
+	optional bool   Quorum     = 14 [(gogoproto.nullable) = false];
+	optional int64  Time       = 15 [(gogoproto.nullable) = false];
+	optional bool   Stream     = 16 [(gogoproto.nullable) = false];
+	optional bool   Refresh    = 17 [(gogoproto.nullable) = true];
+}
+
+message Metadata {
+	optional uint64 NodeID    = 1 [(gogoproto.nullable) = false];
+	optional uint64 ClusterID = 2 [(gogoproto.nullable) = false];
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
new file mode 100644
index 0000000..b3a199e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
@@ -0,0 +1,2427 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: raft_internal.proto
+
+package etcdserverpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type RequestHeader struct {
+	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// username is a username that is associated with an auth token of gRPC connection
+	Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"`
+	// auth_revision is a revision number of auth.authStore. It is not related to mvcc
+	AuthRevision         uint64   `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *RequestHeader) Reset()         { *m = RequestHeader{} }
+func (m *RequestHeader) String() string { return proto.CompactTextString(m) }
+func (*RequestHeader) ProtoMessage()    {}
+func (*RequestHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b4c9a9be0cfca103, []int{0}
+}
+func (m *RequestHeader) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RequestHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RequestHeader.Merge(m, src)
+}
+func (m *RequestHeader) XXX_Size() int {
+	return m.Size()
+}
+func (m *RequestHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_RequestHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RequestHeader proto.InternalMessageInfo
+
+// An InternalRaftRequest is the union of all requests which can be
+// sent via raft.
+type InternalRaftRequest struct {
+	Header                   *RequestHeader                   `protobuf:"bytes,100,opt,name=header,proto3" json:"header,omitempty"`
+	ID                       uint64                           `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	V2                       *Request                         `protobuf:"bytes,2,opt,name=v2,proto3" json:"v2,omitempty"`
+	Range                    *RangeRequest                    `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
+	Put                      *PutRequest                      `protobuf:"bytes,4,opt,name=put,proto3" json:"put,omitempty"`
+	DeleteRange              *DeleteRangeRequest              `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange,proto3" json:"delete_range,omitempty"`
+	Txn                      *TxnRequest                      `protobuf:"bytes,6,opt,name=txn,proto3" json:"txn,omitempty"`
+	Compaction               *CompactionRequest               `protobuf:"bytes,7,opt,name=compaction,proto3" json:"compaction,omitempty"`
+	LeaseGrant               *LeaseGrantRequest               `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant,proto3" json:"lease_grant,omitempty"`
+	LeaseRevoke              *LeaseRevokeRequest              `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke,proto3" json:"lease_revoke,omitempty"`
+	Alarm                    *AlarmRequest                    `protobuf:"bytes,10,opt,name=alarm,proto3" json:"alarm,omitempty"`
+	AuthEnable               *AuthEnableRequest               `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable,proto3" json:"auth_enable,omitempty"`
+	AuthDisable              *AuthDisableRequest              `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable,proto3" json:"auth_disable,omitempty"`
+	Authenticate             *InternalAuthenticateRequest     `protobuf:"bytes,1012,opt,name=authenticate,proto3" json:"authenticate,omitempty"`
+	AuthUserAdd              *AuthUserAddRequest              `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd,proto3" json:"auth_user_add,omitempty"`
+	AuthUserDelete           *AuthUserDeleteRequest           `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete,proto3" json:"auth_user_delete,omitempty"`
+	AuthUserGet              *AuthUserGetRequest              `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet,proto3" json:"auth_user_get,omitempty"`
+	AuthUserChangePassword   *AuthUserChangePasswordRequest   `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword,proto3" json:"auth_user_change_password,omitempty"`
+	AuthUserGrantRole        *AuthUserGrantRoleRequest        `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole,proto3" json:"auth_user_grant_role,omitempty"`
+	AuthUserRevokeRole       *AuthUserRevokeRoleRequest       `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole,proto3" json:"auth_user_revoke_role,omitempty"`
+	AuthUserList             *AuthUserListRequest             `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList,proto3" json:"auth_user_list,omitempty"`
+	AuthRoleList             *AuthRoleListRequest             `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList,proto3" json:"auth_role_list,omitempty"`
+	AuthRoleAdd              *AuthRoleAddRequest              `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd,proto3" json:"auth_role_add,omitempty"`
+	AuthRoleDelete           *AuthRoleDeleteRequest           `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete,proto3" json:"auth_role_delete,omitempty"`
+	AuthRoleGet              *AuthRoleGetRequest              `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet,proto3" json:"auth_role_get,omitempty"`
+	AuthRoleGrantPermission  *AuthRoleGrantPermissionRequest  `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission,proto3" json:"auth_role_grant_permission,omitempty"`
+	AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission,proto3" json:"auth_role_revoke_permission,omitempty"`
+	XXX_NoUnkeyedLiteral     struct{}                         `json:"-"`
+	XXX_unrecognized         []byte                           `json:"-"`
+	XXX_sizecache            int32                            `json:"-"`
+}
+
+func (m *InternalRaftRequest) Reset()         { *m = InternalRaftRequest{} }
+func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) }
+func (*InternalRaftRequest) ProtoMessage()    {}
+func (*InternalRaftRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b4c9a9be0cfca103, []int{1}
+}
+func (m *InternalRaftRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *InternalRaftRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_InternalRaftRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *InternalRaftRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InternalRaftRequest.Merge(m, src)
+}
+func (m *InternalRaftRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *InternalRaftRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_InternalRaftRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InternalRaftRequest proto.InternalMessageInfo
+
+type EmptyResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EmptyResponse) Reset()         { *m = EmptyResponse{} }
+func (m *EmptyResponse) String() string { return proto.CompactTextString(m) }
+func (*EmptyResponse) ProtoMessage()    {}
+func (*EmptyResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b4c9a9be0cfca103, []int{2}
+}
+func (m *EmptyResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EmptyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_EmptyResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *EmptyResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EmptyResponse.Merge(m, src)
+}
+func (m *EmptyResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *EmptyResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_EmptyResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EmptyResponse proto.InternalMessageInfo
+
+// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
+// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
+// To avoid misuse of that field, we have an internal version of AuthenticateRequest.
+type InternalAuthenticateRequest struct {
+	Name     string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	// simple_token is generated in API layer (etcdserver/v3_server.go)
+	SimpleToken          string   `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *InternalAuthenticateRequest) Reset()         { *m = InternalAuthenticateRequest{} }
+func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) }
+func (*InternalAuthenticateRequest) ProtoMessage()    {}
+func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b4c9a9be0cfca103, []int{3}
+}
+func (m *InternalAuthenticateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *InternalAuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_InternalAuthenticateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *InternalAuthenticateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InternalAuthenticateRequest.Merge(m, src)
+}
+func (m *InternalAuthenticateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *InternalAuthenticateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_InternalAuthenticateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InternalAuthenticateRequest proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader")
+	proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest")
+	proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse")
+	proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest")
+}
+
+func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) }
+
+var fileDescriptor_b4c9a9be0cfca103 = []byte{
+	// 840 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdb, 0x4e, 0xdb, 0x48,
+	0x18, 0xc7, 0x71, 0x38, 0x66, 0x12, 0xb2, 0xec, 0x00, 0xbb, 0xb3, 0x41, 0xca, 0x86, 0xa0, 0xdd,
+	0x65, 0x77, 0x5b, 0x5a, 0x85, 0x07, 0x68, 0x53, 0x82, 0x00, 0x09, 0x21, 0x64, 0x51, 0xa9, 0x52,
+	0x2f, 0xdc, 0x21, 0xfe, 0x48, 0x5c, 0x1c, 0xdb, 0x1d, 0x4f, 0x52, 0xfa, 0x26, 0x7d, 0x8c, 0x9e,
+	0x1e, 0x82, 0x8b, 0x1e, 0x68, 0xfb, 0x02, 0x2d, 0xbd, 0xe9, 0x55, 0x6f, 0xda, 0x07, 0xa8, 0xe6,
+	0x60, 0x3b, 0x4e, 0x1c, 0xee, 0xec, 0x6f, 0xfe, 0xdf, 0xef, 0xfb, 0x0f, 0xf3, 0x37, 0x13, 0xb4,
+	0xc8, 0xe8, 0x09, 0xb7, 0x1c, 0x8f, 0x03, 0xf3, 0xa8, 0xbb, 0x11, 0x30, 0x9f, 0xfb, 0xb8, 0x08,
+	0xbc, 0x65, 0x87, 0xc0, 0xfa, 0xc0, 0x82, 0xe3, 0xf2, 0x52, 0xdb, 0x6f, 0xfb, 0x72, 0xe1, 0x86,
+	0x78, 0x52, 0x9a, 0xf2, 0x42, 0xa2, 0xd1, 0x95, 0x3c, 0x0b, 0x5a, 0xea, 0xb1, 0xf6, 0x00, 0xcd,
+	0x9b, 0xf0, 0xa8, 0x07, 0x21, 0xdf, 0x05, 0x6a, 0x03, 0xc3, 0x25, 0x94, 0xdb, 0x6b, 0x12, 0xa3,
+	0x6a, 0xac, 0x4f, 0x99, 0xb9, 0xbd, 0x26, 0x2e, 0xa3, 0xb9, 0x5e, 0x28, 0x46, 0x76, 0x81, 0xe4,
+	0xaa, 0xc6, 0x7a, 0xde, 0x8c, 0xdf, 0xf1, 0x1a, 0x9a, 0xa7, 0x3d, 0xde, 0xb1, 0x18, 0xf4, 0x9d,
+	0xd0, 0xf1, 0x3d, 0x32, 0x29, 0xdb, 0x8a, 0xa2, 0x68, 0xea, 0x5a, 0xed, 0x5b, 0x09, 0x2d, 0xee,
+	0x69, 0xd7, 0x26, 0x3d, 0xe1, 0x7a, 0x1c, 0xde, 0x44, 0x33, 0x1d, 0x39, 0x92, 0xd8, 0x55, 0x63,
+	0xbd, 0x50, 0x5f, 0xd9, 0x18, 0xdc, 0xcb, 0x46, 0xca, 0x95, 0xa9, 0xa5, 0x23, 0xee, 0xfe, 0x42,
+	0xb9, 0x7e, 0x5d, 0xfa, 0x2a, 0xd4, 0x97, 0x33, 0x01, 0x66, 0xae, 0x5f, 0xc7, 0x37, 0xd1, 0x34,
+	0xa3, 0x5e, 0x1b, 0xa4, 0xc1, 0x42, 0xbd, 0x3c, 0xa4, 0x14, 0x4b, 0x91, 0x5c, 0x09, 0xf1, 0x7f,
+	0x68, 0x32, 0xe8, 0x71, 0x32, 0x25, 0xf5, 0x24, 0xad, 0x3f, 0xec, 0x45, 0x9b, 0x30, 0x85, 0x08,
+	0x6f, 0xa1, 0xa2, 0x0d, 0x2e, 0x70, 0xb0, 0xd4, 0x90, 0x69, 0xd9, 0x54, 0x4d, 0x37, 0x35, 0xa5,
+	0x22, 0x35, 0xaa, 0x60, 0x27, 0x35, 0x31, 0x90, 0x9f, 0x79, 0x64, 0x26, 0x6b, 0xe0, 0xd1, 0x99,
+	0x17, 0x0f, 0xe4, 0x67, 0x1e, 0xbe, 0x85, 0x50, 0xcb, 0xef, 0x06, 0xb4, 0xc5, 0xc5, 0x1f, 0x7d,
+	0x56, 0xb6, 0xfc, 0x99, 0x6e, 0xd9, 0x8a, 0xd7, 0xa3, 0xce, 0x81, 0x16, 0x7c, 0x1b, 0x15, 0x5c,
+	0xa0, 0x21, 0x58, 0x6d, 0x46, 0x3d, 0x4e, 0xe6, 0xb2, 0x08, 0xfb, 0x42, 0xb0, 0x23, 0xd6, 0x63,
+	0x82, 0x1b, 0x97, 0xc4, 0x9e, 0x15, 0x81, 0x41, 0xdf, 0x3f, 0x05, 0x92, 0xcf, 0xda, 0xb3, 0x44,
+	0x98, 0x52, 0x10, 0xef, 0xd9, 0x4d, 0x6a, 0xe2, 0x58, 0xa8, 0x4b, 0x59, 0x97, 0xa0, 0xac, 0x63,
+	0x69, 0x88, 0xa5, 0xf8, 0x58, 0xa4, 0x10, 0x37, 0x50, 0x41, 0x26, 0x0e, 0x3c, 0x7a, 0xec, 0x02,
+	0xf9, 0x9a, 0xb9, 0xf7, 0x46, 0x8f, 0x77, 0xb6, 0xa5, 0x20, 0x76, 0x4e, 0xe3, 0x12, 0x6e, 0x22,
+	0x99, 0x4f, 0xcb, 0x76, 0x42, 0xc9, 0xf8, 0x3e, 0x9b, 0x65, 0x5d, 0x30, 0x9a, 0x4a, 0x11, 0x5b,
+	0xa7, 0x49, 0x0d, 0x1f, 0x28, 0x0a, 0x78, 0xdc, 0x69, 0x51, 0x0e, 0xe4, 0x87, 0xa2, 0xfc, 0x9b,
+	0xa6, 0x44, 0xb9, 0x6f, 0x0c, 0x48, 0x23, 0x5c, 0xaa, 0x1f, 0x6f, 0xeb, 0x4f, 0x49, 0x7c, 0x5b,
+	0x16, 0xb5, 0x6d, 0xf2, 0x7a, 0x6e, 0x9c, 0xad, 0xbb, 0x21, 0xb0, 0x86, 0x6d, 0xa7, 0x6c, 0xe9,
+	0x1a, 0x3e, 0x40, 0x0b, 0x09, 0x46, 0xc5, 0x8b, 0xbc, 0x51, 0xa4, 0xb5, 0x6c, 0x92, 0xce, 0xa5,
+	0x86, 0x95, 0x68, 0xaa, 0x9c, 0xb6, 0xd5, 0x06, 0x4e, 0xde, 0x5e, 0x69, 0x6b, 0x07, 0xf8, 0x88,
+	0xad, 0x1d, 0xe0, 0xb8, 0x8d, 0xfe, 0x48, 0x30, 0xad, 0x8e, 0x08, 0xbc, 0x15, 0xd0, 0x30, 0x7c,
+	0xec, 0x33, 0x9b, 0xbc, 0x53, 0xc8, 0xff, 0xb3, 0x91, 0x5b, 0x52, 0x7d, 0xa8, 0xc5, 0x11, 0xfd,
+	0x37, 0x9a, 0xb9, 0x8c, 0xef, 0xa1, 0xa5, 0x01, 0xbf, 0x22, 0xa9, 0x16, 0xf3, 0x5d, 0x20, 0x17,
+	0x6a, 0xc6, 0xdf, 0x63, 0x6c, 0xcb, 0x94, 0xfb, 0xc9, 0x51, 0xff, 0x4a, 0x87, 0x57, 0xf0, 0x7d,
+	0xb4, 0x9c, 0x90, 0x55, 0xe8, 0x15, 0xfa, 0xbd, 0x42, 0xff, 0x93, 0x8d, 0xd6, 0xe9, 0x1f, 0x60,
+	0x63, 0x3a, 0xb2, 0x84, 0x77, 0x51, 0x29, 0x81, 0xbb, 0x4e, 0xc8, 0xc9, 0x07, 0x45, 0x5d, 0xcd,
+	0xa6, 0xee, 0x3b, 0x21, 0x4f, 0xe5, 0x28, 0x2a, 0xc6, 0x24, 0x61, 0x4d, 0x91, 0x3e, 0x8e, 0x25,
+	0x89, 0xd1, 0x23, 0xa4, 0xa8, 0x18, 0x1f, 0xbd, 0x24, 0x89, 0x44, 0x3e, 0xcb, 0x8f, 0x3b, 0x7a,
+	0xd1, 0x33, 0x9c, 0x48, 0x5d, 0x8b, 0x13, 0x29, 0x31, 0x3a, 0x91, 0xcf, 0xf3, 0xe3, 0x12, 0x29,
+	0xba, 0x32, 0x12, 0x99, 0x94, 0xd3, 0xb6, 0x44, 0x22, 0x5f, 0x5c, 0x69, 0x6b, 0x38, 0x91, 0xba,
+	0x86, 0x1f, 0xa2, 0xf2, 0x00, 0x46, 0x06, 0x25, 0x00, 0xd6, 0x75, 0x42, 0x79, 0x8f, 0xbd, 0x54,
+	0xcc, 0x6b, 0x63, 0x98, 0x42, 0x7e, 0x18, 0xab, 0x23, 0xfe, 0xef, 0x34, 0x7b, 0x1d, 0x77, 0xd1,
+	0x4a, 0x32, 0x4b, 0x47, 0x67, 0x60, 0xd8, 0x2b, 0x35, 0xec, 0x7a, 0xf6, 0x30, 0x95, 0x92, 0xd1,
+	0x69, 0x84, 0x8e, 0x11, 0xd4, 0x7e, 0x41, 0xf3, 0xdb, 0xdd, 0x80, 0x3f, 0x31, 0x21, 0x0c, 0x7c,
+	0x2f, 0x84, 0x5a, 0x80, 0x56, 0xae, 0xf8, 0x47, 0x84, 0x31, 0x9a, 0x92, 0xb7, 0xbb, 0x21, 0x6f,
+	0x77, 0xf9, 0x2c, 0x6e, 0xfd, 0xf8, 0xfb, 0xd4, 0xb7, 0x7e, 0xf4, 0x8e, 0x57, 0x51, 0x31, 0x74,
+	0xba, 0x81, 0x0b, 0x16, 0xf7, 0x4f, 0x41, 0x5d, 0xfa, 0x79, 0xb3, 0xa0, 0x6a, 0x47, 0xa2, 0x74,
+	0x67, 0xe9, 0xfc, 0x73, 0x65, 0xe2, 0xfc, 0xb2, 0x62, 0x5c, 0x5c, 0x56, 0x8c, 0x4f, 0x97, 0x15,
+	0xe3, 0xe9, 0x97, 0xca, 0xc4, 0xf1, 0x8c, 0xfc, 0xc9, 0xb1, 0xf9, 0x33, 0x00, 0x00, 0xff, 0xff,
+	0xa0, 0xbb, 0x20, 0x2c, 0xca, 0x08, 0x00, 0x00,
+}
+
+func (m *RequestHeader) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.AuthRevision != 0 {
+		i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Username) > 0 {
+		i -= len(m.Username)
+		copy(dAtA[i:], m.Username)
+		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.ID != 0 {
+		i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *InternalRaftRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.AuthRoleRevokePermission != nil {
+		{
+			size, err := m.AuthRoleRevokePermission.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0xa2
+	}
+	if m.AuthRoleGrantPermission != nil {
+		{
+			size, err := m.AuthRoleGrantPermission.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0x9a
+	}
+	if m.AuthRoleGet != nil {
+		{
+			size, err := m.AuthRoleGet.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0x92
+	}
+	if m.AuthRoleDelete != nil {
+		{
+			size, err := m.AuthRoleDelete.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0x8a
+	}
+	if m.AuthRoleAdd != nil {
+		{
+			size, err := m.AuthRoleAdd.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0x82
+	}
+	if m.AuthRoleList != nil {
+		{
+			size, err := m.AuthRoleList.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x45
+		i--
+		dAtA[i] = 0x9a
+	}
+	if m.AuthUserList != nil {
+		{
+			size, err := m.AuthUserList.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x45
+		i--
+		dAtA[i] = 0x92
+	}
+	if m.AuthUserRevokeRole != nil {
+		{
+			size, err := m.AuthUserRevokeRole.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x45
+		i--
+		dAtA[i] = 0x8a
+	}
+	if m.AuthUserGrantRole != nil {
+		{
+			size, err := m.AuthUserGrantRole.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x45
+		i--
+		dAtA[i] = 0x82
+	}
+	if m.AuthUserChangePassword != nil {
+		{
+			size, err := m.AuthUserChangePassword.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x44
+		i--
+		dAtA[i] = 0xfa
+	}
+	if m.AuthUserGet != nil {
+		{
+			size, err := m.AuthUserGet.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x44
+		i--
+		dAtA[i] = 0xf2
+	}
+	if m.AuthUserDelete != nil {
+		{
+			size, err := m.AuthUserDelete.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x44
+		i--
+		dAtA[i] = 0xea
+	}
+	if m.AuthUserAdd != nil {
+		{
+			size, err := m.AuthUserAdd.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x44
+		i--
+		dAtA[i] = 0xe2
+	}
+	if m.Authenticate != nil {
+		{
+			size, err := m.Authenticate.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3f
+		i--
+		dAtA[i] = 0xa2
+	}
+	if m.AuthDisable != nil {
+		{
+			size, err := m.AuthDisable.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3f
+		i--
+		dAtA[i] = 0x9a
+	}
+	if m.AuthEnable != nil {
+		{
+			size, err := m.AuthEnable.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3e
+		i--
+		dAtA[i] = 0xc2
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x6
+		i--
+		dAtA[i] = 0xa2
+	}
+	if m.Alarm != nil {
+		{
+			size, err := m.Alarm.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.LeaseRevoke != nil {
+		{
+			size, err := m.LeaseRevoke.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.LeaseGrant != nil {
+		{
+			size, err := m.LeaseGrant.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x42
+	}
+	if m.Compaction != nil {
+		{
+			size, err := m.Compaction.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	if m.Txn != nil {
+		{
+			size, err := m.Txn.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.DeleteRange != nil {
+		{
+			size, err := m.DeleteRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.Put != nil {
+		{
+			size, err := m.Put.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.Range != nil {
+		{
+			size, err := m.Range.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.V2 != nil {
+		{
+			size, err := m.V2.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.ID != 0 {
+		i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *EmptyResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EmptyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *InternalAuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.SimpleToken) > 0 {
+		i -= len(m.SimpleToken)
+		copy(dAtA[i:], m.SimpleToken)
+		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int {
+	offset -= sovRaftInternal(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *RequestHeader) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRaftInternal(uint64(m.ID))
+	}
+	l = len(m.Username)
+	if l > 0 {
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRevision != 0 {
+		n += 1 + sovRaftInternal(uint64(m.AuthRevision))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *InternalRaftRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRaftInternal(uint64(m.ID))
+	}
+	if m.V2 != nil {
+		l = m.V2.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Range != nil {
+		l = m.Range.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Put != nil {
+		l = m.Put.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.DeleteRange != nil {
+		l = m.DeleteRange.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Txn != nil {
+		l = m.Txn.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Compaction != nil {
+		l = m.Compaction.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.LeaseGrant != nil {
+		l = m.LeaseGrant.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.LeaseRevoke != nil {
+		l = m.LeaseRevoke.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Alarm != nil {
+		l = m.Alarm.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthEnable != nil {
+		l = m.AuthEnable.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthDisable != nil {
+		l = m.AuthDisable.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Authenticate != nil {
+		l = m.Authenticate.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserAdd != nil {
+		l = m.AuthUserAdd.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserDelete != nil {
+		l = m.AuthUserDelete.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserGet != nil {
+		l = m.AuthUserGet.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserChangePassword != nil {
+		l = m.AuthUserChangePassword.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserGrantRole != nil {
+		l = m.AuthUserGrantRole.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserRevokeRole != nil {
+		l = m.AuthUserRevokeRole.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserList != nil {
+		l = m.AuthUserList.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleList != nil {
+		l = m.AuthRoleList.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleAdd != nil {
+		l = m.AuthRoleAdd.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleDelete != nil {
+		l = m.AuthRoleDelete.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleGet != nil {
+		l = m.AuthRoleGet.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleGrantPermission != nil {
+		l = m.AuthRoleGrantPermission.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleRevokePermission != nil {
+		l = m.AuthRoleRevokePermission.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *EmptyResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *InternalAuthenticateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	l = len(m.SimpleToken)
+	if l > 0 {
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovRaftInternal(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozRaftInternal(x uint64) (n int) {
+	return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *RequestHeader) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Username = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType)
+			}
+			m.AuthRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.AuthRevision |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaftInternal(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.V2 == nil {
+				m.V2 = &Request{}
+			}
+			if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Range == nil {
+				m.Range = &RangeRequest{}
+			}
+			if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Put == nil {
+				m.Put = &PutRequest{}
+			}
+			if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DeleteRange == nil {
+				m.DeleteRange = &DeleteRangeRequest{}
+			}
+			if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Txn == nil {
+				m.Txn = &TxnRequest{}
+			}
+			if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Compaction == nil {
+				m.Compaction = &CompactionRequest{}
+			}
+			if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LeaseGrant == nil {
+				m.LeaseGrant = &LeaseGrantRequest{}
+			}
+			if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LeaseRevoke == nil {
+				m.LeaseRevoke = &LeaseRevokeRequest{}
+			}
+			if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Alarm == nil {
+				m.Alarm = &AlarmRequest{}
+			}
+			if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 100:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &RequestHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1000:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthEnable == nil {
+				m.AuthEnable = &AuthEnableRequest{}
+			}
+			if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1011:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthDisable == nil {
+				m.AuthDisable = &AuthDisableRequest{}
+			}
+			if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1012:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Authenticate == nil {
+				m.Authenticate = &InternalAuthenticateRequest{}
+			}
+			if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1100:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserAdd == nil {
+				m.AuthUserAdd = &AuthUserAddRequest{}
+			}
+			if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1101:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserDelete == nil {
+				m.AuthUserDelete = &AuthUserDeleteRequest{}
+			}
+			if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1102:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserGet == nil {
+				m.AuthUserGet = &AuthUserGetRequest{}
+			}
+			if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1103:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserChangePassword == nil {
+				m.AuthUserChangePassword = &AuthUserChangePasswordRequest{}
+			}
+			if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1104:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserGrantRole == nil {
+				m.AuthUserGrantRole = &AuthUserGrantRoleRequest{}
+			}
+			if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1105:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserRevokeRole == nil {
+				m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{}
+			}
+			if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1106:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserList == nil {
+				m.AuthUserList = &AuthUserListRequest{}
+			}
+			if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1107:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleList == nil {
+				m.AuthRoleList = &AuthRoleListRequest{}
+			}
+			if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1200:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleAdd == nil {
+				m.AuthRoleAdd = &AuthRoleAddRequest{}
+			}
+			if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1201:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleDelete == nil {
+				m.AuthRoleDelete = &AuthRoleDeleteRequest{}
+			}
+			if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1202:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleGet == nil {
+				m.AuthRoleGet = &AuthRoleGetRequest{}
+			}
+			if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1203:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleGrantPermission == nil {
+				m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{}
+			}
+			if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1204:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleRevokePermission == nil {
+				m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{}
+			}
+			if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaftInternal(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EmptyResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaftInternal(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SimpleToken = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaftInternal(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipRaftInternal(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthRaftInternal
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthRaftInternal
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowRaftInternal
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipRaftInternal(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthRaftInternal
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowRaftInternal   = fmt.Errorf("proto: integer overflow")
+)
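The hand-rolled decoding above follows the protobuf wire format: each field starts with a varint key holding `(field_number << 3) | wire_type`, length-delimited fields (wire type 2) carry a varint length prefix, and unrecognized fields are skipped by `skipRaftInternal` and preserved in `XXX_unrecognized`. The following is a minimal, self-contained sketch of that varint/key decoding; the `decodeVarint` helper is illustrative and not part of the vendored package.

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads a base-128 varint from b, mirroring the inline loops in
// the generated Unmarshal functions: 7 payload bits per byte, with the high
// bit set on every byte except the last.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	// Field key for field number 1202 (auth_role_get) with wire type 2:
	// 1202<<3 | 2 == 9618, which encodes as the two-byte varint 0x92 0x4B.
	key := []byte{0x92, 0x4B}
	v, _, err := decodeVarint(key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("field=%d wireType=%d\n", v>>3, v&0x7) // field=1202 wireType=2
}
```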
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto
new file mode 100644
index 0000000..25d45d3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto
@@ -0,0 +1,74 @@
+syntax = "proto3";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+import "etcdserver.proto";
+import "rpc.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message RequestHeader {
+  uint64 ID = 1;
+  // username is the username associated with the auth token of the gRPC connection
+  string username = 2;
+  // auth_revision is a revision number of auth.authStore. It is not related to mvcc
+  uint64 auth_revision = 3;
+}
+
+// An InternalRaftRequest is the union of all requests which can be
+// sent via raft.
+message InternalRaftRequest {
+  RequestHeader header = 100;
+  uint64 ID = 1;
+
+  Request v2 = 2;
+
+  RangeRequest range = 3;
+  PutRequest put = 4;
+  DeleteRangeRequest delete_range = 5;
+  TxnRequest txn = 6;
+  CompactionRequest compaction = 7;
+
+  LeaseGrantRequest lease_grant = 8;
+  LeaseRevokeRequest lease_revoke = 9;
+
+  AlarmRequest alarm = 10;
+
+  AuthEnableRequest auth_enable = 1000;
+  AuthDisableRequest auth_disable = 1011;
+
+  InternalAuthenticateRequest authenticate = 1012;
+
+  AuthUserAddRequest auth_user_add = 1100;
+  AuthUserDeleteRequest auth_user_delete = 1101;
+  AuthUserGetRequest auth_user_get = 1102;
+  AuthUserChangePasswordRequest auth_user_change_password = 1103;
+  AuthUserGrantRoleRequest auth_user_grant_role = 1104;
+  AuthUserRevokeRoleRequest auth_user_revoke_role = 1105;
+  AuthUserListRequest auth_user_list = 1106;
+  AuthRoleListRequest auth_role_list = 1107;
+
+  AuthRoleAddRequest auth_role_add = 1200;
+  AuthRoleDeleteRequest auth_role_delete = 1201;
+  AuthRoleGetRequest auth_role_get = 1202;
+  AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203;
+  AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204;
+}
+
+message EmptyResponse {
+}
+
+// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
+// InternalAuthenticateRequest has a member that is filled in by etcdserver and shouldn't be user-facing.
+// To avoid misuse of that field, we keep an internal version of AuthenticateRequest.
+message InternalAuthenticateRequest {
+  string name = 1;
+  string password = 2;
+
+  // simple_token is generated in API layer (etcdserver/v3_server.go)
+  string simple_token = 3;
+}
+
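Every request that etcdserver replicates through raft is wrapped in this union: exactly one of the request fields is set, and the high field numbers (1000+, 1100+, 1200+) group the auth-related entries. Below is a hedged round-trip sketch using the generated Go types from raft_internal.pb.go and rpc.pb.go (field names such as `Username`, `AuthRevision`, and `Role` are assumed to be the usual gogo/protobuf-generated names; the program itself is only illustrative):

```go
package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
	// Wrap an auth request in the raft-internal union, the way etcdserver
	// proposes it through raft (auth_role_get is field 1202 above).
	req := pb.InternalRaftRequest{
		Header:      &pb.RequestHeader{ID: 1, Username: "root", AuthRevision: 7},
		AuthRoleGet: &pb.AuthRoleGetRequest{Role: "guest"},
	}

	data, err := req.Marshal()
	if err != nil {
		panic(err)
	}

	var decoded pb.InternalRaftRequest
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(decoded.AuthRoleGet.Role) // "guest"
}
```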
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
new file mode 100644
index 0000000..31e121e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
@@ -0,0 +1,183 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserverpb
+
+import (
+	"fmt"
+	"strings"
+
+	proto "github.com/golang/protobuf/proto"
+)
+
+// InternalRaftStringer implements a custom proto Stringer:
+// it redacts passwords and replaces value fields with value_size fields.
+type InternalRaftStringer struct {
+	Request *InternalRaftRequest
+}
+
+func (as *InternalRaftStringer) String() string {
+	switch {
+	case as.Request.LeaseGrant != nil:
+		return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
+			as.Request.Header.String(),
+			as.Request.LeaseGrant.TTL,
+			as.Request.LeaseGrant.ID,
+		)
+	case as.Request.LeaseRevoke != nil:
+		return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
+			as.Request.Header.String(),
+			as.Request.LeaseRevoke.ID,
+		)
+	case as.Request.Authenticate != nil:
+		return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
+			as.Request.Header.String(),
+			as.Request.Authenticate.Name,
+			as.Request.Authenticate.SimpleToken,
+		)
+	case as.Request.AuthUserAdd != nil:
+		return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
+			as.Request.Header.String(),
+			as.Request.AuthUserAdd.Name,
+		)
+	case as.Request.AuthUserChangePassword != nil:
+		return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
+			as.Request.Header.String(),
+			as.Request.AuthUserChangePassword.Name,
+		)
+	case as.Request.Put != nil:
+		return fmt.Sprintf("header:<%s> put:<%s>",
+			as.Request.Header.String(),
+			NewLoggablePutRequest(as.Request.Put).String(),
+		)
+	case as.Request.Txn != nil:
+		return fmt.Sprintf("header:<%s> txn:<%s>",
+			as.Request.Header.String(),
+			NewLoggableTxnRequest(as.Request.Txn).String(),
+		)
+	default:
+		// nothing to redact
+	}
+	return as.Request.String()
+}
+
+// txnRequestStringer implements a custom proto String to replace value bytes fields with value size
+// fields in any nested txn and put operations.
+type txnRequestStringer struct {
+	Request *TxnRequest
+}
+
+func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer {
+	return &txnRequestStringer{request}
+}
+
+func (as *txnRequestStringer) String() string {
+	var compare []string
+	for _, c := range as.Request.Compare {
+		switch cv := c.TargetUnion.(type) {
+		case *Compare_Value:
+			compare = append(compare, newLoggableValueCompare(c, cv).String())
+		default:
+			// nothing to redact
+			compare = append(compare, c.String())
+		}
+	}
+	var success []string
+	for _, s := range as.Request.Success {
+		success = append(success, newLoggableRequestOp(s).String())
+	}
+	var failure []string
+	for _, f := range as.Request.Failure {
+		failure = append(failure, newLoggableRequestOp(f).String())
+	}
+	return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>",
+		strings.Join(compare, " "),
+		strings.Join(success, " "),
+		strings.Join(failure, " "),
+	)
+}
+
+// requestOpStringer implements a custom proto String to replace value bytes fields with value
+// size fields in any nested txn and put operations.
+type requestOpStringer struct {
+	Op *RequestOp
+}
+
+func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
+	return &requestOpStringer{op}
+}
+
+func (as *requestOpStringer) String() string {
+	switch op := as.Op.Request.(type) {
+	case *RequestOp_RequestPut:
+		return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
+	case *RequestOp_RequestTxn:
+		return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
+	default:
+		// nothing to redact
+	}
+	return as.Op.String()
+}
+
+// loggableValueCompare implements a custom proto String for Compare.Value union member types to
+// replace the value bytes field with a value size field.
+// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
+type loggableValueCompare struct {
+	Result    Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"`
+	Target    Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"`
+	Key       []byte                `protobuf:"bytes,3,opt,name=key,proto3"`
+	ValueSize int64                 `protobuf:"varint,7,opt,name=value_size,proto3"`
+	RangeEnd  []byte                `protobuf:"bytes,64,opt,name=range_end,proto3"`
+}
+
+func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare {
+	return &loggableValueCompare{
+		c.Result,
+		c.Target,
+		c.Key,
+		int64(len(cv.Value)),
+		c.RangeEnd,
+	}
+}
+
+func (m *loggableValueCompare) Reset()         { *m = loggableValueCompare{} }
+func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) }
+func (*loggableValueCompare) ProtoMessage()    {}
+
+// loggablePutRequest implements a custom proto String to replace value bytes field with a value
+// size field.
+// To preserve proto encoding of the key bytes, a faked out proto type is used here.
+type loggablePutRequest struct {
+	Key         []byte `protobuf:"bytes,1,opt,name=key,proto3"`
+	ValueSize   int64  `protobuf:"varint,2,opt,name=value_size,proto3"`
+	Lease       int64  `protobuf:"varint,3,opt,name=lease,proto3"`
+	PrevKv      bool   `protobuf:"varint,4,opt,name=prev_kv,proto3"`
+	IgnoreValue bool   `protobuf:"varint,5,opt,name=ignore_value,proto3"`
+	IgnoreLease bool   `protobuf:"varint,6,opt,name=ignore_lease,proto3"`
+}
+
+func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest {
+	return &loggablePutRequest{
+		request.Key,
+		int64(len(request.Value)),
+		request.Lease,
+		request.PrevKv,
+		request.IgnoreValue,
+		request.IgnoreLease,
+	}
+}
+
+func (m *loggablePutRequest) Reset()         { *m = loggablePutRequest{} }
+func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) }
+func (*loggablePutRequest) ProtoMessage()    {}
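The stringer wrappers above let raft-internal requests be logged without leaking passwords or dumping whole value payloads. A small usage sketch (the wrapper types come from this file, the request types from the generated rpc.pb.go; the surrounding program is illustrative):

```go
package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
	req := &pb.InternalRaftRequest{
		Header: &pb.RequestHeader{ID: 42},
		Put: &pb.PutRequest{
			Key:   []byte("foo"),
			Value: []byte("a large or sensitive value"),
		},
	}

	// The generated String() would print the raw value bytes; the custom
	// stringer prints a value_size field instead.
	fmt.Println((&pb.InternalRaftStringer{Request: req}).String())
}
```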
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
new file mode 100644
index 0000000..a0cff8f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
@@ -0,0 +1,24010 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: rpc.proto
+
+package etcdserverpb
+
+import (
+	context "context"
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	authpb "github.com/coreos/etcd/auth/authpb"
+	mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type AlarmType int32
+
+const (
+	AlarmType_NONE    AlarmType = 0
+	AlarmType_NOSPACE AlarmType = 1
+	AlarmType_CORRUPT AlarmType = 2
+)
+
+var AlarmType_name = map[int32]string{
+	0: "NONE",
+	1: "NOSPACE",
+	2: "CORRUPT",
+}
+
+var AlarmType_value = map[string]int32{
+	"NONE":    0,
+	"NOSPACE": 1,
+	"CORRUPT": 2,
+}
+
+func (x AlarmType) String() string {
+	return proto.EnumName(AlarmType_name, int32(x))
+}
+
+func (AlarmType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{0}
+}
+
+type RangeRequest_SortOrder int32
+
+const (
+	RangeRequest_NONE    RangeRequest_SortOrder = 0
+	RangeRequest_ASCEND  RangeRequest_SortOrder = 1
+	RangeRequest_DESCEND RangeRequest_SortOrder = 2
+)
+
+var RangeRequest_SortOrder_name = map[int32]string{
+	0: "NONE",
+	1: "ASCEND",
+	2: "DESCEND",
+}
+
+var RangeRequest_SortOrder_value = map[string]int32{
+	"NONE":    0,
+	"ASCEND":  1,
+	"DESCEND": 2,
+}
+
+func (x RangeRequest_SortOrder) String() string {
+	return proto.EnumName(RangeRequest_SortOrder_name, int32(x))
+}
+
+func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{1, 0}
+}
+
+type RangeRequest_SortTarget int32
+
+const (
+	RangeRequest_KEY     RangeRequest_SortTarget = 0
+	RangeRequest_VERSION RangeRequest_SortTarget = 1
+	RangeRequest_CREATE  RangeRequest_SortTarget = 2
+	RangeRequest_MOD     RangeRequest_SortTarget = 3
+	RangeRequest_VALUE   RangeRequest_SortTarget = 4
+)
+
+var RangeRequest_SortTarget_name = map[int32]string{
+	0: "KEY",
+	1: "VERSION",
+	2: "CREATE",
+	3: "MOD",
+	4: "VALUE",
+}
+
+var RangeRequest_SortTarget_value = map[string]int32{
+	"KEY":     0,
+	"VERSION": 1,
+	"CREATE":  2,
+	"MOD":     3,
+	"VALUE":   4,
+}
+
+func (x RangeRequest_SortTarget) String() string {
+	return proto.EnumName(RangeRequest_SortTarget_name, int32(x))
+}
+
+func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{1, 1}
+}
+
+type Compare_CompareResult int32
+
+const (
+	Compare_EQUAL     Compare_CompareResult = 0
+	Compare_GREATER   Compare_CompareResult = 1
+	Compare_LESS      Compare_CompareResult = 2
+	Compare_NOT_EQUAL Compare_CompareResult = 3
+)
+
+var Compare_CompareResult_name = map[int32]string{
+	0: "EQUAL",
+	1: "GREATER",
+	2: "LESS",
+	3: "NOT_EQUAL",
+}
+
+var Compare_CompareResult_value = map[string]int32{
+	"EQUAL":     0,
+	"GREATER":   1,
+	"LESS":      2,
+	"NOT_EQUAL": 3,
+}
+
+func (x Compare_CompareResult) String() string {
+	return proto.EnumName(Compare_CompareResult_name, int32(x))
+}
+
+func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{9, 0}
+}
+
+type Compare_CompareTarget int32
+
+const (
+	Compare_VERSION Compare_CompareTarget = 0
+	Compare_CREATE  Compare_CompareTarget = 1
+	Compare_MOD     Compare_CompareTarget = 2
+	Compare_VALUE   Compare_CompareTarget = 3
+	Compare_LEASE   Compare_CompareTarget = 4
+)
+
+var Compare_CompareTarget_name = map[int32]string{
+	0: "VERSION",
+	1: "CREATE",
+	2: "MOD",
+	3: "VALUE",
+	4: "LEASE",
+}
+
+var Compare_CompareTarget_value = map[string]int32{
+	"VERSION": 0,
+	"CREATE":  1,
+	"MOD":     2,
+	"VALUE":   3,
+	"LEASE":   4,
+}
+
+func (x Compare_CompareTarget) String() string {
+	return proto.EnumName(Compare_CompareTarget_name, int32(x))
+}
+
+func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{9, 1}
+}
+
+type WatchCreateRequest_FilterType int32
+
+const (
+	// filter out put event.
+	WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0
+	// filter out delete event.
+	WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1
+)
+
+var WatchCreateRequest_FilterType_name = map[int32]string{
+	0: "NOPUT",
+	1: "NODELETE",
+}
+
+var WatchCreateRequest_FilterType_value = map[string]int32{
+	"NOPUT":    0,
+	"NODELETE": 1,
+}
+
+func (x WatchCreateRequest_FilterType) String() string {
+	return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x))
+}
+
+func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{21, 0}
+}
+
+type AlarmRequest_AlarmAction int32
+
+const (
+	AlarmRequest_GET        AlarmRequest_AlarmAction = 0
+	AlarmRequest_ACTIVATE   AlarmRequest_AlarmAction = 1
+	AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2
+)
+
+var AlarmRequest_AlarmAction_name = map[int32]string{
+	0: "GET",
+	1: "ACTIVATE",
+	2: "DEACTIVATE",
+}
+
+var AlarmRequest_AlarmAction_value = map[string]int32{
+	"GET":        0,
+	"ACTIVATE":   1,
+	"DEACTIVATE": 2,
+}
+
+func (x AlarmRequest_AlarmAction) String() string {
+	return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x))
+}
+
+func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{49, 0}
+}
+
+type ResponseHeader struct {
+	// cluster_id is the ID of the cluster which sent the response.
+	ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+	// member_id is the ID of the member which sent the response.
+	MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"`
+	// revision is the key-value store revision when the request was applied.
+	// For watch progress responses, the header.revision indicates progress. All future events
+	// received in this stream are guaranteed to have a higher revision number than the
+	// header.revision number.
+	Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
+	// raft_term is the raft term when the request was applied.
+	RaftTerm             uint64   `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ResponseHeader) Reset()         { *m = ResponseHeader{} }
+func (m *ResponseHeader) String() string { return proto.CompactTextString(m) }
+func (*ResponseHeader) ProtoMessage()    {}
+func (*ResponseHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{0}
+}
+func (m *ResponseHeader) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ResponseHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResponseHeader.Merge(m, src)
+}
+func (m *ResponseHeader) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResponseHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResponseHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo
+
+func (m *ResponseHeader) GetClusterId() uint64 {
+	if m != nil {
+		return m.ClusterId
+	}
+	return 0
+}
+
+func (m *ResponseHeader) GetMemberId() uint64 {
+	if m != nil {
+		return m.MemberId
+	}
+	return 0
+}
+
+func (m *ResponseHeader) GetRevision() int64 {
+	if m != nil {
+		return m.Revision
+	}
+	return 0
+}
+
+func (m *ResponseHeader) GetRaftTerm() uint64 {
+	if m != nil {
+		return m.RaftTerm
+	}
+	return 0
+}
+
+type RangeRequest struct {
+	// key is the first key for the range. If range_end is not given, the request only looks up key.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// range_end is the upper bound on the requested range [key, range_end).
+	// If range_end is '\0', the range is all keys >= key.
+	// If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+	// then the range request gets all keys prefixed with key.
+	// If both key and range_end are '\0', then the range request returns all keys.
+	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	// limit is a limit on the number of keys returned for the request. When limit is set to 0,
+	// it is treated as no limit.
+	Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
+	// revision is the point-in-time of the key-value store to use for the range.
+	// If revision is less or equal to zero, the range is over the newest key-value store.
+	// If the revision has been compacted, ErrCompacted is returned as a response.
+	Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"`
+	// sort_order is the order for returned sorted results.
+	SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"`
+	// sort_target is the key-value field to use for sorting.
+	SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"`
+	// serializable sets the range request to use serializable member-local reads.
+	// Range requests are linearizable by default; linearizable requests have higher
+	// latency and lower throughput than serializable requests but reflect the current
+	// consensus of the cluster. For better performance, in exchange for possible stale reads,
+	// a serializable range request is served locally without needing to reach consensus
+	// with other nodes in the cluster.
+	Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"`
+	// keys_only when set returns only the keys and not the values.
+	KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"`
+	// count_only when set returns only the count of the keys in the range.
+	CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"`
+	// min_mod_revision is the lower bound for returned key mod revisions; all keys with
+	// lesser mod revisions will be filtered away.
+	MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"`
+	// max_mod_revision is the upper bound for returned key mod revisions; all keys with
+	// greater mod revisions will be filtered away.
+	MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"`
+	// min_create_revision is the lower bound for returned key create revisions; all keys with
+	// lesser create revisions will be filtered away.
+	MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"`
+	// max_create_revision is the upper bound for returned key create revisions; all keys with
+	// greater create revisions will be filtered away.
+	MaxCreateRevision    int64    `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *RangeRequest) Reset()         { *m = RangeRequest{} }
+func (m *RangeRequest) String() string { return proto.CompactTextString(m) }
+func (*RangeRequest) ProtoMessage()    {}
+func (*RangeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{1}
+}
+func (m *RangeRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RangeRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RangeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RangeRequest.Merge(m, src)
+}
+func (m *RangeRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *RangeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_RangeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RangeRequest proto.InternalMessageInfo
+
+func (m *RangeRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *RangeRequest) GetRangeEnd() []byte {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return nil
+}
+
+func (m *RangeRequest) GetLimit() int64 {
+	if m != nil {
+		return m.Limit
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetRevision() int64 {
+	if m != nil {
+		return m.Revision
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder {
+	if m != nil {
+		return m.SortOrder
+	}
+	return RangeRequest_NONE
+}
+
+func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget {
+	if m != nil {
+		return m.SortTarget
+	}
+	return RangeRequest_KEY
+}
+
+func (m *RangeRequest) GetSerializable() bool {
+	if m != nil {
+		return m.Serializable
+	}
+	return false
+}
+
+func (m *RangeRequest) GetKeysOnly() bool {
+	if m != nil {
+		return m.KeysOnly
+	}
+	return false
+}
+
+func (m *RangeRequest) GetCountOnly() bool {
+	if m != nil {
+		return m.CountOnly
+	}
+	return false
+}
+
+func (m *RangeRequest) GetMinModRevision() int64 {
+	if m != nil {
+		return m.MinModRevision
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetMaxModRevision() int64 {
+	if m != nil {
+		return m.MaxModRevision
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetMinCreateRevision() int64 {
+	if m != nil {
+		return m.MinCreateRevision
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetMaxCreateRevision() int64 {
+	if m != nil {
+		return m.MaxCreateRevision
+	}
+	return 0
+}
+
+type RangeResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// kvs is the list of key-value pairs matched by the range request.
+	// kvs is empty when count is requested.
+	Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs,proto3" json:"kvs,omitempty"`
+	// more indicates if there are more keys to return in the requested range.
+	More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"`
+	// count is set to the number of keys within the range when requested.
+	Count                int64    `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *RangeResponse) Reset()         { *m = RangeResponse{} }
+func (m *RangeResponse) String() string { return proto.CompactTextString(m) }
+func (*RangeResponse) ProtoMessage()    {}
+func (*RangeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{2}
+}
+func (m *RangeResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RangeResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RangeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RangeResponse.Merge(m, src)
+}
+func (m *RangeResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *RangeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_RangeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RangeResponse proto.InternalMessageInfo
+
+func (m *RangeResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue {
+	if m != nil {
+		return m.Kvs
+	}
+	return nil
+}
+
+func (m *RangeResponse) GetMore() bool {
+	if m != nil {
+		return m.More
+	}
+	return false
+}
+
+func (m *RangeResponse) GetCount() int64 {
+	if m != nil {
+		return m.Count
+	}
+	return 0
+}
+
+type PutRequest struct {
+	// key is the key, in bytes, to put into the key-value store.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// value is the value, in bytes, to associate with the key in the key-value store.
+	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	// lease is the lease ID to associate with the key in the key-value store. A lease
+	// value of 0 indicates no lease.
+	Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"`
+	// If prev_kv is set, etcd gets the previous key-value pair before changing it.
+	// The previous key-value pair will be returned in the put response.
+	PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	// If ignore_value is set, etcd updates the key using its current value.
+	// Returns an error if the key does not exist.
+	IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"`
+	// If ignore_lease is set, etcd updates the key using its current lease.
+	// Returns an error if the key does not exist.
+	IgnoreLease          bool     `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *PutRequest) Reset()         { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage()    {}
+func (*PutRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{3}
+}
+func (m *PutRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PutRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PutRequest.Merge(m, src)
+}
+func (m *PutRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *PutRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_PutRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PutRequest proto.InternalMessageInfo
+
+func (m *PutRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *PutRequest) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *PutRequest) GetLease() int64 {
+	if m != nil {
+		return m.Lease
+	}
+	return 0
+}
+
+func (m *PutRequest) GetPrevKv() bool {
+	if m != nil {
+		return m.PrevKv
+	}
+	return false
+}
+
+func (m *PutRequest) GetIgnoreValue() bool {
+	if m != nil {
+		return m.IgnoreValue
+	}
+	return false
+}
+
+func (m *PutRequest) GetIgnoreLease() bool {
+	if m != nil {
+		return m.IgnoreLease
+	}
+	return false
+}
+
+type PutResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// if prev_kv is set in the request, the previous key-value pair will be returned.
+	PrevKv               *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *PutResponse) Reset()         { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage()    {}
+func (*PutResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{4}
+}
+func (m *PutResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PutResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PutResponse.Merge(m, src)
+}
+func (m *PutResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *PutResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_PutResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PutResponse proto.InternalMessageInfo
+
+func (m *PutResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue {
+	if m != nil {
+		return m.PrevKv
+	}
+	return nil
+}
+
+type DeleteRangeRequest struct {
+	// key is the first key to delete in the range.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// range_end is the key following the last key to delete for the range [key, range_end).
+	// If range_end is not given, the range is defined to contain only the key argument.
+	// If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+	// then the range is all the keys with the prefix (the given key).
+	// If range_end is '\0', the range is all keys greater than or equal to the key argument.
+	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	// If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
+	// The previous key-value pairs will be returned in the delete response.
+	PrevKv               bool     `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DeleteRangeRequest) Reset()         { *m = DeleteRangeRequest{} }
+func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRangeRequest) ProtoMessage()    {}
+func (*DeleteRangeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{5}
+}
+func (m *DeleteRangeRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteRangeRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteRangeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteRangeRequest.Merge(m, src)
+}
+func (m *DeleteRangeRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteRangeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteRangeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteRangeRequest proto.InternalMessageInfo
+
+func (m *DeleteRangeRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *DeleteRangeRequest) GetRangeEnd() []byte {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return nil
+}
+
+func (m *DeleteRangeRequest) GetPrevKv() bool {
+	if m != nil {
+		return m.PrevKv
+	}
+	return false
+}
+
+type DeleteRangeResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// deleted is the number of keys deleted by the delete range request.
+	Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"`
+	// if prev_kv is set in the request, the previous key-value pairs will be returned.
+	PrevKvs              []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs,proto3" json:"prev_kvs,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *DeleteRangeResponse) Reset()         { *m = DeleteRangeResponse{} }
+func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteRangeResponse) ProtoMessage()    {}
+func (*DeleteRangeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{6}
+}
+func (m *DeleteRangeResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteRangeResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteRangeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteRangeResponse.Merge(m, src)
+}
+func (m *DeleteRangeResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteRangeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteRangeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteRangeResponse proto.InternalMessageInfo
+
+func (m *DeleteRangeResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *DeleteRangeResponse) GetDeleted() int64 {
+	if m != nil {
+		return m.Deleted
+	}
+	return 0
+}
+
+func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue {
+	if m != nil {
+		return m.PrevKvs
+	}
+	return nil
+}
+
+type RequestOp struct {
+	// request is a union of request types accepted by a transaction.
+	//
+	// Types that are valid to be assigned to Request:
+	//	*RequestOp_RequestRange
+	//	*RequestOp_RequestPut
+	//	*RequestOp_RequestDeleteRange
+	//	*RequestOp_RequestTxn
+	Request              isRequestOp_Request `protobuf_oneof:"request"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *RequestOp) Reset()         { *m = RequestOp{} }
+func (m *RequestOp) String() string { return proto.CompactTextString(m) }
+func (*RequestOp) ProtoMessage()    {}
+func (*RequestOp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{7}
+}
+func (m *RequestOp) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RequestOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RequestOp.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RequestOp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RequestOp.Merge(m, src)
+}
+func (m *RequestOp) XXX_Size() int {
+	return m.Size()
+}
+func (m *RequestOp) XXX_DiscardUnknown() {
+	xxx_messageInfo_RequestOp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RequestOp proto.InternalMessageInfo
+
+type isRequestOp_Request interface {
+	isRequestOp_Request()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type RequestOp_RequestRange struct {
+	RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,proto3,oneof"`
+}
+type RequestOp_RequestPut struct {
+	RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,proto3,oneof"`
+}
+type RequestOp_RequestDeleteRange struct {
+	RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,proto3,oneof"`
+}
+type RequestOp_RequestTxn struct {
+	RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,proto3,oneof"`
+}
+
+func (*RequestOp_RequestRange) isRequestOp_Request()       {}
+func (*RequestOp_RequestPut) isRequestOp_Request()         {}
+func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {}
+func (*RequestOp_RequestTxn) isRequestOp_Request()         {}
+
+func (m *RequestOp) GetRequest() isRequestOp_Request {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+func (m *RequestOp) GetRequestRange() *RangeRequest {
+	if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok {
+		return x.RequestRange
+	}
+	return nil
+}
+
+func (m *RequestOp) GetRequestPut() *PutRequest {
+	if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok {
+		return x.RequestPut
+	}
+	return nil
+}
+
+func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest {
+	if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok {
+		return x.RequestDeleteRange
+	}
+	return nil
+}
+
+func (m *RequestOp) GetRequestTxn() *TxnRequest {
+	if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok {
+		return x.RequestTxn
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{
+		(*RequestOp_RequestRange)(nil),
+		(*RequestOp_RequestPut)(nil),
+		(*RequestOp_RequestDeleteRange)(nil),
+		(*RequestOp_RequestTxn)(nil),
+	}
+}
+
+func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*RequestOp)
+	// request
+	switch x := m.Request.(type) {
+	case *RequestOp_RequestRange:
+		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.RequestRange); err != nil {
+			return err
+		}
+	case *RequestOp_RequestPut:
+		_ = b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.RequestPut); err != nil {
+			return err
+		}
+	case *RequestOp_RequestDeleteRange:
+		_ = b.EncodeVarint(3<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.RequestDeleteRange); err != nil {
+			return err
+		}
+	case *RequestOp_RequestTxn:
+		_ = b.EncodeVarint(4<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.RequestTxn); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("RequestOp.Request has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*RequestOp)
+	switch tag {
+	case 1: // request.request_range
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(RangeRequest)
+		err := b.DecodeMessage(msg)
+		m.Request = &RequestOp_RequestRange{msg}
+		return true, err
+	case 2: // request.request_put
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(PutRequest)
+		err := b.DecodeMessage(msg)
+		m.Request = &RequestOp_RequestPut{msg}
+		return true, err
+	case 3: // request.request_delete_range
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(DeleteRangeRequest)
+		err := b.DecodeMessage(msg)
+		m.Request = &RequestOp_RequestDeleteRange{msg}
+		return true, err
+	case 4: // request.request_txn
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(TxnRequest)
+		err := b.DecodeMessage(msg)
+		m.Request = &RequestOp_RequestTxn{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _RequestOp_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*RequestOp)
+	// request
+	switch x := m.Request.(type) {
+	case *RequestOp_RequestRange:
+		s := proto.Size(x.RequestRange)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *RequestOp_RequestPut:
+		s := proto.Size(x.RequestPut)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *RequestOp_RequestDeleteRange:
+		s := proto.Size(x.RequestDeleteRange)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *RequestOp_RequestTxn:
+		s := proto.Size(x.RequestTxn)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+type ResponseOp struct {
+	// response is a union of response types returned by a transaction.
+	//
+	// Types that are valid to be assigned to Response:
+	//	*ResponseOp_ResponseRange
+	//	*ResponseOp_ResponsePut
+	//	*ResponseOp_ResponseDeleteRange
+	//	*ResponseOp_ResponseTxn
+	Response             isResponseOp_Response `protobuf_oneof:"response"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *ResponseOp) Reset()         { *m = ResponseOp{} }
+func (m *ResponseOp) String() string { return proto.CompactTextString(m) }
+func (*ResponseOp) ProtoMessage()    {}
+func (*ResponseOp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{8}
+}
+func (m *ResponseOp) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResponseOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ResponseOp.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ResponseOp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResponseOp.Merge(m, src)
+}
+func (m *ResponseOp) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResponseOp) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResponseOp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResponseOp proto.InternalMessageInfo
+
+type isResponseOp_Response interface {
+	isResponseOp_Response()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type ResponseOp_ResponseRange struct {
+	ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,proto3,oneof"`
+}
+type ResponseOp_ResponsePut struct {
+	ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,proto3,oneof"`
+}
+type ResponseOp_ResponseDeleteRange struct {
+	ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,proto3,oneof"`
+}
+type ResponseOp_ResponseTxn struct {
+	ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,proto3,oneof"`
+}
+
+func (*ResponseOp_ResponseRange) isResponseOp_Response()       {}
+func (*ResponseOp_ResponsePut) isResponseOp_Response()         {}
+func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {}
+func (*ResponseOp_ResponseTxn) isResponseOp_Response()         {}
+
+func (m *ResponseOp) GetResponse() isResponseOp_Response {
+	if m != nil {
+		return m.Response
+	}
+	return nil
+}
+
+func (m *ResponseOp) GetResponseRange() *RangeResponse {
+	if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok {
+		return x.ResponseRange
+	}
+	return nil
+}
+
+func (m *ResponseOp) GetResponsePut() *PutResponse {
+	if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok {
+		return x.ResponsePut
+	}
+	return nil
+}
+
+func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse {
+	if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok {
+		return x.ResponseDeleteRange
+	}
+	return nil
+}
+
+func (m *ResponseOp) GetResponseTxn() *TxnResponse {
+	if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok {
+		return x.ResponseTxn
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{
+		(*ResponseOp_ResponseRange)(nil),
+		(*ResponseOp_ResponsePut)(nil),
+		(*ResponseOp_ResponseDeleteRange)(nil),
+		(*ResponseOp_ResponseTxn)(nil),
+	}
+}
+
+func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*ResponseOp)
+	// response
+	switch x := m.Response.(type) {
+	case *ResponseOp_ResponseRange:
+		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ResponseRange); err != nil {
+			return err
+		}
+	case *ResponseOp_ResponsePut:
+		_ = b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ResponsePut); err != nil {
+			return err
+		}
+	case *ResponseOp_ResponseDeleteRange:
+		_ = b.EncodeVarint(3<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil {
+			return err
+		}
+	case *ResponseOp_ResponseTxn:
+		_ = b.EncodeVarint(4<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ResponseTxn); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("ResponseOp.Response has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*ResponseOp)
+	switch tag {
+	case 1: // response.response_range
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(RangeResponse)
+		err := b.DecodeMessage(msg)
+		m.Response = &ResponseOp_ResponseRange{msg}
+		return true, err
+	case 2: // response.response_put
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(PutResponse)
+		err := b.DecodeMessage(msg)
+		m.Response = &ResponseOp_ResponsePut{msg}
+		return true, err
+	case 3: // response.response_delete_range
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(DeleteRangeResponse)
+		err := b.DecodeMessage(msg)
+		m.Response = &ResponseOp_ResponseDeleteRange{msg}
+		return true, err
+	case 4: // response.response_txn
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(TxnResponse)
+		err := b.DecodeMessage(msg)
+		m.Response = &ResponseOp_ResponseTxn{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _ResponseOp_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*ResponseOp)
+	// response
+	switch x := m.Response.(type) {
+	case *ResponseOp_ResponseRange:
+		s := proto.Size(x.ResponseRange)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *ResponseOp_ResponsePut:
+		s := proto.Size(x.ResponsePut)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *ResponseOp_ResponseDeleteRange:
+		s := proto.Size(x.ResponseDeleteRange)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *ResponseOp_ResponseTxn:
+		s := proto.Size(x.ResponseTxn)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+type Compare struct {
+	// result is the logical comparison operation for this comparison.
+	Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"`
+	// target is the key-value field to inspect for the comparison.
+	Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"`
+	// key is the subject key for the comparison operation.
+	Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+	// Types that are valid to be assigned to TargetUnion:
+	//	*Compare_Version
+	//	*Compare_CreateRevision
+	//	*Compare_ModRevision
+	//	*Compare_Value
+	//	*Compare_Lease
+	TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"`
+	// range_end compares the given target to all keys in the range [key, range_end).
+	// See RangeRequest for more details on key ranges.
+	RangeEnd             []byte   `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Compare) Reset()         { *m = Compare{} }
+func (m *Compare) String() string { return proto.CompactTextString(m) }
+func (*Compare) ProtoMessage()    {}
+func (*Compare) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{9}
+}
+func (m *Compare) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Compare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Compare.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Compare) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Compare.Merge(m, src)
+}
+func (m *Compare) XXX_Size() int {
+	return m.Size()
+}
+func (m *Compare) XXX_DiscardUnknown() {
+	xxx_messageInfo_Compare.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Compare proto.InternalMessageInfo
+
+type isCompare_TargetUnion interface {
+	isCompare_TargetUnion()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type Compare_Version struct {
+	Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"`
+}
+type Compare_CreateRevision struct {
+	CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof"`
+}
+type Compare_ModRevision struct {
+	ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof"`
+}
+type Compare_Value struct {
+	Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"`
+}
+type Compare_Lease struct {
+	Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof"`
+}
+
+func (*Compare_Version) isCompare_TargetUnion()        {}
+func (*Compare_CreateRevision) isCompare_TargetUnion() {}
+func (*Compare_ModRevision) isCompare_TargetUnion()    {}
+func (*Compare_Value) isCompare_TargetUnion()          {}
+func (*Compare_Lease) isCompare_TargetUnion()          {}
+
+func (m *Compare) GetTargetUnion() isCompare_TargetUnion {
+	if m != nil {
+		return m.TargetUnion
+	}
+	return nil
+}
+
+func (m *Compare) GetResult() Compare_CompareResult {
+	if m != nil {
+		return m.Result
+	}
+	return Compare_EQUAL
+}
+
+func (m *Compare) GetTarget() Compare_CompareTarget {
+	if m != nil {
+		return m.Target
+	}
+	return Compare_VERSION
+}
+
+func (m *Compare) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *Compare) GetVersion() int64 {
+	if x, ok := m.GetTargetUnion().(*Compare_Version); ok {
+		return x.Version
+	}
+	return 0
+}
+
+func (m *Compare) GetCreateRevision() int64 {
+	if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok {
+		return x.CreateRevision
+	}
+	return 0
+}
+
+func (m *Compare) GetModRevision() int64 {
+	if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok {
+		return x.ModRevision
+	}
+	return 0
+}
+
+func (m *Compare) GetValue() []byte {
+	if x, ok := m.GetTargetUnion().(*Compare_Value); ok {
+		return x.Value
+	}
+	return nil
+}
+
+func (m *Compare) GetLease() int64 {
+	if x, ok := m.GetTargetUnion().(*Compare_Lease); ok {
+		return x.Lease
+	}
+	return 0
+}
+
+func (m *Compare) GetRangeEnd() []byte {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{
+		(*Compare_Version)(nil),
+		(*Compare_CreateRevision)(nil),
+		(*Compare_ModRevision)(nil),
+		(*Compare_Value)(nil),
+		(*Compare_Lease)(nil),
+	}
+}
+
+func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*Compare)
+	// target_union
+	switch x := m.TargetUnion.(type) {
+	case *Compare_Version:
+		_ = b.EncodeVarint(4<<3 | proto.WireVarint)
+		_ = b.EncodeVarint(uint64(x.Version))
+	case *Compare_CreateRevision:
+		_ = b.EncodeVarint(5<<3 | proto.WireVarint)
+		_ = b.EncodeVarint(uint64(x.CreateRevision))
+	case *Compare_ModRevision:
+		_ = b.EncodeVarint(6<<3 | proto.WireVarint)
+		_ = b.EncodeVarint(uint64(x.ModRevision))
+	case *Compare_Value:
+		_ = b.EncodeVarint(7<<3 | proto.WireBytes)
+		_ = b.EncodeRawBytes(x.Value)
+	case *Compare_Lease:
+		_ = b.EncodeVarint(8<<3 | proto.WireVarint)
+		_ = b.EncodeVarint(uint64(x.Lease))
+	case nil:
+	default:
+		return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*Compare)
+	switch tag {
+	case 4: // target_union.version
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.TargetUnion = &Compare_Version{int64(x)}
+		return true, err
+	case 5: // target_union.create_revision
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.TargetUnion = &Compare_CreateRevision{int64(x)}
+		return true, err
+	case 6: // target_union.mod_revision
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.TargetUnion = &Compare_ModRevision{int64(x)}
+		return true, err
+	case 7: // target_union.value
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeRawBytes(true)
+		m.TargetUnion = &Compare_Value{x}
+		return true, err
+	case 8: // target_union.lease
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.TargetUnion = &Compare_Lease{int64(x)}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _Compare_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*Compare)
+	// target_union
+	switch x := m.TargetUnion.(type) {
+	case *Compare_Version:
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(x.Version))
+	case *Compare_CreateRevision:
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(x.CreateRevision))
+	case *Compare_ModRevision:
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(x.ModRevision))
+	case *Compare_Value:
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(len(x.Value)))
+		n += len(x.Value)
+	case *Compare_Lease:
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(x.Lease))
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
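+// Illustrative sketch (not part of the generated code): constructing a Compare that
+// guards on a key's version using the TargetUnion oneof defined above, then reading
+// it back through the generated accessors. Only identifiers declared in this file
+// are used; the key and version values are arbitrary.
+func exampleCompareByVersion() {
+	cmp := &Compare{
+		Result:      Compare_EQUAL,                // comparison operator
+		Target:      Compare_VERSION,              // compare the key's version
+		Key:         []byte("foo"),
+		TargetUnion: &Compare_Version{Version: 3}, // oneof member: version to compare against
+	}
+	// The typed getter unwraps the oneof; it returns 0 if a different member is set.
+	fmt.Println(cmp.GetVersion(), cmp.GetResult(), cmp.GetTarget())
+}
+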
+// From google paxosdb paper:
+// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
+// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
+// and consists of three components:
+// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
+// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
+// may apply to the same or different entries in the database. All tests in the guard are applied and
+// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
+// it executes f op (see item 3 below).
+// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
+// lookup operation, and applies to a single database entry. Two different operations in the list may apply
+// to the same or different entries in the database. These operations are executed if guard evaluates to true.
+// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
+type TxnRequest struct {
+	// compare is a list of predicates representing a conjunction of terms.
+	// If the comparisons succeed, then the success requests will be processed in order,
+	// and the response will contain their respective responses in order.
+	// If the comparisons fail, then the failure requests will be processed in order,
+	// and the response will contain their respective responses in order.
+	Compare []*Compare `protobuf:"bytes,1,rep,name=compare,proto3" json:"compare,omitempty"`
+	// success is a list of requests which will be applied when compare evaluates to true.
+	Success []*RequestOp `protobuf:"bytes,2,rep,name=success,proto3" json:"success,omitempty"`
+	// failure is a list of requests which will be applied when compare evaluates to false.
+	Failure              []*RequestOp `protobuf:"bytes,3,rep,name=failure,proto3" json:"failure,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *TxnRequest) Reset()         { *m = TxnRequest{} }
+func (m *TxnRequest) String() string { return proto.CompactTextString(m) }
+func (*TxnRequest) ProtoMessage()    {}
+func (*TxnRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{10}
+}
+func (m *TxnRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TxnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TxnRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TxnRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TxnRequest.Merge(m, src)
+}
+func (m *TxnRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *TxnRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_TxnRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TxnRequest proto.InternalMessageInfo
+
+func (m *TxnRequest) GetCompare() []*Compare {
+	if m != nil {
+		return m.Compare
+	}
+	return nil
+}
+
+func (m *TxnRequest) GetSuccess() []*RequestOp {
+	if m != nil {
+		return m.Success
+	}
+	return nil
+}
+
+func (m *TxnRequest) GetFailure() []*RequestOp {
+	if m != nil {
+		return m.Failure
+	}
+	return nil
+}
+
+type TxnResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// succeeded is set to true if the compare evaluated to true, and false otherwise.
+	Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"`
+	// responses is a list of responses corresponding to the results from applying
+	// success if succeeded is true or failure if succeeded is false.
+	Responses            []*ResponseOp `protobuf:"bytes,3,rep,name=responses,proto3" json:"responses,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *TxnResponse) Reset()         { *m = TxnResponse{} }
+func (m *TxnResponse) String() string { return proto.CompactTextString(m) }
+func (*TxnResponse) ProtoMessage()    {}
+func (*TxnResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{11}
+}
+func (m *TxnResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TxnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TxnResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TxnResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TxnResponse.Merge(m, src)
+}
+func (m *TxnResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *TxnResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_TxnResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TxnResponse proto.InternalMessageInfo
+
+func (m *TxnResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *TxnResponse) GetSucceeded() bool {
+	if m != nil {
+		return m.Succeeded
+	}
+	return false
+}
+
+func (m *TxnResponse) GetResponses() []*ResponseOp {
+	if m != nil {
+		return m.Responses
+	}
+	return nil
+}
+
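+// Illustrative sketch (not part of the generated code): a minimal MultiOp-style
+// transaction message as described in the comment above TxnRequest. The guard is a
+// single Compare on "foo"'s version; Success and Failure are left empty here because
+// the RequestOp oneof wrappers are defined earlier in this file. checkTxn shows how a
+// caller would inspect the matching TxnResponse.
+func exampleTxnRequest() *TxnRequest {
+	return &TxnRequest{
+		Compare: []*Compare{{
+			Result:      Compare_EQUAL,
+			Target:      Compare_VERSION,
+			Key:         []byte("foo"),
+			TargetUnion: &Compare_Version{Version: 3},
+		}},
+		Success: []*RequestOp{}, // ops applied when all compares evaluate to true
+		Failure: []*RequestOp{}, // ops applied otherwise
+	}
+}
+
+func checkTxn(resp *TxnResponse) {
+	// Succeeded reports which branch ran; Responses mirror that branch's requests, in order.
+	fmt.Println("guard passed:", resp.GetSucceeded(), "responses:", len(resp.GetResponses()))
+}
+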
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+type CompactionRequest struct {
+	// revision is the key-value store revision for the compaction operation.
+	Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
+	// physical is set so the RPC will wait until the compaction is physically
+	// applied to the local database such that compacted entries are totally
+	// removed from the backend database.
+	Physical             bool     `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *CompactionRequest) Reset()         { *m = CompactionRequest{} }
+func (m *CompactionRequest) String() string { return proto.CompactTextString(m) }
+func (*CompactionRequest) ProtoMessage()    {}
+func (*CompactionRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{12}
+}
+func (m *CompactionRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CompactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CompactionRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CompactionRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CompactionRequest.Merge(m, src)
+}
+func (m *CompactionRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *CompactionRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CompactionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompactionRequest proto.InternalMessageInfo
+
+func (m *CompactionRequest) GetRevision() int64 {
+	if m != nil {
+		return m.Revision
+	}
+	return 0
+}
+
+func (m *CompactionRequest) GetPhysical() bool {
+	if m != nil {
+		return m.Physical
+	}
+	return false
+}
+
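+// Illustrative sketch (not part of the generated code): a compaction request that
+// drops superseded keys below a revision and waits for the physical compaction, per
+// the CompactionRequest field comments above. The revision value is arbitrary.
+func exampleCompactionRequest() *CompactionRequest {
+	return &CompactionRequest{
+		Revision: 100,  // compact everything superseded before this revision
+		Physical: true, // block until compacted entries are removed from the backend
+	}
+}
+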
+type CompactionResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *CompactionResponse) Reset()         { *m = CompactionResponse{} }
+func (m *CompactionResponse) String() string { return proto.CompactTextString(m) }
+func (*CompactionResponse) ProtoMessage()    {}
+func (*CompactionResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{13}
+}
+func (m *CompactionResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CompactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CompactionResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CompactionResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CompactionResponse.Merge(m, src)
+}
+func (m *CompactionResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *CompactionResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CompactionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompactionResponse proto.InternalMessageInfo
+
+func (m *CompactionResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type HashRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HashRequest) Reset()         { *m = HashRequest{} }
+func (m *HashRequest) String() string { return proto.CompactTextString(m) }
+func (*HashRequest) ProtoMessage()    {}
+func (*HashRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{14}
+}
+func (m *HashRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HashRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HashRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HashRequest.Merge(m, src)
+}
+func (m *HashRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *HashRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_HashRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashRequest proto.InternalMessageInfo
+
+type HashKVRequest struct {
+	// revision is the key-value store revision for the hash operation.
+	Revision             int64    `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HashKVRequest) Reset()         { *m = HashKVRequest{} }
+func (m *HashKVRequest) String() string { return proto.CompactTextString(m) }
+func (*HashKVRequest) ProtoMessage()    {}
+func (*HashKVRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{15}
+}
+func (m *HashKVRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HashKVRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HashKVRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HashKVRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HashKVRequest.Merge(m, src)
+}
+func (m *HashKVRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *HashKVRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_HashKVRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashKVRequest proto.InternalMessageInfo
+
+func (m *HashKVRequest) GetRevision() int64 {
+	if m != nil {
+		return m.Revision
+	}
+	return 0
+}
+
+type HashKVResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// hash is the hash value computed from the responding member's MVCC keys up to a given revision.
+	Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
+	// compact_revision is the compacted revision of key-value store when hash begins.
+	CompactRevision      int64    `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HashKVResponse) Reset()         { *m = HashKVResponse{} }
+func (m *HashKVResponse) String() string { return proto.CompactTextString(m) }
+func (*HashKVResponse) ProtoMessage()    {}
+func (*HashKVResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{16}
+}
+func (m *HashKVResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HashKVResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HashKVResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HashKVResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HashKVResponse.Merge(m, src)
+}
+func (m *HashKVResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *HashKVResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_HashKVResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashKVResponse proto.InternalMessageInfo
+
+func (m *HashKVResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *HashKVResponse) GetHash() uint32 {
+	if m != nil {
+		return m.Hash
+	}
+	return 0
+}
+
+func (m *HashKVResponse) GetCompactRevision() int64 {
+	if m != nil {
+		return m.CompactRevision
+	}
+	return 0
+}
+
+type HashResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// hash is the hash value computed from the responding member's KV's backend.
+	Hash                 uint32   `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HashResponse) Reset()         { *m = HashResponse{} }
+func (m *HashResponse) String() string { return proto.CompactTextString(m) }
+func (*HashResponse) ProtoMessage()    {}
+func (*HashResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{17}
+}
+func (m *HashResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HashResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HashResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HashResponse.Merge(m, src)
+}
+func (m *HashResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *HashResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_HashResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashResponse proto.InternalMessageInfo
+
+func (m *HashResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *HashResponse) GetHash() uint32 {
+	if m != nil {
+		return m.Hash
+	}
+	return 0
+}
+
+type SnapshotRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *SnapshotRequest) Reset()         { *m = SnapshotRequest{} }
+func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*SnapshotRequest) ProtoMessage()    {}
+func (*SnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{18}
+}
+func (m *SnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SnapshotRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SnapshotRequest.Merge(m, src)
+}
+func (m *SnapshotRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *SnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_SnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotRequest proto.InternalMessageInfo
+
+type SnapshotResponse struct {
+	// header has the current key-value store information. The first header in the snapshot
+	// stream indicates the point in time of the snapshot.
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// remaining_bytes is the number of blob bytes to be sent after this message
+	RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"`
+	// blob contains the next chunk of the snapshot in the snapshot stream.
+	Blob                 []byte   `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *SnapshotResponse) Reset()         { *m = SnapshotResponse{} }
+func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*SnapshotResponse) ProtoMessage()    {}
+func (*SnapshotResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{19}
+}
+func (m *SnapshotResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SnapshotResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SnapshotResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SnapshotResponse.Merge(m, src)
+}
+func (m *SnapshotResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *SnapshotResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_SnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotResponse proto.InternalMessageInfo
+
+func (m *SnapshotResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *SnapshotResponse) GetRemainingBytes() uint64 {
+	if m != nil {
+		return m.RemainingBytes
+	}
+	return 0
+}
+
+func (m *SnapshotResponse) GetBlob() []byte {
+	if m != nil {
+		return m.Blob
+	}
+	return nil
+}
+
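+// Illustrative sketch (not part of the generated code): reassembling a snapshot from
+// a slice of SnapshotResponse messages, using only the Blob and RemainingBytes fields
+// documented above. A real caller would receive these from a streaming RPC rather
+// than a slice; that transport is assumed here.
+func assembleSnapshot(chunks []*SnapshotResponse) []byte {
+	var blob []byte
+	for _, c := range chunks {
+		blob = append(blob, c.GetBlob()...) // each message carries the next chunk
+		if c.GetRemainingBytes() == 0 {     // zero means this was the final chunk
+			break
+		}
+	}
+	return blob
+}
+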
+type WatchRequest struct {
+	// request_union is a request to either create a new watcher or cancel an existing watcher.
+	//
+	// Types that are valid to be assigned to RequestUnion:
+	//	*WatchRequest_CreateRequest
+	//	*WatchRequest_CancelRequest
+	//	*WatchRequest_ProgressRequest
+	RequestUnion         isWatchRequest_RequestUnion `protobuf_oneof:"request_union"`
+	XXX_NoUnkeyedLiteral struct{}                    `json:"-"`
+	XXX_unrecognized     []byte                      `json:"-"`
+	XXX_sizecache        int32                       `json:"-"`
+}
+
+func (m *WatchRequest) Reset()         { *m = WatchRequest{} }
+func (m *WatchRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchRequest) ProtoMessage()    {}
+func (*WatchRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{20}
+}
+func (m *WatchRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchRequest.Merge(m, src)
+}
+func (m *WatchRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchRequest proto.InternalMessageInfo
+
+type isWatchRequest_RequestUnion interface {
+	isWatchRequest_RequestUnion()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type WatchRequest_CreateRequest struct {
+	CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,proto3,oneof"`
+}
+type WatchRequest_CancelRequest struct {
+	CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3,oneof"`
+}
+type WatchRequest_ProgressRequest struct {
+	ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,proto3,oneof"`
+}
+
+func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion()   {}
+func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion()   {}
+func (*WatchRequest_ProgressRequest) isWatchRequest_RequestUnion() {}
+
+func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion {
+	if m != nil {
+		return m.RequestUnion
+	}
+	return nil
+}
+
+func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest {
+	if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok {
+		return x.CreateRequest
+	}
+	return nil
+}
+
+func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest {
+	if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok {
+		return x.CancelRequest
+	}
+	return nil
+}
+
+func (m *WatchRequest) GetProgressRequest() *WatchProgressRequest {
+	if x, ok := m.GetRequestUnion().(*WatchRequest_ProgressRequest); ok {
+		return x.ProgressRequest
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{
+		(*WatchRequest_CreateRequest)(nil),
+		(*WatchRequest_CancelRequest)(nil),
+		(*WatchRequest_ProgressRequest)(nil),
+	}
+}
+
+func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*WatchRequest)
+	// request_union
+	switch x := m.RequestUnion.(type) {
+	case *WatchRequest_CreateRequest:
+		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.CreateRequest); err != nil {
+			return err
+		}
+	case *WatchRequest_CancelRequest:
+		_ = b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.CancelRequest); err != nil {
+			return err
+		}
+	case *WatchRequest_ProgressRequest:
+		_ = b.EncodeVarint(3<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ProgressRequest); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*WatchRequest)
+	switch tag {
+	case 1: // request_union.create_request
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(WatchCreateRequest)
+		err := b.DecodeMessage(msg)
+		m.RequestUnion = &WatchRequest_CreateRequest{msg}
+		return true, err
+	case 2: // request_union.cancel_request
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(WatchCancelRequest)
+		err := b.DecodeMessage(msg)
+		m.RequestUnion = &WatchRequest_CancelRequest{msg}
+		return true, err
+	case 3: // request_union.progress_request
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(WatchProgressRequest)
+		err := b.DecodeMessage(msg)
+		m.RequestUnion = &WatchRequest_ProgressRequest{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _WatchRequest_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*WatchRequest)
+	// request_union
+	switch x := m.RequestUnion.(type) {
+	case *WatchRequest_CreateRequest:
+		s := proto.Size(x.CreateRequest)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *WatchRequest_CancelRequest:
+		s := proto.Size(x.CancelRequest)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *WatchRequest_ProgressRequest:
+		s := proto.Size(x.ProgressRequest)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+type WatchCreateRequest struct {
+	// key is the key to register for watching.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// range_end is the end of the range [key, range_end) to watch. If range_end is not given,
+	// only the key argument is watched. If range_end is equal to '\0', all keys greater than
+	// or equal to the key argument are watched.
+	// If the range_end is the given key plus one (the key with its last byte incremented),
+	// then all keys with the prefix (the given key) will be watched.
+	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	// start_revision is an optional revision to watch from (inclusive). If start_revision is unset, watching starts from "now".
+	StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"`
+	// progress_notify is set so that the etcd server will periodically send a WatchResponse with
+	// no events to the new watcher if there are no recent events. It is useful when clients
+	// wish to recover a disconnected watcher starting from a recent known revision.
+	// The etcd server may decide how often it will send notifications based on current load.
+	ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"`
+	// filters filter the events on the server side before they are sent back to the watcher.
+	Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,proto3,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"`
+	// If prev_kv is set, the created watcher gets the previous KV before the event happens.
+	// If the previous KV is already compacted, nothing will be returned.
+	PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	// If watch_id is provided and non-zero, it will be assigned to this watcher.
+	// Since creating a watcher in etcd is not a synchronous operation,
+	// this can be used to ensure that ordering is correct when creating multiple
+	// watchers on the same stream. Creating a watcher with an ID already in
+	// use on the stream will cause an error to be returned.
+	WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+	// fragment enables splitting large revisions into multiple watch responses.
+	Fragment             bool     `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *WatchCreateRequest) Reset()         { *m = WatchCreateRequest{} }
+func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchCreateRequest) ProtoMessage()    {}
+func (*WatchCreateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{21}
+}
+func (m *WatchCreateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchCreateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchCreateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchCreateRequest.Merge(m, src)
+}
+func (m *WatchCreateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchCreateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchCreateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchCreateRequest proto.InternalMessageInfo
+
+func (m *WatchCreateRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *WatchCreateRequest) GetRangeEnd() []byte {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return nil
+}
+
+func (m *WatchCreateRequest) GetStartRevision() int64 {
+	if m != nil {
+		return m.StartRevision
+	}
+	return 0
+}
+
+func (m *WatchCreateRequest) GetProgressNotify() bool {
+	if m != nil {
+		return m.ProgressNotify
+	}
+	return false
+}
+
+func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType {
+	if m != nil {
+		return m.Filters
+	}
+	return nil
+}
+
+func (m *WatchCreateRequest) GetPrevKv() bool {
+	if m != nil {
+		return m.PrevKv
+	}
+	return false
+}
+
+func (m *WatchCreateRequest) GetWatchId() int64 {
+	if m != nil {
+		return m.WatchId
+	}
+	return 0
+}
+
+func (m *WatchCreateRequest) GetFragment() bool {
+	if m != nil {
+		return m.Fragment
+	}
+	return false
+}
+
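+// Illustrative sketch (not part of the generated code): building a WatchRequest that
+// watches the prefix "foo/" by setting range_end to the key with its last byte
+// incremented, as described in the WatchCreateRequest comments above. The
+// end-of-prefix computation is ad hoc and ignores the all-0xff edge case.
+func exampleWatchPrefix() *WatchRequest {
+	key := []byte("foo/")
+	end := make([]byte, len(key))
+	copy(end, key)
+	end[len(end)-1]++ // "foo/" -> "foo0": the smallest key after the prefix
+	return &WatchRequest{
+		RequestUnion: &WatchRequest_CreateRequest{
+			CreateRequest: &WatchCreateRequest{
+				Key:            key,
+				RangeEnd:       end,
+				StartRevision:  0,    // 0 / unset means "now"
+				ProgressNotify: true, // ask for periodic empty progress responses
+			},
+		},
+	}
+}
+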
+type WatchCancelRequest struct {
+	// watch_id is the watcher id to cancel so that no more events are transmitted.
+	WatchId              int64    `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *WatchCancelRequest) Reset()         { *m = WatchCancelRequest{} }
+func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchCancelRequest) ProtoMessage()    {}
+func (*WatchCancelRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{22}
+}
+func (m *WatchCancelRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchCancelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchCancelRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchCancelRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchCancelRequest.Merge(m, src)
+}
+func (m *WatchCancelRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchCancelRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchCancelRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchCancelRequest proto.InternalMessageInfo
+
+func (m *WatchCancelRequest) GetWatchId() int64 {
+	if m != nil {
+		return m.WatchId
+	}
+	return 0
+}
+
+// Requests that a watch stream progress status be sent in the watch response stream as soon as
+// possible.
+type WatchProgressRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *WatchProgressRequest) Reset()         { *m = WatchProgressRequest{} }
+func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchProgressRequest) ProtoMessage()    {}
+func (*WatchProgressRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{23}
+}
+func (m *WatchProgressRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchProgressRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchProgressRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchProgressRequest.Merge(m, src)
+}
+func (m *WatchProgressRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchProgressRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchProgressRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchProgressRequest proto.InternalMessageInfo
+
+type WatchResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// watch_id is the ID of the watcher that corresponds to the response.
+	WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+	// created is set to true if the response is for a create watch request.
+	// The client should record the watch_id and expect to receive events for
+	// the created watcher from the same stream.
+	// All events sent to the created watcher will attach with the same watch_id.
+	Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
+	// canceled is set to true if the response is for a cancel watch request.
+	// No further events will be sent to the canceled watcher.
+	Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"`
+	// compact_revision is set to the minimum index if a watcher tries to watch
+	// at a compacted index.
+	//
+	// This happens when creating a watcher at a compacted revision or the watcher cannot
+	// catch up with the progress of the key-value store.
+	//
+	// The client should treat the watcher as canceled and should not try to create any
+	// watcher with the same start_revision again.
+	CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+	// cancel_reason indicates the reason for canceling the watcher.
+	CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"`
+	// fragment is true if a large watch response was split over multiple responses.
+	Fragment             bool            `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"`
+	Events               []*mvccpb.Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *WatchResponse) Reset()         { *m = WatchResponse{} }
+func (m *WatchResponse) String() string { return proto.CompactTextString(m) }
+func (*WatchResponse) ProtoMessage()    {}
+func (*WatchResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{24}
+}
+func (m *WatchResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchResponse.Merge(m, src)
+}
+func (m *WatchResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchResponse proto.InternalMessageInfo
+
+func (m *WatchResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *WatchResponse) GetWatchId() int64 {
+	if m != nil {
+		return m.WatchId
+	}
+	return 0
+}
+
+func (m *WatchResponse) GetCreated() bool {
+	if m != nil {
+		return m.Created
+	}
+	return false
+}
+
+func (m *WatchResponse) GetCanceled() bool {
+	if m != nil {
+		return m.Canceled
+	}
+	return false
+}
+
+func (m *WatchResponse) GetCompactRevision() int64 {
+	if m != nil {
+		return m.CompactRevision
+	}
+	return 0
+}
+
+func (m *WatchResponse) GetCancelReason() string {
+	if m != nil {
+		return m.CancelReason
+	}
+	return ""
+}
+
+func (m *WatchResponse) GetFragment() bool {
+	if m != nil {
+		return m.Fragment
+	}
+	return false
+}
+
+func (m *WatchResponse) GetEvents() []*mvccpb.Event {
+	if m != nil {
+		return m.Events
+	}
+	return nil
+}
+
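+// Illustrative sketch (not part of the generated code): inspecting a WatchResponse
+// using the accessors above. Events come from the mvccpb package already imported by
+// this file; they are only printed in text form here, since their fields are defined there.
+func describeWatchResponse(resp *WatchResponse) {
+	switch {
+	case resp.GetCreated():
+		fmt.Println("watcher created, id =", resp.GetWatchId())
+	case resp.GetCanceled():
+		fmt.Println("watcher canceled:", resp.GetCancelReason(),
+			"compact_revision =", resp.GetCompactRevision())
+	default:
+		for _, ev := range resp.GetEvents() {
+			fmt.Println("event:", proto.CompactTextString(ev)) // text form of mvccpb.Event
+		}
+	}
+}
+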
+type LeaseGrantRequest struct {
+	// TTL is the advisory time-to-live in seconds. An expired lease will return -1.
+	TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	// ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
+	ID                   int64    `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseGrantRequest) Reset()         { *m = LeaseGrantRequest{} }
+func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseGrantRequest) ProtoMessage()    {}
+func (*LeaseGrantRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{25}
+}
+func (m *LeaseGrantRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseGrantRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseGrantRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseGrantRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseGrantRequest.Merge(m, src)
+}
+func (m *LeaseGrantRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseGrantRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseGrantRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseGrantRequest proto.InternalMessageInfo
+
+func (m *LeaseGrantRequest) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+func (m *LeaseGrantRequest) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type LeaseGrantResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// ID is the lease ID for the granted lease.
+	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+	// TTL is the server chosen lease time-to-live in seconds.
+	TTL                  int64    `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	Error                string   `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseGrantResponse) Reset()         { *m = LeaseGrantResponse{} }
+func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseGrantResponse) ProtoMessage()    {}
+func (*LeaseGrantResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{26}
+}
+func (m *LeaseGrantResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseGrantResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseGrantResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseGrantResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseGrantResponse.Merge(m, src)
+}
+func (m *LeaseGrantResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseGrantResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseGrantResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseGrantResponse proto.InternalMessageInfo
+
+func (m *LeaseGrantResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *LeaseGrantResponse) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *LeaseGrantResponse) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+func (m *LeaseGrantResponse) GetError() string {
+	if m != nil {
+		return m.Error
+	}
+	return ""
+}
+
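+// Illustrative sketch (not part of the generated code): a lease grant asking for a
+// 60-second TTL and letting the lessor choose the ID (ID = 0), then reading the
+// server-chosen values from the matching response. The TTL value is arbitrary.
+func exampleLeaseGrant() *LeaseGrantRequest {
+	return &LeaseGrantRequest{TTL: 60, ID: 0}
+}
+
+func describeLeaseGrant(resp *LeaseGrantResponse) {
+	// TTL is server-chosen and may differ from the request; Error carries a server-side message, if any.
+	fmt.Println("lease", resp.GetID(), "ttl", resp.GetTTL(), "err", resp.GetError())
+}
+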
+type LeaseRevokeRequest struct {
+	// ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
+	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseRevokeRequest) Reset()         { *m = LeaseRevokeRequest{} }
+func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseRevokeRequest) ProtoMessage()    {}
+func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{27}
+}
+func (m *LeaseRevokeRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseRevokeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseRevokeRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseRevokeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseRevokeRequest.Merge(m, src)
+}
+func (m *LeaseRevokeRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseRevokeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseRevokeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseRevokeRequest proto.InternalMessageInfo
+
+func (m *LeaseRevokeRequest) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type LeaseRevokeResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *LeaseRevokeResponse) Reset()         { *m = LeaseRevokeResponse{} }
+func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseRevokeResponse) ProtoMessage()    {}
+func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{28}
+}
+func (m *LeaseRevokeResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseRevokeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseRevokeResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseRevokeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseRevokeResponse.Merge(m, src)
+}
+func (m *LeaseRevokeResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseRevokeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseRevokeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseRevokeResponse proto.InternalMessageInfo
+
+func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type LeaseKeepAliveRequest struct {
+	// ID is the lease ID for the lease to keep alive.
+	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseKeepAliveRequest) Reset()         { *m = LeaseKeepAliveRequest{} }
+func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseKeepAliveRequest) ProtoMessage()    {}
+func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{29}
+}
+func (m *LeaseKeepAliveRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseKeepAliveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseKeepAliveRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseKeepAliveRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseKeepAliveRequest.Merge(m, src)
+}
+func (m *LeaseKeepAliveRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseKeepAliveRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseKeepAliveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseKeepAliveRequest proto.InternalMessageInfo
+
+func (m *LeaseKeepAliveRequest) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type LeaseKeepAliveResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// ID is the lease ID from the keep alive request.
+	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+	// TTL is the new time-to-live for the lease.
+	TTL                  int64    `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseKeepAliveResponse) Reset()         { *m = LeaseKeepAliveResponse{} }
+func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseKeepAliveResponse) ProtoMessage()    {}
+func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{30}
+}
+func (m *LeaseKeepAliveResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseKeepAliveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseKeepAliveResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseKeepAliveResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseKeepAliveResponse.Merge(m, src)
+}
+func (m *LeaseKeepAliveResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseKeepAliveResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseKeepAliveResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseKeepAliveResponse proto.InternalMessageInfo
+
+func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *LeaseKeepAliveResponse) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *LeaseKeepAliveResponse) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+type LeaseTimeToLiveRequest struct {
+	// ID is the lease ID for the lease.
+	ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// keys is true to query all the keys attached to this lease.
+	Keys                 bool     `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseTimeToLiveRequest) Reset()         { *m = LeaseTimeToLiveRequest{} }
+func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseTimeToLiveRequest) ProtoMessage()    {}
+func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{31}
+}
+func (m *LeaseTimeToLiveRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseTimeToLiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseTimeToLiveRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseTimeToLiveRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseTimeToLiveRequest.Merge(m, src)
+}
+func (m *LeaseTimeToLiveRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseTimeToLiveRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseTimeToLiveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseTimeToLiveRequest proto.InternalMessageInfo
+
+func (m *LeaseTimeToLiveRequest) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *LeaseTimeToLiveRequest) GetKeys() bool {
+	if m != nil {
+		return m.Keys
+	}
+	return false
+}
+
+type LeaseTimeToLiveResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// ID is the lease ID from the keep alive request.
+	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+	TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+	GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"`
+	// Keys is the list of keys attached to this lease.
+	Keys                 [][]byte `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseTimeToLiveResponse) Reset()         { *m = LeaseTimeToLiveResponse{} }
+func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseTimeToLiveResponse) ProtoMessage()    {}
+func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{32}
+}
+func (m *LeaseTimeToLiveResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseTimeToLiveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseTimeToLiveResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseTimeToLiveResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseTimeToLiveResponse.Merge(m, src)
+}
+func (m *LeaseTimeToLiveResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseTimeToLiveResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseTimeToLiveResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseTimeToLiveResponse proto.InternalMessageInfo
+
+func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *LeaseTimeToLiveResponse) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 {
+	if m != nil {
+		return m.GrantedTTL
+	}
+	return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte {
+	if m != nil {
+		return m.Keys
+	}
+	return nil
+}
+
+type LeaseLeasesRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseLeasesRequest) Reset()         { *m = LeaseLeasesRequest{} }
+func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseLeasesRequest) ProtoMessage()    {}
+func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{33}
+}
+func (m *LeaseLeasesRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseLeasesRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseLeasesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseLeasesRequest.Merge(m, src)
+}
+func (m *LeaseLeasesRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseLeasesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseLeasesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseLeasesRequest proto.InternalMessageInfo
+
+type LeaseStatus struct {
+	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseStatus) Reset()         { *m = LeaseStatus{} }
+func (m *LeaseStatus) String() string { return proto.CompactTextString(m) }
+func (*LeaseStatus) ProtoMessage()    {}
+func (*LeaseStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{34}
+}
+func (m *LeaseStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseStatus.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseStatus.Merge(m, src)
+}
+func (m *LeaseStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseStatus proto.InternalMessageInfo
+
+func (m *LeaseStatus) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type LeaseLeasesResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Leases               []*LeaseStatus  `protobuf:"bytes,2,rep,name=leases,proto3" json:"leases,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *LeaseLeasesResponse) Reset()         { *m = LeaseLeasesResponse{} }
+func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseLeasesResponse) ProtoMessage()    {}
+func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{35}
+}
+func (m *LeaseLeasesResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseLeasesResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseLeasesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseLeasesResponse.Merge(m, src)
+}
+func (m *LeaseLeasesResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseLeasesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseLeasesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseLeasesResponse proto.InternalMessageInfo
+
+func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus {
+	if m != nil {
+		return m.Leases
+	}
+	return nil
+}
+
+type Member struct {
+	// ID is the member ID for this member.
+	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// name is the human-readable name of the member. If the member is not started, the name will be an empty string.
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// peerURLs is the list of URLs the member exposes to the cluster for communication.
+	PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
+	// clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
+	ClientURLs           []string `protobuf:"bytes,4,rep,name=clientURLs,proto3" json:"clientURLs,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Member) Reset()         { *m = Member{} }
+func (m *Member) String() string { return proto.CompactTextString(m) }
+func (*Member) ProtoMessage()    {}
+func (*Member) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{36}
+}
+func (m *Member) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Member.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Member) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Member.Merge(m, src)
+}
+func (m *Member) XXX_Size() int {
+	return m.Size()
+}
+func (m *Member) XXX_DiscardUnknown() {
+	xxx_messageInfo_Member.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Member proto.InternalMessageInfo
+
+func (m *Member) GetID() uint64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *Member) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Member) GetPeerURLs() []string {
+	if m != nil {
+		return m.PeerURLs
+	}
+	return nil
+}
+
+func (m *Member) GetClientURLs() []string {
+	if m != nil {
+		return m.ClientURLs
+	}
+	return nil
+}
+
+type MemberAddRequest struct {
+	// peerURLs is the list of URLs the added member will use to communicate with the cluster.
+	PeerURLs             []string `protobuf:"bytes,1,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MemberAddRequest) Reset()         { *m = MemberAddRequest{} }
+func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberAddRequest) ProtoMessage()    {}
+func (*MemberAddRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{37}
+}
+func (m *MemberAddRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberAddRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberAddRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberAddRequest.Merge(m, src)
+}
+func (m *MemberAddRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberAddRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberAddRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberAddRequest proto.InternalMessageInfo
+
+func (m *MemberAddRequest) GetPeerURLs() []string {
+	if m != nil {
+		return m.PeerURLs
+	}
+	return nil
+}
+
+type MemberAddResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// member is the member information for the added member.
+	Member *Member `protobuf:"bytes,2,opt,name=member,proto3" json:"member,omitempty"`
+	// members is a list of all members after adding the new member.
+	Members              []*Member `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *MemberAddResponse) Reset()         { *m = MemberAddResponse{} }
+func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberAddResponse) ProtoMessage()    {}
+func (*MemberAddResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{38}
+}
+func (m *MemberAddResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberAddResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberAddResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberAddResponse.Merge(m, src)
+}
+func (m *MemberAddResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberAddResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberAddResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberAddResponse proto.InternalMessageInfo
+
+func (m *MemberAddResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *MemberAddResponse) GetMember() *Member {
+	if m != nil {
+		return m.Member
+	}
+	return nil
+}
+
+func (m *MemberAddResponse) GetMembers() []*Member {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+type MemberRemoveRequest struct {
+	// ID is the member ID of the member to remove.
+	ID                   uint64   `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MemberRemoveRequest) Reset()         { *m = MemberRemoveRequest{} }
+func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberRemoveRequest) ProtoMessage()    {}
+func (*MemberRemoveRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{39}
+}
+func (m *MemberRemoveRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberRemoveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberRemoveRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberRemoveRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberRemoveRequest.Merge(m, src)
+}
+func (m *MemberRemoveRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberRemoveRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberRemoveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberRemoveRequest proto.InternalMessageInfo
+
+func (m *MemberRemoveRequest) GetID() uint64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type MemberRemoveResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// members is a list of all members after removing the member.
+	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *MemberRemoveResponse) Reset()         { *m = MemberRemoveResponse{} }
+func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberRemoveResponse) ProtoMessage()    {}
+func (*MemberRemoveResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{40}
+}
+func (m *MemberRemoveResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberRemoveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberRemoveResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberRemoveResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberRemoveResponse.Merge(m, src)
+}
+func (m *MemberRemoveResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberRemoveResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberRemoveResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberRemoveResponse proto.InternalMessageInfo
+
+func (m *MemberRemoveResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *MemberRemoveResponse) GetMembers() []*Member {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+type MemberUpdateRequest struct {
+	// ID is the member ID of the member to update.
+	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// peerURLs is the new list of URLs the member will use to communicate with the cluster.
+	PeerURLs             []string `protobuf:"bytes,2,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MemberUpdateRequest) Reset()         { *m = MemberUpdateRequest{} }
+func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberUpdateRequest) ProtoMessage()    {}
+func (*MemberUpdateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{41}
+}
+func (m *MemberUpdateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberUpdateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberUpdateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberUpdateRequest.Merge(m, src)
+}
+func (m *MemberUpdateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberUpdateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberUpdateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberUpdateRequest proto.InternalMessageInfo
+
+func (m *MemberUpdateRequest) GetID() uint64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *MemberUpdateRequest) GetPeerURLs() []string {
+	if m != nil {
+		return m.PeerURLs
+	}
+	return nil
+}
+
+type MemberUpdateResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// members is a list of all members after updating the member.
+	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *MemberUpdateResponse) Reset()         { *m = MemberUpdateResponse{} }
+func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberUpdateResponse) ProtoMessage()    {}
+func (*MemberUpdateResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{42}
+}
+func (m *MemberUpdateResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberUpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberUpdateResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberUpdateResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberUpdateResponse.Merge(m, src)
+}
+func (m *MemberUpdateResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberUpdateResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberUpdateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberUpdateResponse proto.InternalMessageInfo
+
+func (m *MemberUpdateResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *MemberUpdateResponse) GetMembers() []*Member {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+type MemberListRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MemberListRequest) Reset()         { *m = MemberListRequest{} }
+func (m *MemberListRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberListRequest) ProtoMessage()    {}
+func (*MemberListRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{43}
+}
+func (m *MemberListRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberListRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberListRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberListRequest.Merge(m, src)
+}
+func (m *MemberListRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberListRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberListRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberListRequest proto.InternalMessageInfo
+
+type MemberListResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// members is a list of all members associated with the cluster.
+	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *MemberListResponse) Reset()         { *m = MemberListResponse{} }
+func (m *MemberListResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberListResponse) ProtoMessage()    {}
+func (*MemberListResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{44}
+}
+func (m *MemberListResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberListResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberListResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberListResponse.Merge(m, src)
+}
+func (m *MemberListResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberListResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberListResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberListResponse proto.InternalMessageInfo
+
+func (m *MemberListResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *MemberListResponse) GetMembers() []*Member {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+type DefragmentRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DefragmentRequest) Reset()         { *m = DefragmentRequest{} }
+func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) }
+func (*DefragmentRequest) ProtoMessage()    {}
+func (*DefragmentRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{45}
+}
+func (m *DefragmentRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DefragmentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DefragmentRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DefragmentRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DefragmentRequest.Merge(m, src)
+}
+func (m *DefragmentRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DefragmentRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DefragmentRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DefragmentRequest proto.InternalMessageInfo
+
+type DefragmentResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *DefragmentResponse) Reset()         { *m = DefragmentResponse{} }
+func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) }
+func (*DefragmentResponse) ProtoMessage()    {}
+func (*DefragmentResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{46}
+}
+func (m *DefragmentResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DefragmentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DefragmentResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DefragmentResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DefragmentResponse.Merge(m, src)
+}
+func (m *DefragmentResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *DefragmentResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DefragmentResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DefragmentResponse proto.InternalMessageInfo
+
+func (m *DefragmentResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type MoveLeaderRequest struct {
+	// targetID is the node ID for the new leader.
+	TargetID             uint64   `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MoveLeaderRequest) Reset()         { *m = MoveLeaderRequest{} }
+func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) }
+func (*MoveLeaderRequest) ProtoMessage()    {}
+func (*MoveLeaderRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{47}
+}
+func (m *MoveLeaderRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MoveLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MoveLeaderRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MoveLeaderRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MoveLeaderRequest.Merge(m, src)
+}
+func (m *MoveLeaderRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MoveLeaderRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MoveLeaderRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MoveLeaderRequest proto.InternalMessageInfo
+
+func (m *MoveLeaderRequest) GetTargetID() uint64 {
+	if m != nil {
+		return m.TargetID
+	}
+	return 0
+}
+
+type MoveLeaderResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *MoveLeaderResponse) Reset()         { *m = MoveLeaderResponse{} }
+func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) }
+func (*MoveLeaderResponse) ProtoMessage()    {}
+func (*MoveLeaderResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{48}
+}
+func (m *MoveLeaderResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MoveLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MoveLeaderResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MoveLeaderResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MoveLeaderResponse.Merge(m, src)
+}
+func (m *MoveLeaderResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MoveLeaderResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MoveLeaderResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MoveLeaderResponse proto.InternalMessageInfo
+
+func (m *MoveLeaderResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AlarmRequest struct {
+	// action is the kind of alarm request to issue. The action
+	// may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
+	// raised alarm.
+	Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"`
+	// memberID is the ID of the member associated with the alarm. If memberID is 0, the
+	// alarm request covers all members.
+	MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"`
+	// alarm is the type of alarm to consider for this request.
+	Alarm                AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *AlarmRequest) Reset()         { *m = AlarmRequest{} }
+func (m *AlarmRequest) String() string { return proto.CompactTextString(m) }
+func (*AlarmRequest) ProtoMessage()    {}
+func (*AlarmRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{49}
+}
+func (m *AlarmRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AlarmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AlarmRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AlarmRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmRequest.Merge(m, src)
+}
+func (m *AlarmRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AlarmRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmRequest proto.InternalMessageInfo
+
+func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction {
+	if m != nil {
+		return m.Action
+	}
+	return AlarmRequest_GET
+}
+
+func (m *AlarmRequest) GetMemberID() uint64 {
+	if m != nil {
+		return m.MemberID
+	}
+	return 0
+}
+
+func (m *AlarmRequest) GetAlarm() AlarmType {
+	if m != nil {
+		return m.Alarm
+	}
+	return AlarmType_NONE
+}
+
+type AlarmMember struct {
+	// memberID is the ID of the member associated with the raised alarm.
+	MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"`
+	// alarm is the type of alarm which has been raised.
+	Alarm                AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *AlarmMember) Reset()         { *m = AlarmMember{} }
+func (m *AlarmMember) String() string { return proto.CompactTextString(m) }
+func (*AlarmMember) ProtoMessage()    {}
+func (*AlarmMember) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{50}
+}
+func (m *AlarmMember) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AlarmMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AlarmMember.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AlarmMember) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmMember.Merge(m, src)
+}
+func (m *AlarmMember) XXX_Size() int {
+	return m.Size()
+}
+func (m *AlarmMember) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmMember.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmMember proto.InternalMessageInfo
+
+func (m *AlarmMember) GetMemberID() uint64 {
+	if m != nil {
+		return m.MemberID
+	}
+	return 0
+}
+
+func (m *AlarmMember) GetAlarm() AlarmType {
+	if m != nil {
+		return m.Alarm
+	}
+	return AlarmType_NONE
+}
+
+type AlarmResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// alarms is a list of alarms associated with the alarm request.
+	Alarms               []*AlarmMember `protobuf:"bytes,2,rep,name=alarms,proto3" json:"alarms,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *AlarmResponse) Reset()         { *m = AlarmResponse{} }
+func (m *AlarmResponse) String() string { return proto.CompactTextString(m) }
+func (*AlarmResponse) ProtoMessage()    {}
+func (*AlarmResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{51}
+}
+func (m *AlarmResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AlarmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AlarmResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AlarmResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmResponse.Merge(m, src)
+}
+func (m *AlarmResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AlarmResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmResponse proto.InternalMessageInfo
+
+func (m *AlarmResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AlarmResponse) GetAlarms() []*AlarmMember {
+	if m != nil {
+		return m.Alarms
+	}
+	return nil
+}
+
+type StatusRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StatusRequest) Reset()         { *m = StatusRequest{} }
+func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
+func (*StatusRequest) ProtoMessage()    {}
+func (*StatusRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{52}
+}
+func (m *StatusRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StatusRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StatusRequest.Merge(m, src)
+}
+func (m *StatusRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *StatusRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_StatusRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatusRequest proto.InternalMessageInfo
+
+type StatusResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// version is the cluster protocol version used by the responding member.
+	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+	// dbSize is the size of the backend database, in bytes, of the responding member.
+	DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"`
+	// leader is the member ID which the responding member believes is the current leader.
+	Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"`
+	// raftIndex is the current raft index of the responding member.
+	RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"`
+	// raftTerm is the current raft term of the responding member.
+	RaftTerm             uint64   `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StatusResponse) Reset()         { *m = StatusResponse{} }
+func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
+func (*StatusResponse) ProtoMessage()    {}
+func (*StatusResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{53}
+}
+func (m *StatusResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StatusResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StatusResponse.Merge(m, src)
+}
+func (m *StatusResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *StatusResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_StatusResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatusResponse proto.InternalMessageInfo
+
+func (m *StatusResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *StatusResponse) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *StatusResponse) GetDbSize() int64 {
+	if m != nil {
+		return m.DbSize
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetLeader() uint64 {
+	if m != nil {
+		return m.Leader
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetRaftIndex() uint64 {
+	if m != nil {
+		return m.RaftIndex
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetRaftTerm() uint64 {
+	if m != nil {
+		return m.RaftTerm
+	}
+	return 0
+}
+
+type AuthEnableRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthEnableRequest) Reset()         { *m = AuthEnableRequest{} }
+func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthEnableRequest) ProtoMessage()    {}
+func (*AuthEnableRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{54}
+}
+func (m *AuthEnableRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthEnableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthEnableRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthEnableRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthEnableRequest.Merge(m, src)
+}
+func (m *AuthEnableRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthEnableRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthEnableRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthEnableRequest proto.InternalMessageInfo
+
+type AuthDisableRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthDisableRequest) Reset()         { *m = AuthDisableRequest{} }
+func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthDisableRequest) ProtoMessage()    {}
+func (*AuthDisableRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{55}
+}
+func (m *AuthDisableRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthDisableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthDisableRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthDisableRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthDisableRequest.Merge(m, src)
+}
+func (m *AuthDisableRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthDisableRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthDisableRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthDisableRequest proto.InternalMessageInfo
+
+type AuthenticateRequest struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Password             string   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthenticateRequest) Reset()         { *m = AuthenticateRequest{} }
+func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthenticateRequest) ProtoMessage()    {}
+func (*AuthenticateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{56}
+}
+func (m *AuthenticateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthenticateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthenticateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthenticateRequest.Merge(m, src)
+}
+func (m *AuthenticateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthenticateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthenticateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthenticateRequest proto.InternalMessageInfo
+
+func (m *AuthenticateRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthenticateRequest) GetPassword() string {
+	if m != nil {
+		return m.Password
+	}
+	return ""
+}
+
+type AuthUserAddRequest struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Password             string   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserAddRequest) Reset()         { *m = AuthUserAddRequest{} }
+func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserAddRequest) ProtoMessage()    {}
+func (*AuthUserAddRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{57}
+}
+func (m *AuthUserAddRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserAddRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserAddRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserAddRequest.Merge(m, src)
+}
+func (m *AuthUserAddRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserAddRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserAddRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserAddRequest proto.InternalMessageInfo
+
+func (m *AuthUserAddRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthUserAddRequest) GetPassword() string {
+	if m != nil {
+		return m.Password
+	}
+	return ""
+}
+
+type AuthUserGetRequest struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserGetRequest) Reset()         { *m = AuthUserGetRequest{} }
+func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGetRequest) ProtoMessage()    {}
+func (*AuthUserGetRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{58}
+}
+func (m *AuthUserGetRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserGetRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserGetRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserGetRequest.Merge(m, src)
+}
+func (m *AuthUserGetRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserGetRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserGetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGetRequest proto.InternalMessageInfo
+
+func (m *AuthUserGetRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type AuthUserDeleteRequest struct {
+	// name is the name of the user to delete.
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserDeleteRequest) Reset()         { *m = AuthUserDeleteRequest{} }
+func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserDeleteRequest) ProtoMessage()    {}
+func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{59}
+}
+func (m *AuthUserDeleteRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserDeleteRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserDeleteRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserDeleteRequest.Merge(m, src)
+}
+func (m *AuthUserDeleteRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserDeleteRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserDeleteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserDeleteRequest proto.InternalMessageInfo
+
+func (m *AuthUserDeleteRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type AuthUserChangePasswordRequest struct {
+	// name is the name of the user whose password is being changed.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// password is the new password for the user.
+	Password             string   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserChangePasswordRequest) Reset()         { *m = AuthUserChangePasswordRequest{} }
+func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserChangePasswordRequest) ProtoMessage()    {}
+func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{60}
+}
+func (m *AuthUserChangePasswordRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserChangePasswordRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserChangePasswordRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserChangePasswordRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserChangePasswordRequest.Merge(m, src)
+}
+func (m *AuthUserChangePasswordRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserChangePasswordRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserChangePasswordRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserChangePasswordRequest proto.InternalMessageInfo
+
+func (m *AuthUserChangePasswordRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthUserChangePasswordRequest) GetPassword() string {
+	if m != nil {
+		return m.Password
+	}
+	return ""
+}
+
+type AuthUserGrantRoleRequest struct {
+	// user is the name of the user which should be granted a given role.
+	User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
+	// role is the name of the role to grant to the user.
+	Role                 string   `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserGrantRoleRequest) Reset()         { *m = AuthUserGrantRoleRequest{} }
+func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGrantRoleRequest) ProtoMessage()    {}
+func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{61}
+}
+func (m *AuthUserGrantRoleRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserGrantRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserGrantRoleRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserGrantRoleRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserGrantRoleRequest.Merge(m, src)
+}
+func (m *AuthUserGrantRoleRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserGrantRoleRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserGrantRoleRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGrantRoleRequest proto.InternalMessageInfo
+
+func (m *AuthUserGrantRoleRequest) GetUser() string {
+	if m != nil {
+		return m.User
+	}
+	return ""
+}
+
+func (m *AuthUserGrantRoleRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthUserRevokeRoleRequest struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Role                 string   `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserRevokeRoleRequest) Reset()         { *m = AuthUserRevokeRoleRequest{} }
+func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserRevokeRoleRequest) ProtoMessage()    {}
+func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{62}
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserRevokeRoleRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserRevokeRoleRequest.Merge(m, src)
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserRevokeRoleRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserRevokeRoleRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserRevokeRoleRequest proto.InternalMessageInfo
+
+func (m *AuthUserRevokeRoleRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthUserRevokeRoleRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthRoleAddRequest struct {
+	// name is the name of the role to add to the authentication system.
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleAddRequest) Reset()         { *m = AuthRoleAddRequest{} }
+func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleAddRequest) ProtoMessage()    {}
+func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{63}
+}
+func (m *AuthRoleAddRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleAddRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleAddRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleAddRequest.Merge(m, src)
+}
+func (m *AuthRoleAddRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleAddRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleAddRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleAddRequest proto.InternalMessageInfo
+
+func (m *AuthRoleAddRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type AuthRoleGetRequest struct {
+	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleGetRequest) Reset()         { *m = AuthRoleGetRequest{} }
+func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGetRequest) ProtoMessage()    {}
+func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{64}
+}
+func (m *AuthRoleGetRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleGetRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleGetRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleGetRequest.Merge(m, src)
+}
+func (m *AuthRoleGetRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleGetRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleGetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGetRequest proto.InternalMessageInfo
+
+func (m *AuthRoleGetRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthUserListRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserListRequest) Reset()         { *m = AuthUserListRequest{} }
+func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserListRequest) ProtoMessage()    {}
+func (*AuthUserListRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{65}
+}
+func (m *AuthUserListRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserListRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserListRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserListRequest.Merge(m, src)
+}
+func (m *AuthUserListRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserListRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserListRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserListRequest proto.InternalMessageInfo
+
+type AuthRoleListRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleListRequest) Reset()         { *m = AuthRoleListRequest{} }
+func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleListRequest) ProtoMessage()    {}
+func (*AuthRoleListRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{66}
+}
+func (m *AuthRoleListRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleListRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleListRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleListRequest.Merge(m, src)
+}
+func (m *AuthRoleListRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleListRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleListRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleListRequest proto.InternalMessageInfo
+
+type AuthRoleDeleteRequest struct {
+	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleDeleteRequest) Reset()         { *m = AuthRoleDeleteRequest{} }
+func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleDeleteRequest) ProtoMessage()    {}
+func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{67}
+}
+func (m *AuthRoleDeleteRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleDeleteRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleDeleteRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleDeleteRequest.Merge(m, src)
+}
+func (m *AuthRoleDeleteRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleDeleteRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleDeleteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleDeleteRequest proto.InternalMessageInfo
+
+func (m *AuthRoleDeleteRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthRoleGrantPermissionRequest struct {
+	// name is the name of the role which will be granted the permission.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// perm is the permission to grant to the role.
+	Perm                 *authpb.Permission `protobuf:"bytes,2,opt,name=perm,proto3" json:"perm,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *AuthRoleGrantPermissionRequest) Reset()         { *m = AuthRoleGrantPermissionRequest{} }
+func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGrantPermissionRequest) ProtoMessage()    {}
+func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{68}
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleGrantPermissionRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleGrantPermissionRequest.Merge(m, src)
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleGrantPermissionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGrantPermissionRequest proto.InternalMessageInfo
+
+func (m *AuthRoleGrantPermissionRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission {
+	if m != nil {
+		return m.Perm
+	}
+	return nil
+}
+
+type AuthRoleRevokePermissionRequest struct {
+	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+	Key                  string   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	RangeEnd             string   `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleRevokePermissionRequest) Reset()         { *m = AuthRoleRevokePermissionRequest{} }
+func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleRevokePermissionRequest) ProtoMessage()    {}
+func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{69}
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleRevokePermissionRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleRevokePermissionRequest.Merge(m, src)
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleRevokePermissionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleRevokePermissionRequest proto.InternalMessageInfo
+
+func (m *AuthRoleRevokePermissionRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetKey() string {
+	if m != nil {
+		return m.Key
+	}
+	return ""
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return ""
+}
+
+type AuthEnableResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthEnableResponse) Reset()         { *m = AuthEnableResponse{} }
+func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthEnableResponse) ProtoMessage()    {}
+func (*AuthEnableResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{70}
+}
+func (m *AuthEnableResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthEnableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthEnableResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthEnableResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthEnableResponse.Merge(m, src)
+}
+func (m *AuthEnableResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthEnableResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthEnableResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthEnableResponse proto.InternalMessageInfo
+
+func (m *AuthEnableResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthDisableResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthDisableResponse) Reset()         { *m = AuthDisableResponse{} }
+func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthDisableResponse) ProtoMessage()    {}
+func (*AuthDisableResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{71}
+}
+func (m *AuthDisableResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthDisableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthDisableResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthDisableResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthDisableResponse.Merge(m, src)
+}
+func (m *AuthDisableResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthDisableResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthDisableResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthDisableResponse proto.InternalMessageInfo
+
+func (m *AuthDisableResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthenticateResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// token is an authorized token that can be used in succeeding RPCs
+	Token                string   `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthenticateResponse) Reset()         { *m = AuthenticateResponse{} }
+func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthenticateResponse) ProtoMessage()    {}
+func (*AuthenticateResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{72}
+}
+func (m *AuthenticateResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthenticateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthenticateResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthenticateResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthenticateResponse.Merge(m, src)
+}
+func (m *AuthenticateResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthenticateResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthenticateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthenticateResponse proto.InternalMessageInfo
+
+func (m *AuthenticateResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthenticateResponse) GetToken() string {
+	if m != nil {
+		return m.Token
+	}
+	return ""
+}
+
+type AuthUserAddResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserAddResponse) Reset()         { *m = AuthUserAddResponse{} }
+func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserAddResponse) ProtoMessage()    {}
+func (*AuthUserAddResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{73}
+}
+func (m *AuthUserAddResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserAddResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserAddResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserAddResponse.Merge(m, src)
+}
+func (m *AuthUserAddResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserAddResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserAddResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserAddResponse proto.InternalMessageInfo
+
+func (m *AuthUserAddResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserGetResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Roles                []string        `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserGetResponse) Reset()         { *m = AuthUserGetResponse{} }
+func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGetResponse) ProtoMessage()    {}
+func (*AuthUserGetResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{74}
+}
+func (m *AuthUserGetResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserGetResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserGetResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserGetResponse.Merge(m, src)
+}
+func (m *AuthUserGetResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserGetResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserGetResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGetResponse proto.InternalMessageInfo
+
+func (m *AuthUserGetResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthUserGetResponse) GetRoles() []string {
+	if m != nil {
+		return m.Roles
+	}
+	return nil
+}
+
+type AuthUserDeleteResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserDeleteResponse) Reset()         { *m = AuthUserDeleteResponse{} }
+func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserDeleteResponse) ProtoMessage()    {}
+func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{75}
+}
+func (m *AuthUserDeleteResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserDeleteResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserDeleteResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserDeleteResponse.Merge(m, src)
+}
+func (m *AuthUserDeleteResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserDeleteResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserDeleteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserDeleteResponse proto.InternalMessageInfo
+
+func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserChangePasswordResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserChangePasswordResponse) Reset()         { *m = AuthUserChangePasswordResponse{} }
+func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserChangePasswordResponse) ProtoMessage()    {}
+func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{76}
+}
+func (m *AuthUserChangePasswordResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserChangePasswordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserChangePasswordResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserChangePasswordResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserChangePasswordResponse.Merge(m, src)
+}
+func (m *AuthUserChangePasswordResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserChangePasswordResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserChangePasswordResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserChangePasswordResponse proto.InternalMessageInfo
+
+func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserGrantRoleResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserGrantRoleResponse) Reset()         { *m = AuthUserGrantRoleResponse{} }
+func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGrantRoleResponse) ProtoMessage()    {}
+func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{77}
+}
+func (m *AuthUserGrantRoleResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserGrantRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserGrantRoleResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserGrantRoleResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserGrantRoleResponse.Merge(m, src)
+}
+func (m *AuthUserGrantRoleResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserGrantRoleResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserGrantRoleResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGrantRoleResponse proto.InternalMessageInfo
+
+func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserRevokeRoleResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserRevokeRoleResponse) Reset()         { *m = AuthUserRevokeRoleResponse{} }
+func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserRevokeRoleResponse) ProtoMessage()    {}
+func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{78}
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserRevokeRoleResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserRevokeRoleResponse.Merge(m, src)
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserRevokeRoleResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserRevokeRoleResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserRevokeRoleResponse proto.InternalMessageInfo
+
+func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleAddResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleAddResponse) Reset()         { *m = AuthRoleAddResponse{} }
+func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleAddResponse) ProtoMessage()    {}
+func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{79}
+}
+func (m *AuthRoleAddResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleAddResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleAddResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleAddResponse.Merge(m, src)
+}
+func (m *AuthRoleAddResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleAddResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleAddResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleAddResponse proto.InternalMessageInfo
+
+func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleGetResponse struct {
+	Header               *ResponseHeader      `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Perm                 []*authpb.Permission `protobuf:"bytes,2,rep,name=perm,proto3" json:"perm,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *AuthRoleGetResponse) Reset()         { *m = AuthRoleGetResponse{} }
+func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGetResponse) ProtoMessage()    {}
+func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{80}
+}
+func (m *AuthRoleGetResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleGetResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleGetResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleGetResponse.Merge(m, src)
+}
+func (m *AuthRoleGetResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleGetResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleGetResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGetResponse proto.InternalMessageInfo
+
+func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission {
+	if m != nil {
+		return m.Perm
+	}
+	return nil
+}
+
+type AuthRoleListResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Roles                []string        `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleListResponse) Reset()         { *m = AuthRoleListResponse{} }
+func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleListResponse) ProtoMessage()    {}
+func (*AuthRoleListResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{81}
+}
+func (m *AuthRoleListResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleListResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleListResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleListResponse.Merge(m, src)
+}
+func (m *AuthRoleListResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleListResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleListResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleListResponse proto.InternalMessageInfo
+
+func (m *AuthRoleListResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthRoleListResponse) GetRoles() []string {
+	if m != nil {
+		return m.Roles
+	}
+	return nil
+}
+
+type AuthUserListResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Users                []string        `protobuf:"bytes,2,rep,name=users,proto3" json:"users,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserListResponse) Reset()         { *m = AuthUserListResponse{} }
+func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserListResponse) ProtoMessage()    {}
+func (*AuthUserListResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{82}
+}
+func (m *AuthUserListResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserListResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserListResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserListResponse.Merge(m, src)
+}
+func (m *AuthUserListResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserListResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserListResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserListResponse proto.InternalMessageInfo
+
+func (m *AuthUserListResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthUserListResponse) GetUsers() []string {
+	if m != nil {
+		return m.Users
+	}
+	return nil
+}
+
+type AuthRoleDeleteResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleDeleteResponse) Reset()         { *m = AuthRoleDeleteResponse{} }
+func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleDeleteResponse) ProtoMessage()    {}
+func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{83}
+}
+func (m *AuthRoleDeleteResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleDeleteResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleDeleteResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleDeleteResponse.Merge(m, src)
+}
+func (m *AuthRoleDeleteResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleDeleteResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleDeleteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleDeleteResponse proto.InternalMessageInfo
+
+func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleGrantPermissionResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleGrantPermissionResponse) Reset()         { *m = AuthRoleGrantPermissionResponse{} }
+func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGrantPermissionResponse) ProtoMessage()    {}
+func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{84}
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleGrantPermissionResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleGrantPermissionResponse.Merge(m, src)
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleGrantPermissionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGrantPermissionResponse proto.InternalMessageInfo
+
+func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleRevokePermissionResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleRevokePermissionResponse) Reset()         { *m = AuthRoleRevokePermissionResponse{} }
+func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleRevokePermissionResponse) ProtoMessage()    {}
+func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{85}
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleRevokePermissionResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleRevokePermissionResponse.Merge(m, src)
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleRevokePermissionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleRevokePermissionResponse proto.InternalMessageInfo
+
+func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value)
+	proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value)
+	proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value)
+	proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value)
+	proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value)
+	proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value)
+	proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value)
+	proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader")
+	proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest")
+	proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse")
+	proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest")
+	proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse")
+	proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest")
+	proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse")
+	proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp")
+	proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp")
+	proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare")
+	proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest")
+	proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse")
+	proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest")
+	proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse")
+	proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest")
+	proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest")
+	proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse")
+	proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse")
+	proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest")
+	proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse")
+	proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest")
+	proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest")
+	proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest")
+	proto.RegisterType((*WatchProgressRequest)(nil), "etcdserverpb.WatchProgressRequest")
+	proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse")
+	proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest")
+	proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse")
+	proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest")
+	proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse")
+	proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest")
+	proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse")
+	proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest")
+	proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse")
+	proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest")
+	proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus")
+	proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse")
+	proto.RegisterType((*Member)(nil), "etcdserverpb.Member")
+	proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest")
+	proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse")
+	proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest")
+	proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse")
+	proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest")
+	proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse")
+	proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest")
+	proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse")
+	proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest")
+	proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse")
+	proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest")
+	proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse")
+	proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest")
+	proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember")
+	proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse")
+	proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest")
+	proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse")
+	proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest")
+	proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest")
+	proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest")
+	proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest")
+	proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest")
+	proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest")
+	proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest")
+	proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest")
+	proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest")
+	proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest")
+	proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest")
+	proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest")
+	proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest")
+	proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest")
+	proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest")
+	proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest")
+	proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse")
+	proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse")
+	proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse")
+	proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse")
+	proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse")
+	proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse")
+	proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse")
+	proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse")
+	proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse")
+	proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse")
+	proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse")
+	proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse")
+	proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse")
+	proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse")
+	proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse")
+	proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse")
+}
+
+func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) }
+
+var fileDescriptor_77a6da22d6a3feb1 = []byte{
+	// 3711 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x73, 0x1b, 0x47,
+	0x76, 0xe6, 0x00, 0x04, 0x40, 0x1c, 0x5c, 0x08, 0x35, 0x29, 0x09, 0x84, 0x24, 0x8a, 0x6a, 0xdd,
+	0xa8, 0x8b, 0x09, 0x9b, 0x76, 0xf2, 0xa0, 0xa4, 0x5c, 0xa6, 0x48, 0x58, 0xa4, 0x49, 0x91, 0xf4,
+	0x10, 0x94, 0x9d, 0x2a, 0x27, 0xac, 0x21, 0xd0, 0x22, 0x11, 0x02, 0x33, 0xc8, 0xcc, 0x00, 0x22,
+	0x15, 0x57, 0x52, 0xe5, 0x38, 0xae, 0x3c, 0xc7, 0x55, 0xa9, 0x24, 0xaf, 0x5b, 0x5b, 0x2e, 0xff,
+	0x82, 0xfd, 0x0b, 0x5b, 0xfb, 0xb2, 0xbb, 0xb5, 0x7f, 0x60, 0xcb, 0xbb, 0x2f, 0xfb, 0x0b, 0xf6,
+	0xf2, 0xb4, 0xd5, 0xb7, 0x99, 0x9e, 0x1b, 0x48, 0x1b, 0xb6, 0x5f, 0xa8, 0xe9, 0xd3, 0xa7, 0xcf,
+	0x39, 0x7d, 0xba, 0xcf, 0x39, 0xdd, 0x5f, 0x43, 0x90, 0xb7, 0xfb, 0xad, 0xa5, 0xbe, 0x6d, 0xb9,
+	0x16, 0x2a, 0x12, 0xb7, 0xd5, 0x76, 0x88, 0x3d, 0x24, 0x76, 0xff, 0xb0, 0x36, 0x7b, 0x64, 0x1d,
+	0x59, 0xac, 0xa3, 0x4e, 0xbf, 0x38, 0x4f, 0x6d, 0x8e, 0xf2, 0xd4, 0x7b, 0xc3, 0x56, 0x8b, 0xfd,
+	0xe9, 0x1f, 0xd6, 0x4f, 0x86, 0xa2, 0xeb, 0x1a, 0xeb, 0x32, 0x06, 0xee, 0x31, 0xfb, 0xd3, 0x3f,
+	0x64, 0xff, 0x88, 0xce, 0xeb, 0x47, 0x96, 0x75, 0xd4, 0x25, 0x75, 0xa3, 0xdf, 0xa9, 0x1b, 0xa6,
+	0x69, 0xb9, 0x86, 0xdb, 0xb1, 0x4c, 0x87, 0xf7, 0xe2, 0xff, 0xd4, 0xa0, 0xac, 0x13, 0xa7, 0x6f,
+	0x99, 0x0e, 0x59, 0x27, 0x46, 0x9b, 0xd8, 0xe8, 0x06, 0x40, 0xab, 0x3b, 0x70, 0x5c, 0x62, 0x1f,
+	0x74, 0xda, 0x55, 0x6d, 0x41, 0x5b, 0x9c, 0xd4, 0xf3, 0x82, 0xb2, 0xd1, 0x46, 0xd7, 0x20, 0xdf,
+	0x23, 0xbd, 0x43, 0xde, 0x9b, 0x62, 0xbd, 0x53, 0x9c, 0xb0, 0xd1, 0x46, 0x35, 0x98, 0xb2, 0xc9,
+	0xb0, 0xe3, 0x74, 0x2c, 0xb3, 0x9a, 0x5e, 0xd0, 0x16, 0xd3, 0xba, 0xd7, 0xa6, 0x03, 0x6d, 0xe3,
+	0xa5, 0x7b, 0xe0, 0x12, 0xbb, 0x57, 0x9d, 0xe4, 0x03, 0x29, 0xa1, 0x49, 0xec, 0x1e, 0xfe, 0x3c,
+	0x03, 0x45, 0xdd, 0x30, 0x8f, 0x88, 0x4e, 0xfe, 0x65, 0x40, 0x1c, 0x17, 0x55, 0x20, 0x7d, 0x42,
+	0xce, 0x98, 0xfa, 0xa2, 0x4e, 0x3f, 0xf9, 0x78, 0xf3, 0x88, 0x1c, 0x10, 0x93, 0x2b, 0x2e, 0xd2,
+	0xf1, 0xe6, 0x11, 0x69, 0x98, 0x6d, 0x34, 0x0b, 0x99, 0x6e, 0xa7, 0xd7, 0x71, 0x85, 0x56, 0xde,
+	0x08, 0x98, 0x33, 0x19, 0x32, 0x67, 0x15, 0xc0, 0xb1, 0x6c, 0xf7, 0xc0, 0xb2, 0xdb, 0xc4, 0xae,
+	0x66, 0x16, 0xb4, 0xc5, 0xf2, 0xf2, 0x9d, 0x25, 0x75, 0x21, 0x96, 0x54, 0x83, 0x96, 0xf6, 0x2c,
+	0xdb, 0xdd, 0xa1, 0xbc, 0x7a, 0xde, 0x91, 0x9f, 0xe8, 0x7d, 0x28, 0x30, 0x21, 0xae, 0x61, 0x1f,
+	0x11, 0xb7, 0x9a, 0x65, 0x52, 0xee, 0x9e, 0x23, 0xa5, 0xc9, 0x98, 0x75, 0xa6, 0x9e, 0x7f, 0x23,
+	0x0c, 0x45, 0x87, 0xd8, 0x1d, 0xa3, 0xdb, 0x79, 0x6d, 0x1c, 0x76, 0x49, 0x35, 0xb7, 0xa0, 0x2d,
+	0x4e, 0xe9, 0x01, 0x1a, 0x9d, 0xff, 0x09, 0x39, 0x73, 0x0e, 0x2c, 0xb3, 0x7b, 0x56, 0x9d, 0x62,
+	0x0c, 0x53, 0x94, 0xb0, 0x63, 0x76, 0xcf, 0xd8, 0xa2, 0x59, 0x03, 0xd3, 0xe5, 0xbd, 0x79, 0xd6,
+	0x9b, 0x67, 0x14, 0xd6, 0xbd, 0x08, 0x95, 0x5e, 0xc7, 0x3c, 0xe8, 0x59, 0xed, 0x03, 0xcf, 0x21,
+	0xc0, 0x1c, 0x52, 0xee, 0x75, 0xcc, 0xe7, 0x56, 0x5b, 0x97, 0x6e, 0xa1, 0x9c, 0xc6, 0x69, 0x90,
+	0xb3, 0x20, 0x38, 0x8d, 0x53, 0x95, 0x73, 0x09, 0x66, 0xa8, 0xcc, 0x96, 0x4d, 0x0c, 0x97, 0xf8,
+	0xcc, 0x45, 0xc6, 0x7c, 0xa9, 0xd7, 0x31, 0x57, 0x59, 0x4f, 0x80, 0xdf, 0x38, 0x8d, 0xf0, 0x97,
+	0x04, 0xbf, 0x71, 0x1a, 0xe4, 0xc7, 0x4b, 0x90, 0xf7, 0x7c, 0x8e, 0xa6, 0x60, 0x72, 0x7b, 0x67,
+	0xbb, 0x51, 0x99, 0x40, 0x00, 0xd9, 0x95, 0xbd, 0xd5, 0xc6, 0xf6, 0x5a, 0x45, 0x43, 0x05, 0xc8,
+	0xad, 0x35, 0x78, 0x23, 0x85, 0x9f, 0x02, 0xf8, 0xde, 0x45, 0x39, 0x48, 0x6f, 0x36, 0xfe, 0xa1,
+	0x32, 0x41, 0x79, 0x5e, 0x34, 0xf4, 0xbd, 0x8d, 0x9d, 0xed, 0x8a, 0x46, 0x07, 0xaf, 0xea, 0x8d,
+	0x95, 0x66, 0xa3, 0x92, 0xa2, 0x1c, 0xcf, 0x77, 0xd6, 0x2a, 0x69, 0x94, 0x87, 0xcc, 0x8b, 0x95,
+	0xad, 0xfd, 0x46, 0x65, 0x12, 0x7f, 0xa9, 0x41, 0x49, 0xac, 0x17, 0x8f, 0x09, 0xf4, 0x0e, 0x64,
+	0x8f, 0x59, 0x5c, 0xb0, 0xad, 0x58, 0x58, 0xbe, 0x1e, 0x5a, 0xdc, 0x40, 0xec, 0xe8, 0x82, 0x17,
+	0x61, 0x48, 0x9f, 0x0c, 0x9d, 0x6a, 0x6a, 0x21, 0xbd, 0x58, 0x58, 0xae, 0x2c, 0xf1, 0x80, 0x5d,
+	0xda, 0x24, 0x67, 0x2f, 0x8c, 0xee, 0x80, 0xe8, 0xb4, 0x13, 0x21, 0x98, 0xec, 0x59, 0x36, 0x61,
+	0x3b, 0x76, 0x4a, 0x67, 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0xf0, 0xd7, 0x1a,
+	0xc0, 0xee, 0xc0, 0x4d, 0x0e, 0x8d, 0x59, 0xc8, 0x0c, 0xa9, 0x60, 0x11, 0x16, 0xbc, 0xc1, 0x62,
+	0x82, 0x18, 0x0e, 0xf1, 0x62, 0x82, 0x36, 0xd0, 0x55, 0xc8, 0xf5, 0x6d, 0x32, 0x3c, 0x38, 0x19,
+	0x32, 0x25, 0x53, 0x7a, 0x96, 0x36, 0x37, 0x87, 0xe8, 0x16, 0x14, 0x3b, 0x47, 0xa6, 0x65, 0x93,
+	0x03, 0x2e, 0x2b, 0xc3, 0x7a, 0x0b, 0x9c, 0xc6, 0xec, 0x56, 0x58, 0xb8, 0xe0, 0xac, 0xca, 0xb2,
+	0x45, 0x49, 0xd8, 0x84, 0x02, 0x33, 0x75, 0x2c, 0xf7, 0x3d, 0xf0, 0x6d, 0x4c, 0xb1, 0x61, 0x51,
+	0x17, 0x0a, 0xab, 0xf1, 0x27, 0x80, 0xd6, 0x48, 0x97, 0xb8, 0x64, 0x9c, 0xec, 0xa1, 0xf8, 0x24,
+	0xad, 0xfa, 0x04, 0xff, 0xb7, 0x06, 0x33, 0x01, 0xf1, 0x63, 0x4d, 0xab, 0x0a, 0xb9, 0x36, 0x13,
+	0xc6, 0x2d, 0x48, 0xeb, 0xb2, 0x89, 0x1e, 0xc1, 0x94, 0x30, 0xc0, 0xa9, 0xa6, 0x13, 0x36, 0x4d,
+	0x8e, 0xdb, 0xe4, 0xe0, 0xaf, 0x53, 0x90, 0x17, 0x13, 0xdd, 0xe9, 0xa3, 0x15, 0x28, 0xd9, 0xbc,
+	0x71, 0xc0, 0xe6, 0x23, 0x2c, 0xaa, 0x25, 0x27, 0xa1, 0xf5, 0x09, 0xbd, 0x28, 0x86, 0x30, 0x32,
+	0xfa, 0x3b, 0x28, 0x48, 0x11, 0xfd, 0x81, 0x2b, 0x5c, 0x5e, 0x0d, 0x0a, 0xf0, 0xf7, 0xdf, 0xfa,
+	0x84, 0x0e, 0x82, 0x7d, 0x77, 0xe0, 0xa2, 0x26, 0xcc, 0xca, 0xc1, 0x7c, 0x36, 0xc2, 0x8c, 0x34,
+	0x93, 0xb2, 0x10, 0x94, 0x12, 0x5d, 0xaa, 0xf5, 0x09, 0x1d, 0x89, 0xf1, 0x4a, 0xa7, 0x6a, 0x92,
+	0x7b, 0xca, 0x93, 0x77, 0xc4, 0xa4, 0xe6, 0xa9, 0x19, 0x35, 0xa9, 0x79, 0x6a, 0x3e, 0xcd, 0x43,
+	0x4e, 0xb4, 0xf0, 0xcf, 0x52, 0x00, 0x72, 0x35, 0x76, 0xfa, 0x68, 0x0d, 0xca, 0xb6, 0x68, 0x05,
+	0xbc, 0x75, 0x2d, 0xd6, 0x5b, 0x62, 0x11, 0x27, 0xf4, 0x92, 0x1c, 0xc4, 0x8d, 0x7b, 0x17, 0x8a,
+	0x9e, 0x14, 0xdf, 0x61, 0x73, 0x31, 0x0e, 0xf3, 0x24, 0x14, 0xe4, 0x00, 0xea, 0xb2, 0x8f, 0xe0,
+	0xb2, 0x37, 0x3e, 0xc6, 0x67, 0xb7, 0x46, 0xf8, 0xcc, 0x13, 0x38, 0x23, 0x25, 0xa8, 0x5e, 0x53,
+	0x0d, 0xf3, 0xdd, 0x36, 0x17, 0xe3, 0xb6, 0xa8, 0x61, 0xd4, 0x71, 0x40, 0xeb, 0x25, 0x6f, 0xe2,
+	0x3f, 0xa4, 0x21, 0xb7, 0x6a, 0xf5, 0xfa, 0x86, 0x4d, 0x57, 0x23, 0x6b, 0x13, 0x67, 0xd0, 0x75,
+	0x99, 0xbb, 0xca, 0xcb, 0xb7, 0x83, 0x12, 0x05, 0x9b, 0xfc, 0x57, 0x67, 0xac, 0xba, 0x18, 0x42,
+	0x07, 0x8b, 0xf2, 0x98, 0xba, 0xc0, 0x60, 0x51, 0x1c, 0xc5, 0x10, 0x19, 0xc8, 0x69, 0x3f, 0x90,
+	0x6b, 0x90, 0x1b, 0x12, 0xdb, 0x2f, 0xe9, 0xeb, 0x13, 0xba, 0x24, 0xa0, 0x07, 0x30, 0x1d, 0x2e,
+	0x2f, 0x19, 0xc1, 0x53, 0x6e, 0x05, 0xab, 0xd1, 0x6d, 0x28, 0x06, 0x6a, 0x5c, 0x56, 0xf0, 0x15,
+	0x7a, 0x4a, 0x89, 0xbb, 0x22, 0xf3, 0x2a, 0xad, 0xc7, 0xc5, 0xf5, 0x09, 0x99, 0x59, 0xaf, 0xc8,
+	0xcc, 0x3a, 0x25, 0x46, 0x89, 0xdc, 0x1a, 0x48, 0x32, 0xef, 0x05, 0x93, 0x0c, 0x7e, 0x0f, 0x4a,
+	0x01, 0x07, 0xd1, 0xba, 0xd3, 0xf8, 0x70, 0x7f, 0x65, 0x8b, 0x17, 0xa9, 0x67, 0xac, 0x2e, 0xe9,
+	0x15, 0x8d, 0xd6, 0xba, 0xad, 0xc6, 0xde, 0x5e, 0x25, 0x85, 0x4a, 0x90, 0xdf, 0xde, 0x69, 0x1e,
+	0x70, 0xae, 0x34, 0x7e, 0xe6, 0x49, 0x10, 0x45, 0x4e, 0xa9, 0x6d, 0x13, 0x4a, 0x6d, 0xd3, 0x64,
+	0x6d, 0x4b, 0xf9, 0xb5, 0x8d, 0x95, 0xb9, 0xad, 0xc6, 0xca, 0x5e, 0xa3, 0x32, 0xf9, 0xb4, 0x0c,
+	0x45, 0xee, 0xdf, 0x83, 0x81, 0x49, 0x4b, 0xed, 0x4f, 0x34, 0x00, 0x3f, 0x9a, 0x50, 0x1d, 0x72,
+	0x2d, 0xae, 0xa7, 0xaa, 0xb1, 0x64, 0x74, 0x39, 0x76, 0xc9, 0x74, 0xc9, 0x85, 0xde, 0x82, 0x9c,
+	0x33, 0x68, 0xb5, 0x88, 0x23, 0x4b, 0xde, 0xd5, 0x70, 0x3e, 0x14, 0xd9, 0x4a, 0x97, 0x7c, 0x74,
+	0xc8, 0x4b, 0xa3, 0xd3, 0x1d, 0xb0, 0x02, 0x38, 0x7a, 0x88, 0xe0, 0xc3, 0xff, 0xa7, 0x41, 0x41,
+	0xd9, 0xbc, 0xdf, 0x31, 0x09, 0x5f, 0x87, 0x3c, 0xb3, 0x81, 0xb4, 0x45, 0x1a, 0x9e, 0xd2, 0x7d,
+	0x02, 0xfa, 0x5b, 0xc8, 0xcb, 0x08, 0x90, 0x99, 0xb8, 0x1a, 0x2f, 0x76, 0xa7, 0xaf, 0xfb, 0xac,
+	0x78, 0x13, 0x2e, 0x31, 0xaf, 0xb4, 0xe8, 0xe1, 0x5a, 0xfa, 0x51, 0x3d, 0x7e, 0x6a, 0xa1, 0xe3,
+	0x67, 0x0d, 0xa6, 0xfa, 0xc7, 0x67, 0x4e, 0xa7, 0x65, 0x74, 0x85, 0x15, 0x5e, 0x1b, 0x7f, 0x00,
+	0x48, 0x15, 0x36, 0xce, 0x74, 0x71, 0x09, 0x0a, 0xeb, 0x86, 0x73, 0x2c, 0x4c, 0xc2, 0x8f, 0xa0,
+	0x44, 0x9b, 0x9b, 0x2f, 0x2e, 0x60, 0x23, 0xbb, 0x1c, 0x48, 0xee, 0xb1, 0x7c, 0x8e, 0x60, 0xf2,
+	0xd8, 0x70, 0x8e, 0xd9, 0x44, 0x4b, 0x3a, 0xfb, 0x46, 0x0f, 0xa0, 0xd2, 0xe2, 0x93, 0x3c, 0x08,
+	0x5d, 0x19, 0xa6, 0x05, 0xdd, 0x3b, 0x09, 0x7e, 0x0c, 0x45, 0x3e, 0x87, 0xef, 0xdb, 0x08, 0x7c,
+	0x09, 0xa6, 0xf7, 0x4c, 0xa3, 0xef, 0x1c, 0x5b, 0xb2, 0xba, 0xd1, 0x49, 0x57, 0x7c, 0xda, 0x58,
+	0x1a, 0xef, 0xc3, 0xb4, 0x4d, 0x7a, 0x46, 0xc7, 0xec, 0x98, 0x47, 0x07, 0x87, 0x67, 0x2e, 0x71,
+	0xc4, 0x85, 0xa9, 0xec, 0x91, 0x9f, 0x52, 0x2a, 0x35, 0xed, 0xb0, 0x6b, 0x1d, 0x8a, 0x34, 0xc7,
+	0xbe, 0xf1, 0x17, 0x29, 0x28, 0x7e, 0x64, 0xb8, 0x2d, 0xb9, 0x74, 0x68, 0x03, 0xca, 0x5e, 0x72,
+	0x63, 0x14, 0x61, 0x4b, 0xa8, 0xc4, 0xb2, 0x31, 0xf2, 0x28, 0x2d, 0xab, 0x63, 0xa9, 0xa5, 0x12,
+	0x98, 0x28, 0xc3, 0x6c, 0x91, 0xae, 0x27, 0x2a, 0x95, 0x2c, 0x8a, 0x31, 0xaa, 0xa2, 0x54, 0x02,
+	0xda, 0x81, 0x4a, 0xdf, 0xb6, 0x8e, 0x6c, 0xe2, 0x38, 0x9e, 0x30, 0x5e, 0xc6, 0x70, 0x8c, 0xb0,
+	0x5d, 0xc1, 0xea, 0x8b, 0x9b, 0xee, 0x07, 0x49, 0x4f, 0xa7, 0xfd, 0xf3, 0x0c, 0x4f, 0x4e, 0xbf,
+	0x4e, 0x01, 0x8a, 0x4e, 0xea, 0xdb, 0x1e, 0xf1, 0xee, 0x42, 0xd9, 0x71, 0x0d, 0x3b, 0xb2, 0xd9,
+	0x4a, 0x8c, 0xea, 0x65, 0xfc, 0xfb, 0xe0, 0x19, 0x74, 0x60, 0x5a, 0x6e, 0xe7, 0xe5, 0x99, 0x38,
+	0x25, 0x97, 0x25, 0x79, 0x9b, 0x51, 0x51, 0x03, 0x72, 0x2f, 0x3b, 0x5d, 0x97, 0xd8, 0x4e, 0x35,
+	0xb3, 0x90, 0x5e, 0x2c, 0x2f, 0x3f, 0x3a, 0x6f, 0x19, 0x96, 0xde, 0x67, 0xfc, 0xcd, 0xb3, 0x3e,
+	0xd1, 0xe5, 0x58, 0xf5, 0xe4, 0x99, 0x0d, 0x9c, 0xc6, 0xe7, 0x60, 0xea, 0x15, 0x15, 0x41, 0x6f,
+	0xd9, 0x39, 0x7e, 0x58, 0x64, 0x6d, 0x7e, 0xc9, 0x7e, 0x69, 0x1b, 0x47, 0x3d, 0x62, 0xba, 0xf2,
+	0x1e, 0x28, 0xdb, 0xf8, 0x2e, 0x80, 0xaf, 0x86, 0xa6, 0xfc, 0xed, 0x9d, 0xdd, 0xfd, 0x66, 0x65,
+	0x02, 0x15, 0x61, 0x6a, 0x7b, 0x67, 0xad, 0xb1, 0xd5, 0xa0, 0xf5, 0x01, 0xd7, 0xa5, 0x4b, 0x03,
+	0x6b, 0xa9, 0xea, 0xd4, 0x02, 0x3a, 0xf1, 0x15, 0x98, 0x8d, 0x5b, 0x40, 0x7a, 0x16, 0x2d, 0x89,
+	0x5d, 0x3a, 0x56, 0xa8, 0xa8, 0xaa, 0x53, 0xc1, 0xe9, 0x56, 0x21, 0xc7, 0x77, 0x6f, 0x5b, 0x1c,
+	0xce, 0x65, 0x93, 0x3a, 0x82, 0x6f, 0x46, 0xd2, 0x16, 0xab, 0xe4, 0xb5, 0x63, 0xd3, 0x4b, 0x26,
+	0x36, 0xbd, 0xa0, 0xdb, 0x50, 0xf2, 0xa2, 0xc1, 0x70, 0xc4, 0x59, 0x20, 0xaf, 0x17, 0xe5, 0x46,
+	0xa7, 0xb4, 0x80, 0xd3, 0x73, 0x41, 0xa7, 0xa3, 0xbb, 0x90, 0x25, 0x43, 0x62, 0xba, 0x4e, 0xb5,
+	0xc0, 0x2a, 0x46, 0x49, 0x9e, 0xdd, 0x1b, 0x94, 0xaa, 0x8b, 0x4e, 0xfc, 0x37, 0x70, 0x89, 0xdd,
+	0x91, 0x9e, 0xd9, 0x86, 0xa9, 0x5e, 0xe6, 0x9a, 0xcd, 0x2d, 0xe1, 0x6e, 0xfa, 0x89, 0xca, 0x90,
+	0xda, 0x58, 0x13, 0x4e, 0x48, 0x6d, 0xac, 0xe1, 0xcf, 0x34, 0x40, 0xea, 0xb8, 0xb1, 0xfc, 0x1c,
+	0x12, 0x2e, 0xd5, 0xa7, 0x7d, 0xf5, 0xb3, 0x90, 0x21, 0xb6, 0x6d, 0xd9, 0xcc, 0xa3, 0x79, 0x9d,
+	0x37, 0xf0, 0x1d, 0x61, 0x83, 0x4e, 0x86, 0xd6, 0x89, 0x17, 0x83, 0x5c, 0x9a, 0xe6, 0x99, 0xba,
+	0x09, 0x33, 0x01, 0xae, 0xb1, 0x2a, 0xd7, 0x7d, 0xb8, 0xcc, 0x84, 0x6d, 0x12, 0xd2, 0x5f, 0xe9,
+	0x76, 0x86, 0x89, 0x5a, 0xfb, 0x70, 0x25, 0xcc, 0xf8, 0xc3, 0xfa, 0x08, 0xff, 0xbd, 0xd0, 0xd8,
+	0xec, 0xf4, 0x48, 0xd3, 0xda, 0x4a, 0xb6, 0x8d, 0x66, 0xf6, 0x13, 0x72, 0xe6, 0x88, 0x12, 0xcf,
+	0xbe, 0xf1, 0x4f, 0x35, 0xb8, 0x1a, 0x19, 0xfe, 0x03, 0xaf, 0xea, 0x3c, 0xc0, 0x11, 0xdd, 0x3e,
+	0xa4, 0x4d, 0x3b, 0x38, 0xba, 0xa0, 0x50, 0x3c, 0x3b, 0x69, 0x2e, 0x2b, 0x0a, 0x3b, 0x67, 0xc5,
+	0x9a, 0xb3, 0x3f, 0x5e, 0xc4, 0xdf, 0x80, 0x02, 0x23, 0xec, 0xb9, 0x86, 0x3b, 0x70, 0x22, 0x8b,
+	0xf1, 0x6f, 0x62, 0x0b, 0xc8, 0x41, 0x63, 0xcd, 0xeb, 0x2d, 0xc8, 0xb2, 0x83, 0xb5, 0x3c, 0x56,
+	0x86, 0x6e, 0x32, 0x8a, 0x1d, 0xba, 0x60, 0xc4, 0xc7, 0x90, 0x7d, 0xce, 0xd0, 0x48, 0xc5, 0xb2,
+	0x49, 0xb9, 0x14, 0xa6, 0xd1, 0xe3, 0x18, 0x49, 0x5e, 0x67, 0xdf, 0xec, 0x14, 0x46, 0x88, 0xbd,
+	0xaf, 0x6f, 0xf1, 0xd3, 0x5e, 0x5e, 0xf7, 0xda, 0xd4, 0x65, 0xad, 0x6e, 0x87, 0x98, 0x2e, 0xeb,
+	0x9d, 0x64, 0xbd, 0x0a, 0x05, 0x2f, 0x41, 0x85, 0x6b, 0x5a, 0x69, 0xb7, 0x95, 0xd3, 0x94, 0x27,
+	0x4f, 0x0b, 0xca, 0xc3, 0x5f, 0x69, 0x70, 0x49, 0x19, 0x30, 0x96, 0x63, 0x1e, 0x43, 0x96, 0x63,
+	0xae, 0xa2, 0x70, 0xcf, 0x06, 0x47, 0x71, 0x35, 0xba, 0xe0, 0x41, 0x4b, 0x90, 0xe3, 0x5f, 0xf2,
+	0x48, 0x1b, 0xcf, 0x2e, 0x99, 0xf0, 0x5d, 0x98, 0x11, 0x24, 0xd2, 0xb3, 0xe2, 0xf6, 0x36, 0x73,
+	0x28, 0xfe, 0x14, 0x66, 0x83, 0x6c, 0x63, 0x4d, 0x49, 0x31, 0x32, 0x75, 0x11, 0x23, 0x57, 0xa4,
+	0x91, 0xfb, 0xfd, 0xb6, 0x72, 0x2c, 0x08, 0xaf, 0xba, 0xba, 0x22, 0xa9, 0xd0, 0x8a, 0x78, 0x13,
+	0x90, 0x22, 0x7e, 0xd4, 0x09, 0xcc, 0xc8, 0xed, 0xb0, 0xd5, 0x71, 0xbc, 0xd3, 0xe7, 0x6b, 0x40,
+	0x2a, 0xf1, 0xc7, 0x36, 0x68, 0x8d, 0xc8, 0xa2, 0x26, 0x0d, 0xfa, 0x00, 0x90, 0x4a, 0x1c, 0x2b,
+	0xa3, 0xd7, 0xe1, 0xd2, 0x73, 0x6b, 0x48, 0x53, 0x03, 0xa5, 0xfa, 0x21, 0xc3, 0xef, 0xa2, 0xde,
+	0xb2, 0x79, 0x6d, 0xaa, 0x5c, 0x1d, 0x30, 0x96, 0xf2, 0x5f, 0x6a, 0x50, 0x5c, 0xe9, 0x1a, 0x76,
+	0x4f, 0x2a, 0x7e, 0x17, 0xb2, 0xfc, 0x86, 0x25, 0x40, 0x8d, 0x7b, 0x41, 0x31, 0x2a, 0x2f, 0x6f,
+	0xac, 0xf0, 0xfb, 0x98, 0x18, 0x45, 0x0d, 0x17, 0xef, 0x1e, 0x6b, 0xa1, 0x77, 0x90, 0x35, 0xf4,
+	0x06, 0x64, 0x0c, 0x3a, 0x84, 0xa5, 0xe0, 0x72, 0xf8, 0x6e, 0xcb, 0xa4, 0xb1, 0x73, 0x20, 0xe7,
+	0xc2, 0xef, 0x40, 0x41, 0xd1, 0x40, 0x6f, 0xef, 0xcf, 0x1a, 0xe2, 0xd0, 0xb6, 0xb2, 0xda, 0xdc,
+	0x78, 0xc1, 0x2f, 0xf5, 0x65, 0x80, 0xb5, 0x86, 0xd7, 0x4e, 0xe1, 0x8f, 0xc5, 0x28, 0x91, 0xef,
+	0x54, 0x7b, 0xb4, 0x24, 0x7b, 0x52, 0x17, 0xb2, 0xe7, 0x14, 0x4a, 0x62, 0xfa, 0xe3, 0xa6, 0x6f,
+	0x26, 0x2f, 0x21, 0x7d, 0x2b, 0xc6, 0xeb, 0x82, 0x11, 0x4f, 0x43, 0x49, 0x24, 0x74, 0xb1, 0xff,
+	0x7e, 0xa1, 0x41, 0x59, 0x52, 0xc6, 0x05, 0x5f, 0x25, 0x6e, 0xc4, 0x2b, 0x80, 0x87, 0x1a, 0x5d,
+	0x81, 0x6c, 0xfb, 0x70, 0xaf, 0xf3, 0x5a, 0x02, 0xe5, 0xa2, 0x45, 0xe9, 0x5d, 0xae, 0x87, 0xbf,
+	0x56, 0x89, 0x16, 0xba, 0xce, 0x1f, 0xb2, 0x36, 0xcc, 0x36, 0x39, 0x65, 0x67, 0xca, 0x49, 0xdd,
+	0x27, 0xb0, 0x0b, 0xb5, 0x78, 0xd5, 0x62, 0x07, 0x49, 0xf5, 0x95, 0x6b, 0x06, 0x2e, 0xad, 0x0c,
+	0xdc, 0xe3, 0x86, 0x69, 0x1c, 0x76, 0x65, 0xc6, 0xa2, 0x65, 0x96, 0x12, 0xd7, 0x3a, 0x8e, 0x4a,
+	0x6d, 0xc0, 0x0c, 0xa5, 0x12, 0xd3, 0xed, 0xb4, 0x94, 0xf4, 0x26, 0x8b, 0x98, 0x16, 0x2a, 0x62,
+	0x86, 0xe3, 0xbc, 0xb2, 0xec, 0xb6, 0x98, 0x9a, 0xd7, 0xc6, 0x6b, 0x5c, 0xf8, 0xbe, 0x13, 0x28,
+	0x53, 0xdf, 0x56, 0xca, 0xa2, 0x2f, 0xe5, 0x19, 0x71, 0x47, 0x48, 0xc1, 0x8f, 0xe0, 0xb2, 0xe4,
+	0x14, 0xc0, 0xe4, 0x08, 0xe6, 0x1d, 0xb8, 0x21, 0x99, 0x57, 0x8f, 0xe9, 0x45, 0x6d, 0x57, 0x28,
+	0xfc, 0xae, 0x76, 0x3e, 0x85, 0xaa, 0x67, 0x27, 0x3b, 0x2c, 0x5b, 0x5d, 0xd5, 0x80, 0x81, 0x23,
+	0xf6, 0x4c, 0x5e, 0x67, 0xdf, 0x94, 0x66, 0x5b, 0x5d, 0xef, 0x48, 0x40, 0xbf, 0xf1, 0x2a, 0xcc,
+	0x49, 0x19, 0xe2, 0x18, 0x1b, 0x14, 0x12, 0x31, 0x28, 0x4e, 0x88, 0x70, 0x18, 0x1d, 0x3a, 0xda,
+	0xed, 0x2a, 0x67, 0xd0, 0xb5, 0x4c, 0xa6, 0xa6, 0xc8, 0xbc, 0xcc, 0x77, 0x04, 0x35, 0x4c, 0xad,
+	0x18, 0x82, 0x4c, 0x05, 0xa8, 0x64, 0xb1, 0x10, 0x94, 0x1c, 0x59, 0x88, 0x88, 0xe8, 0x4f, 0x60,
+	0xde, 0x33, 0x82, 0xfa, 0x6d, 0x97, 0xd8, 0xbd, 0x8e, 0xe3, 0x28, 0x50, 0x56, 0xdc, 0xc4, 0xef,
+	0xc1, 0x64, 0x9f, 0x88, 0x9c, 0x52, 0x58, 0x46, 0x4b, 0xfc, 0xed, 0x79, 0x49, 0x19, 0xcc, 0xfa,
+	0x71, 0x1b, 0x6e, 0x4a, 0xe9, 0xdc, 0xa3, 0xb1, 0xe2, 0xc3, 0x46, 0xc9, 0x0b, 0x3e, 0x77, 0x6b,
+	0xf4, 0x82, 0x9f, 0xe6, 0x6b, 0xef, 0xc1, 0xab, 0x1f, 0x70, 0x47, 0xca, 0xd8, 0x1a, 0xab, 0x56,
+	0x6c, 0x72, 0x9f, 0x7a, 0x21, 0x39, 0x96, 0xb0, 0x43, 0x98, 0x0d, 0x46, 0xf2, 0x58, 0x69, 0x6c,
+	0x16, 0x32, 0xae, 0x75, 0x42, 0x64, 0x12, 0xe3, 0x0d, 0x69, 0xb0, 0x17, 0xe6, 0x63, 0x19, 0x6c,
+	0xf8, 0xc2, 0xd8, 0x96, 0x1c, 0xd7, 0x5e, 0xba, 0x9a, 0xf2, 0xf0, 0xc5, 0x1b, 0x78, 0x1b, 0xae,
+	0x84, 0xd3, 0xc4, 0x58, 0x26, 0xbf, 0xe0, 0x1b, 0x38, 0x2e, 0x93, 0x8c, 0x25, 0xf7, 0x43, 0x3f,
+	0x19, 0x28, 0x09, 0x65, 0x2c, 0x91, 0x3a, 0xd4, 0xe2, 0xf2, 0xcb, 0xf7, 0xb1, 0x5f, 0xbd, 0x74,
+	0x33, 0x96, 0x30, 0xc7, 0x17, 0x36, 0xfe, 0xf2, 0xfb, 0x39, 0x22, 0x3d, 0x32, 0x47, 0x88, 0x20,
+	0xf1, 0xb3, 0xd8, 0x0f, 0xb0, 0xe9, 0x84, 0x0e, 0x3f, 0x81, 0x8e, 0xab, 0x83, 0xd6, 0x10, 0x4f,
+	0x07, 0x6b, 0xc8, 0x8d, 0xad, 0xa6, 0xdd, 0xb1, 0x16, 0xe3, 0x23, 0x3f, 0x77, 0x46, 0x32, 0xf3,
+	0x58, 0x82, 0x3f, 0x86, 0x85, 0xe4, 0xa4, 0x3c, 0x8e, 0xe4, 0x87, 0x75, 0xc8, 0x7b, 0x07, 0x4a,
+	0xe5, 0x77, 0x1b, 0x05, 0xc8, 0x6d, 0xef, 0xec, 0xed, 0xae, 0xac, 0x36, 0xf8, 0x0f, 0x37, 0x56,
+	0x77, 0x74, 0x7d, 0x7f, 0xb7, 0x59, 0x49, 0x2d, 0xff, 0x29, 0x0d, 0xa9, 0xcd, 0x17, 0xe8, 0x1f,
+	0x21, 0xc3, 0x5f, 0x31, 0x47, 0x3c, 0x5d, 0xd7, 0x46, 0x3d, 0xd4, 0xe2, 0x6b, 0x9f, 0xfd, 0xe6,
+	0xf7, 0x5f, 0xa6, 0x2e, 0xe3, 0x4a, 0x7d, 0xf8, 0xf6, 0x21, 0x71, 0x8d, 0xfa, 0xc9, 0xb0, 0xce,
+	0xea, 0xc3, 0x13, 0xed, 0x21, 0xda, 0x87, 0xf4, 0xee, 0xc0, 0x45, 0x89, 0xcf, 0xda, 0xb5, 0xe4,
+	0xf7, 0x5b, 0x3c, 0xc7, 0x04, 0xcf, 0xe0, 0xb2, 0x22, 0xb8, 0x3f, 0x70, 0xa9, 0xd8, 0x01, 0x14,
+	0xd4, 0x17, 0xd8, 0x73, 0xdf, 0xbb, 0x6b, 0xe7, 0xbf, 0xee, 0xe2, 0x5b, 0x4c, 0xdd, 0x35, 0x7c,
+	0x45, 0x51, 0xc7, 0xdf, 0x89, 0xd5, 0xd9, 0x34, 0x4f, 0x4d, 0x94, 0xf8, 0x22, 0x5e, 0x4b, 0x7e,
+	0xf4, 0x8d, 0x9d, 0x8d, 0x7b, 0x6a, 0x52, 0xb1, 0xa6, 0x78, 0xf3, 0x6d, 0xb9, 0xe8, 0x66, 0xcc,
+	0x9b, 0x9f, 0xfa, 0xba, 0x55, 0x5b, 0x48, 0x66, 0x10, 0x8a, 0x16, 0x98, 0xa2, 0x1a, 0xbe, 0xac,
+	0x28, 0x6a, 0x79, 0x6c, 0x4f, 0xb4, 0x87, 0xcb, 0x47, 0x90, 0x61, 0xe8, 0x31, 0xfa, 0x27, 0xf9,
+	0x51, 0x8b, 0x81, 0xd1, 0x13, 0x16, 0x3f, 0x80, 0x3b, 0xe3, 0x2a, 0x53, 0x86, 0x70, 0x49, 0x2a,
+	0x63, 0xf8, 0xf1, 0x13, 0xed, 0xe1, 0xa2, 0xf6, 0xa6, 0xb6, 0xfc, 0xc7, 0x49, 0xc8, 0x30, 0xb8,
+	0x08, 0x59, 0x00, 0x3e, 0x9a, 0x1a, 0x9e, 0x65, 0x04, 0x9f, 0x0d, 0xcf, 0x32, 0x0a, 0xc4, 0xe2,
+	0x79, 0xa6, 0xb8, 0x8a, 0x67, 0xa4, 0x62, 0x86, 0x44, 0xd5, 0x19, 0xb8, 0x46, 0x7d, 0x3a, 0x14,
+	0x80, 0x19, 0x0f, 0x33, 0x14, 0x27, 0x30, 0x80, 0xaa, 0x86, 0x77, 0x48, 0x0c, 0xa2, 0x8a, 0x31,
+	0xd3, 0x79, 0x1d, 0x5f, 0x55, 0x3c, 0xcb, 0xd5, 0xda, 0x8c, 0x91, 0xea, 0xfd, 0x0f, 0x0d, 0xca,
+	0x41, 0x5c, 0x14, 0xdd, 0x8e, 0x91, 0x1c, 0x86, 0x57, 0x6b, 0x77, 0x46, 0x33, 0x25, 0x59, 0xc0,
+	0xd5, 0x9f, 0x10, 0xd2, 0x37, 0x28, 0xa3, 0x70, 0x3c, 0xfa, 0x42, 0x83, 0xe9, 0x10, 0xd8, 0x89,
+	0xe2, 0x34, 0x44, 0xa0, 0xd4, 0xda, 0xdd, 0x73, 0xb8, 0x84, 0x21, 0xf7, 0x98, 0x21, 0x0b, 0xf8,
+	0x5a, 0xc4, 0x15, 0x6e, 0xa7, 0x47, 0x5c, 0x4b, 0x18, 0xe3, 0x2d, 0x03, 0x07, 0x26, 0x63, 0x97,
+	0x21, 0x00, 0x74, 0xc6, 0x2e, 0x43, 0x10, 0xd5, 0x1c, 0xb1, 0x0c, 0x1c, 0x8d, 0xa4, 0x5b, 0xfc,
+	0xcf, 0x69, 0xc8, 0xad, 0xf2, 0x5f, 0x4f, 0x22, 0x07, 0xf2, 0x1e, 0x02, 0x88, 0xe6, 0xe3, 0xd0,
+	0x18, 0xff, 0xb6, 0x50, 0xbb, 0x99, 0xd8, 0x2f, 0xb4, 0xdf, 0x65, 0xda, 0x6f, 0xe2, 0x9a, 0xd4,
+	0x2e, 0x7e, 0xa4, 0x59, 0xe7, 0xd7, 0xfe, 0xba, 0xd1, 0x6e, 0xd3, 0x89, 0xff, 0x3b, 0x14, 0x55,
+	0x98, 0x0e, 0xdd, 0x8a, 0x45, 0x81, 0x54, 0xa4, 0xaf, 0x86, 0x47, 0xb1, 0x08, 0xed, 0x8b, 0x4c,
+	0x3b, 0xc6, 0x37, 0x12, 0xb4, 0xdb, 0x8c, 0x3d, 0x60, 0x00, 0x87, 0xd9, 0xe2, 0x0d, 0x08, 0xa0,
+	0x78, 0xf1, 0x06, 0x04, 0x51, 0xba, 0x73, 0x0d, 0x18, 0x30, 0x76, 0x6a, 0xc0, 0x2b, 0x00, 0x1f,
+	0x54, 0x43, 0xb1, 0x7e, 0x55, 0xae, 0x4e, 0xe1, 0x90, 0x8f, 0xe2, 0x71, 0xd1, 0x3d, 0x17, 0x52,
+	0xdd, 0xed, 0x38, 0x34, 0xf4, 0x97, 0xbf, 0xca, 0x42, 0xe1, 0xb9, 0xd1, 0x31, 0x5d, 0x62, 0x1a,
+	0x66, 0x8b, 0xa0, 0x97, 0x90, 0x61, 0xa5, 0x31, 0x9c, 0xe5, 0x54, 0xac, 0x29, 0x9c, 0xe5, 0x02,
+	0x40, 0x0c, 0xbe, 0xc3, 0x34, 0xcf, 0xe3, 0x39, 0xa9, 0xb9, 0xe7, 0x8b, 0xaf, 0x33, 0x0c, 0x85,
+	0x4e, 0xf8, 0x9f, 0x21, 0x2b, 0xe0, 0xf9, 0x90, 0xb0, 0x00, 0xb6, 0x52, 0xbb, 0x1e, 0xdf, 0x99,
+	0xb4, 0xbd, 0x54, 0x55, 0x0e, 0xe3, 0xa5, 0xba, 0x5e, 0x03, 0xf8, 0x00, 0x61, 0xd8, 0xb9, 0x11,
+	0x3c, 0xb1, 0xb6, 0x90, 0xcc, 0x20, 0xf4, 0x3e, 0x60, 0x7a, 0x6f, 0xe3, 0xf9, 0x38, 0xbd, 0x6d,
+	0x8f, 0x9f, 0xea, 0x3e, 0x84, 0xc9, 0x75, 0xc3, 0x39, 0x46, 0xa1, 0x62, 0xa7, 0xfc, 0xe0, 0xa1,
+	0x56, 0x8b, 0xeb, 0x12, 0x9a, 0x6e, 0x33, 0x4d, 0x37, 0x70, 0x35, 0x4e, 0xd3, 0xb1, 0xe1, 0xd0,
+	0xea, 0x81, 0x8e, 0x21, 0xcb, 0x7f, 0x03, 0x11, 0xf6, 0x65, 0xe0, 0x77, 0x14, 0x61, 0x5f, 0x06,
+	0x7f, 0x36, 0x71, 0x31, 0x4d, 0x2e, 0x4c, 0xc9, 0x1f, 0x1e, 0xa0, 0x1b, 0xa1, 0xa5, 0x09, 0xfe,
+	0x48, 0xa1, 0x36, 0x9f, 0xd4, 0x2d, 0xf4, 0xdd, 0x67, 0xfa, 0x6e, 0xe1, 0xeb, 0xb1, 0x6b, 0x27,
+	0xb8, 0x9f, 0x68, 0x0f, 0xdf, 0xd4, 0x68, 0x99, 0x00, 0x1f, 0x64, 0x8d, 0x44, 0x47, 0x18, 0xaf,
+	0x8d, 0x44, 0x47, 0x04, 0x9f, 0xc5, 0xcb, 0x4c, 0xf9, 0x63, 0x7c, 0x3f, 0x4e, 0xb9, 0x6b, 0x1b,
+	0xa6, 0xf3, 0x92, 0xd8, 0x6f, 0x70, 0x30, 0xcd, 0x39, 0xee, 0xf4, 0x69, 0xa4, 0xfc, 0x65, 0x1a,
+	0x26, 0xe9, 0x79, 0x94, 0x96, 0x67, 0xff, 0x1a, 0x1f, 0xb6, 0x26, 0x02, 0x9e, 0x85, 0xad, 0x89,
+	0x22, 0x00, 0xd1, 0xf2, 0xcc, 0x7e, 0x27, 0x4f, 0x18, 0x13, 0xf5, 0xba, 0x03, 0x05, 0xe5, 0xae,
+	0x8f, 0x62, 0x04, 0x06, 0x91, 0xb9, 0x70, 0x5d, 0x88, 0x01, 0x0a, 0xf0, 0x4d, 0xa6, 0x73, 0x0e,
+	0xcf, 0x06, 0x74, 0xb6, 0x39, 0x17, 0x55, 0xfa, 0xaf, 0x50, 0x54, 0x31, 0x01, 0x14, 0x23, 0x33,
+	0x84, 0xfc, 0x85, 0x53, 0x62, 0x1c, 0xa4, 0x10, 0xcd, 0x0e, 0xde, 0xff, 0x09, 0x90, 0xac, 0x54,
+	0x79, 0x1f, 0x72, 0x02, 0x28, 0x88, 0x9b, 0x6d, 0x10, 0x2a, 0x8c, 0x9b, 0x6d, 0x08, 0x65, 0x88,
+	0x1e, 0xf3, 0x98, 0x56, 0x7a, 0x1f, 0x92, 0x25, 0x48, 0x68, 0x7c, 0x46, 0xdc, 0x24, 0x8d, 0x3e,
+	0xf6, 0x95, 0xa4, 0x51, 0xb9, 0x8b, 0x8e, 0xd2, 0x78, 0x44, 0x5c, 0x11, 0x4b, 0xf2, 0x9e, 0x87,
+	0x12, 0x04, 0xaa, 0x29, 0x1f, 0x8f, 0x62, 0x49, 0x3a, 0x95, 0xfb, 0x4a, 0x45, 0xbe, 0x47, 0x9f,
+	0x02, 0xf8, 0x90, 0x46, 0xf8, 0xb4, 0x15, 0x8b, 0x8b, 0x86, 0x4f, 0x5b, 0xf1, 0xa8, 0x48, 0x34,
+	0x7f, 0xf8, 0xba, 0xf9, 0xc5, 0x80, 0x6a, 0xff, 0x1f, 0x0d, 0x50, 0x14, 0x01, 0x41, 0x8f, 0xe2,
+	0x35, 0xc4, 0x22, 0xae, 0xb5, 0xc7, 0x17, 0x63, 0x4e, 0x2a, 0x11, 0xbe, 0x59, 0x2d, 0x36, 0xa2,
+	0xff, 0x8a, 0x1a, 0xf6, 0xb9, 0x06, 0xa5, 0x00, 0x84, 0x82, 0xee, 0x25, 0xac, 0x71, 0x08, 0xb4,
+	0xad, 0xdd, 0x3f, 0x97, 0x2f, 0xe9, 0x24, 0xa6, 0xec, 0x08, 0x79, 0x10, 0xff, 0x2f, 0x0d, 0xca,
+	0x41, 0xd8, 0x05, 0x25, 0xc8, 0x8f, 0x00, 0xbf, 0xb5, 0xc5, 0xf3, 0x19, 0xcf, 0x5f, 0x2a, 0xff,
+	0x6c, 0xde, 0x87, 0x9c, 0x00, 0x6b, 0xe2, 0x02, 0x22, 0x08, 0x1b, 0xc7, 0x05, 0x44, 0x08, 0xe9,
+	0x49, 0x08, 0x08, 0xdb, 0xea, 0x12, 0x25, 0x04, 0x05, 0xa2, 0x93, 0xa4, 0x71, 0x74, 0x08, 0x86,
+	0xe0, 0xa0, 0x51, 0x1a, 0xfd, 0x10, 0x94, 0x70, 0x0e, 0x4a, 0x10, 0x78, 0x4e, 0x08, 0x86, 0xd1,
+	0xa0, 0x84, 0x10, 0x64, 0x4a, 0x95, 0x10, 0xf4, 0xc1, 0x97, 0xb8, 0x10, 0x8c, 0x20, 0xe2, 0x71,
+	0x21, 0x18, 0xc5, 0x6f, 0x12, 0xd6, 0x95, 0xe9, 0x0e, 0x84, 0xe0, 0x4c, 0x0c, 0x56, 0x83, 0x1e,
+	0x27, 0x38, 0x34, 0x16, 0x6c, 0xaf, 0xbd, 0x71, 0x41, 0xee, 0x91, 0x7b, 0x9f, 0x2f, 0x85, 0xdc,
+	0xfb, 0xff, 0xaf, 0xc1, 0x6c, 0x1c, 0xd6, 0x83, 0x12, 0x74, 0x25, 0x00, 0xf5, 0xb5, 0xa5, 0x8b,
+	0xb2, 0x9f, 0xef, 0x35, 0x2f, 0x1a, 0x9e, 0x56, 0x7e, 0xfe, 0xcd, 0xbc, 0xf6, 0xab, 0x6f, 0xe6,
+	0xb5, 0xdf, 0x7e, 0x33, 0xaf, 0xfd, 0xef, 0xef, 0xe6, 0x27, 0x0e, 0xb3, 0xec, 0x7f, 0xa7, 0xbd,
+	0xfd, 0xd7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x77, 0x6b, 0x63, 0x24, 0x37, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// KVClient is the client API for KV service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type KVClient interface {
+	// Range gets the keys in the range from the key-value store.
+	Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error)
+	// Put puts the given key into the key-value store.
+	// A put request increments the revision of the key-value store
+	// and generates one event in the event history.
+	Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error)
+	// DeleteRange deletes the given range from the key-value store.
+	// A delete request increments the revision of the key-value store
+	// and generates a delete event in the event history for every deleted key.
+	DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error)
+	// Txn processes multiple requests in a single transaction.
+	// A txn request increments the revision of the key-value store
+	// and generates events with the same revision for every completed request.
+	// It is not allowed to modify the same key several times within one txn.
+	Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error)
+	// Compact compacts the event history in the etcd key-value store. The key-value
+	// store should be periodically compacted or the event history will continue to grow
+	// indefinitely.
+	Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error)
+}
+
+type kVClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewKVClient(cc *grpc.ClientConn) KVClient {
+	return &kVClient{cc}
+}
+
+func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) {
+	out := new(RangeResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) {
+	out := new(PutResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) {
+	out := new(DeleteRangeResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) {
+	out := new(TxnResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) {
+	out := new(CompactionResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// KVServer is the server API for KV service.
+type KVServer interface {
+	// Range gets the keys in the range from the key-value store.
+	Range(context.Context, *RangeRequest) (*RangeResponse, error)
+	// Put puts the given key into the key-value store.
+	// A put request increments the revision of the key-value store
+	// and generates one event in the event history.
+	Put(context.Context, *PutRequest) (*PutResponse, error)
+	// DeleteRange deletes the given range from the key-value store.
+	// A delete request increments the revision of the key-value store
+	// and generates a delete event in the event history for every deleted key.
+	DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error)
+	// Txn processes multiple requests in a single transaction.
+	// A txn request increments the revision of the key-value store
+	// and generates events with the same revision for every completed request.
+	// It is not allowed to modify the same key several times within one txn.
+	Txn(context.Context, *TxnRequest) (*TxnResponse, error)
+	// Compact compacts the event history in the etcd key-value store. The key-value
+	// store should be periodically compacted or the event history will continue to grow
+	// indefinitely.
+	Compact(context.Context, *CompactionRequest) (*CompactionResponse, error)
+}
+
+// UnimplementedKVServer can be embedded to have forward compatible implementations.
+type UnimplementedKVServer struct {
+}
+
+func (*UnimplementedKVServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Range not implemented")
+}
+func (*UnimplementedKVServer) Put(ctx context.Context, req *PutRequest) (*PutResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Put not implemented")
+}
+func (*UnimplementedKVServer) DeleteRange(ctx context.Context, req *DeleteRangeRequest) (*DeleteRangeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteRange not implemented")
+}
+func (*UnimplementedKVServer) Txn(ctx context.Context, req *TxnRequest) (*TxnResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Txn not implemented")
+}
+func (*UnimplementedKVServer) Compact(ctx context.Context, req *CompactionRequest) (*CompactionResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Compact not implemented")
+}
+
+func RegisterKVServer(s *grpc.Server, srv KVServer) {
+	s.RegisterService(&_KV_serviceDesc, srv)
+}
+
+func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(RangeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Range(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/Range",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Range(ctx, req.(*RangeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PutRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Put(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/Put",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Put(ctx, req.(*PutRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteRangeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).DeleteRange(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/DeleteRange",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(TxnRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Txn(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/Txn",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Txn(ctx, req.(*TxnRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CompactionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Compact(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/Compact",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Compact(ctx, req.(*CompactionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _KV_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.KV",
+	HandlerType: (*KVServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Range",
+			Handler:    _KV_Range_Handler,
+		},
+		{
+			MethodName: "Put",
+			Handler:    _KV_Put_Handler,
+		},
+		{
+			MethodName: "DeleteRange",
+			Handler:    _KV_DeleteRange_Handler,
+		},
+		{
+			MethodName: "Txn",
+			Handler:    _KV_Txn_Handler,
+		},
+		{
+			MethodName: "Compact",
+			Handler:    _KV_Compact_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "rpc.proto",
+}
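
For orientation while reviewing this vendored, generated file (this note is not part of the file itself): a minimal sketch of how the generated KV client above might be called. The vendor import path and the PutRequest/RangeRequest/RangeResponse field names (Key, Value, Kvs, ModRevision) are assumptions taken from the etcd v3 message definitions earlier in this file, which are not shown in this hunk.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	pb "go.etcd.io/etcd/etcdserver/etcdserverpb" // assumed vendor path of this generated package
	"google.golang.org/grpc"
)

func main() {
	// Dial a single etcd member; grpc.WithInsecure only keeps the sketch short.
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial etcd: %v", err)
	}
	defer conn.Close()

	kv := pb.NewKVClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// Put increments the store revision and records one event in the history.
	if _, err := kv.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		log.Fatalf("put: %v", err)
	}

	// Range reads the key back; field names are assumed from the messages above.
	resp, err := kv.Range(ctx, &pb.RangeRequest{Key: []byte("foo")})
	if err != nil {
		log.Fatalf("range: %v", err)
	}
	for _, item := range resp.Kvs {
		fmt.Printf("%s=%s (mod revision %d)\n", item.Key, item.Value, item.ModRevision)
	}
}
```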
+
+// WatchClient is the client API for Watch service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type WatchClient interface {
+	// Watch watches for events happening or that have happened. Both input and output
+	// are streams; the input stream is for creating and canceling watchers and the output
+	// stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+	// for several watches at once. The entire event history can be watched starting from the
+	// last compaction revision.
+	Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error)
+}
+
+type watchClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewWatchClient(cc *grpc.ClientConn) WatchClient {
+	return &watchClient{cc}
+}
+
+func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Watch_serviceDesc.Streams[0], "/etcdserverpb.Watch/Watch", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &watchWatchClient{stream}
+	return x, nil
+}
+
+type Watch_WatchClient interface {
+	Send(*WatchRequest) error
+	Recv() (*WatchResponse, error)
+	grpc.ClientStream
+}
+
+type watchWatchClient struct {
+	grpc.ClientStream
+}
+
+func (x *watchWatchClient) Send(m *WatchRequest) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *watchWatchClient) Recv() (*WatchResponse, error) {
+	m := new(WatchResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// WatchServer is the server API for Watch service.
+type WatchServer interface {
+	// Watch watches for events happening or that have happened. Both input and output
+	// are streams; the input stream is for creating and canceling watchers and the output
+	// stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+	// for several watches at once. The entire event history can be watched starting from the
+	// last compaction revision.
+	Watch(Watch_WatchServer) error
+}
+
+// UnimplementedWatchServer can be embedded to have forward compatible implementations.
+type UnimplementedWatchServer struct {
+}
+
+func (*UnimplementedWatchServer) Watch(srv Watch_WatchServer) error {
+	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
+}
+
+func RegisterWatchServer(s *grpc.Server, srv WatchServer) {
+	s.RegisterService(&_Watch_serviceDesc, srv)
+}
+
+func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(WatchServer).Watch(&watchWatchServer{stream})
+}
+
+type Watch_WatchServer interface {
+	Send(*WatchResponse) error
+	Recv() (*WatchRequest, error)
+	grpc.ServerStream
+}
+
+type watchWatchServer struct {
+	grpc.ServerStream
+}
+
+func (x *watchWatchServer) Send(m *WatchResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *watchWatchServer) Recv() (*WatchRequest, error) {
+	m := new(WatchRequest)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+var _Watch_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Watch",
+	HandlerType: (*WatchServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Watch",
+			Handler:       _Watch_Watch_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "rpc.proto",
+}
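
A hedged sketch of driving the bidirectional Watch stream generated above: the send side creates a watcher, the receive side loops over events. The WatchRequest oneof wrapper name (WatchRequest_CreateRequest), the WatchCreateRequest Key field, and the Events/Kv/Type response fields are assumed from the message definitions earlier in this file; imports are as in the KV sketch above.

```go
// watchKey creates one watcher on the Watch stream and logs its events until
// the stream ends or ctx is canceled. Oneof wrapper and field names are assumed
// from the etcd v3 messages defined earlier in this file.
func watchKey(ctx context.Context, wc pb.WatchClient, key []byte) error {
	stream, err := wc.Watch(ctx)
	if err != nil {
		return err
	}
	create := &pb.WatchRequest{
		RequestUnion: &pb.WatchRequest_CreateRequest{
			CreateRequest: &pb.WatchCreateRequest{Key: key},
		},
	}
	if err := stream.Send(create); err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			return err // stream closed by the server or ctx canceled
		}
		for _, ev := range resp.Events {
			log.Printf("event %v on %q", ev.Type, ev.Kv.Key)
		}
	}
}
```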
+
+// LeaseClient is the client API for Lease service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type LeaseClient interface {
+	// LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+	// within a given time to live period. All keys attached to the lease will be expired and
+	// deleted if the lease expires. Each expired key generates a delete event in the event history.
+	LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error)
+	// LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+	LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error)
+	// LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+	// to the server and streaming keep alive responses from the server to the client.
+	LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error)
+	// LeaseTimeToLive retrieves lease information.
+	LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error)
+	// LeaseLeases lists all existing leases.
+	LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error)
+}
+
+type leaseClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewLeaseClient(cc *grpc.ClientConn) LeaseClient {
+	return &leaseClient{cc}
+}
+
+func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) {
+	out := new(LeaseGrantResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) {
+	out := new(LeaseRevokeResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Lease_serviceDesc.Streams[0], "/etcdserverpb.Lease/LeaseKeepAlive", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &leaseLeaseKeepAliveClient{stream}
+	return x, nil
+}
+
+type Lease_LeaseKeepAliveClient interface {
+	Send(*LeaseKeepAliveRequest) error
+	Recv() (*LeaseKeepAliveResponse, error)
+	grpc.ClientStream
+}
+
+type leaseLeaseKeepAliveClient struct {
+	grpc.ClientStream
+}
+
+func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) {
+	m := new(LeaseKeepAliveResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) {
+	out := new(LeaseTimeToLiveResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) {
+	out := new(LeaseLeasesResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// LeaseServer is the server API for Lease service.
+type LeaseServer interface {
+	// LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+	// within a given time to live period. All keys attached to the lease will be expired and
+	// deleted if the lease expires. Each expired key generates a delete event in the event history.
+	LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error)
+	// LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+	LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error)
+	// LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+	// to the server and streaming keep alive responses from the server to the client.
+	LeaseKeepAlive(Lease_LeaseKeepAliveServer) error
+	// LeaseTimeToLive retrieves lease information.
+	LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error)
+	// LeaseLeases lists all existing leases.
+	LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error)
+}
+
+// UnimplementedLeaseServer can be embedded to have forward compatible implementations.
+type UnimplementedLeaseServer struct {
+}
+
+func (*UnimplementedLeaseServer) LeaseGrant(ctx context.Context, req *LeaseGrantRequest) (*LeaseGrantResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method LeaseGrant not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseRevoke(ctx context.Context, req *LeaseRevokeRequest) (*LeaseRevokeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method LeaseRevoke not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseKeepAlive(srv Lease_LeaseKeepAliveServer) error {
+	return status.Errorf(codes.Unimplemented, "method LeaseKeepAlive not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseTimeToLive(ctx context.Context, req *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method LeaseTimeToLive not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseLeases(ctx context.Context, req *LeaseLeasesRequest) (*LeaseLeasesResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method LeaseLeases not implemented")
+}
+
+func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) {
+	s.RegisterService(&_Lease_serviceDesc, srv)
+}
+
+func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseGrantRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseGrant(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseGrant",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseRevokeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseRevoke(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseRevoke",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream})
+}
+
+type Lease_LeaseKeepAliveServer interface {
+	Send(*LeaseKeepAliveResponse) error
+	Recv() (*LeaseKeepAliveRequest, error)
+	grpc.ServerStream
+}
+
+type leaseLeaseKeepAliveServer struct {
+	grpc.ServerStream
+}
+
+func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) {
+	m := new(LeaseKeepAliveRequest)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseTimeToLiveRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseTimeToLive(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseLeasesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseLeases(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseLeases",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Lease_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Lease",
+	HandlerType: (*LeaseServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "LeaseGrant",
+			Handler:    _Lease_LeaseGrant_Handler,
+		},
+		{
+			MethodName: "LeaseRevoke",
+			Handler:    _Lease_LeaseRevoke_Handler,
+		},
+		{
+			MethodName: "LeaseTimeToLive",
+			Handler:    _Lease_LeaseTimeToLive_Handler,
+		},
+		{
+			MethodName: "LeaseLeases",
+			Handler:    _Lease_LeaseLeases_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "LeaseKeepAlive",
+			Handler:       _Lease_LeaseKeepAlive_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "rpc.proto",
+}
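
A sketch of the Lease flow implied by the client above: grant a lease, then refresh it on the bidirectional LeaseKeepAlive stream. The TTL and ID field names on the request and response messages are assumptions based on the definitions earlier in this file; imports are as in the KV sketch.

```go
// keepLeaseAlive grants a 10-second lease and refreshes it every few seconds
// until ctx is canceled. Each Send is paired with a Recv to drain the server's
// keep-alive response. Field names (TTL, ID) are assumed from the messages above.
func keepLeaseAlive(ctx context.Context, lc pb.LeaseClient) error {
	grant, err := lc.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 10})
	if err != nil {
		return err
	}
	stream, err := lc.LeaseKeepAlive(ctx)
	if err != nil {
		return err
	}
	ticker := time.NewTicker(3 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			if err := stream.Send(&pb.LeaseKeepAliveRequest{ID: grant.ID}); err != nil {
				return err
			}
			if _, err := stream.Recv(); err != nil {
				return err
			}
		}
	}
}
```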
+
+// ClusterClient is the client API for Cluster service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ClusterClient interface {
+	// MemberAdd adds a member into the cluster.
+	MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error)
+	// MemberRemove removes an existing member from the cluster.
+	MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error)
+	// MemberUpdate updates the member configuration.
+	MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error)
+	// MemberList lists all the members in the cluster.
+	MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error)
+}
+
+type clusterClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewClusterClient(cc *grpc.ClientConn) ClusterClient {
+	return &clusterClient{cc}
+}
+
+func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) {
+	out := new(MemberAddResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) {
+	out := new(MemberRemoveResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) {
+	out := new(MemberUpdateResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) {
+	out := new(MemberListResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// ClusterServer is the server API for Cluster service.
+type ClusterServer interface {
+	// MemberAdd adds a member into the cluster.
+	MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error)
+	// MemberRemove removes an existing member from the cluster.
+	MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error)
+	// MemberUpdate updates the member configuration.
+	MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error)
+	// MemberList lists all the members in the cluster.
+	MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error)
+}
+
+// UnimplementedClusterServer can be embedded to have forward compatible implementations.
+type UnimplementedClusterServer struct {
+}
+
+func (*UnimplementedClusterServer) MemberAdd(ctx context.Context, req *MemberAddRequest) (*MemberAddResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MemberAdd not implemented")
+}
+func (*UnimplementedClusterServer) MemberRemove(ctx context.Context, req *MemberRemoveRequest) (*MemberRemoveResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MemberRemove not implemented")
+}
+func (*UnimplementedClusterServer) MemberUpdate(ctx context.Context, req *MemberUpdateRequest) (*MemberUpdateResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MemberUpdate not implemented")
+}
+func (*UnimplementedClusterServer) MemberList(ctx context.Context, req *MemberListRequest) (*MemberListResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MemberList not implemented")
+}
+
+func RegisterClusterServer(s *grpc.Server, srv ClusterServer) {
+	s.RegisterService(&_Cluster_serviceDesc, srv)
+}
+
+func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberAddRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberAdd(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberAdd",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberRemoveRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberRemove(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberRemove",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberUpdateRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberListRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberList(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberList",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Cluster_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Cluster",
+	HandlerType: (*ClusterServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "MemberAdd",
+			Handler:    _Cluster_MemberAdd_Handler,
+		},
+		{
+			MethodName: "MemberRemove",
+			Handler:    _Cluster_MemberRemove_Handler,
+		},
+		{
+			MethodName: "MemberUpdate",
+			Handler:    _Cluster_MemberUpdate_Handler,
+		},
+		{
+			MethodName: "MemberList",
+			Handler:    _Cluster_MemberList_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "rpc.proto",
+}
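
A short sketch of a Cluster service call using the client above. MemberListRequest is assumed to carry no fields in this proto version, and the Member Name/ClientURLs fields are assumed from definitions earlier in this file; imports are as in the KV sketch.

```go
// listMembers queries the Cluster service for the current membership and logs
// each member's name and client URLs.
func listMembers(ctx context.Context, cc pb.ClusterClient) error {
	resp, err := cc.MemberList(ctx, &pb.MemberListRequest{})
	if err != nil {
		return err
	}
	for _, m := range resp.Members {
		log.Printf("member %s serves clients at %v", m.Name, m.ClientURLs)
	}
	return nil
}
```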
+
+// MaintenanceClient is the client API for Maintenance service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MaintenanceClient interface {
+	// Alarm activates, deactivates, and queries alarms regarding cluster health.
+	Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error)
+	// Status gets the status of the member.
+	Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
+	// Defragment defragments a member's backend database to recover storage space.
+	Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error)
+	// Hash computes the hash of the KV's backend.
+	// This is designed for testing; do not use this in production when there
+	// are ongoing transactions.
+	Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error)
+	// HashKV computes the hash of all MVCC keys up to a given revision.
+	HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error)
+	// Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+	Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error)
+	// MoveLeader requests the current leader node to transfer its leadership to the transferee.
+	MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error)
+}
+
+type maintenanceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient {
+	return &maintenanceClient{cc}
+}
+
+func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) {
+	out := new(AlarmResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
+	out := new(StatusResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) {
+	out := new(DefragmentResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) {
+	out := new(HashResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) {
+	out := new(HashKVResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Maintenance_serviceDesc.Streams[0], "/etcdserverpb.Maintenance/Snapshot", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &maintenanceSnapshotClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Maintenance_SnapshotClient interface {
+	Recv() (*SnapshotResponse, error)
+	grpc.ClientStream
+}
+
+type maintenanceSnapshotClient struct {
+	grpc.ClientStream
+}
+
+func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) {
+	m := new(SnapshotResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) {
+	out := new(MoveLeaderResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// MaintenanceServer is the server API for Maintenance service.
+type MaintenanceServer interface {
+	// Alarm activates, deactivates, and queries alarms regarding cluster health.
+	Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error)
+	// Status gets the status of the member.
+	Status(context.Context, *StatusRequest) (*StatusResponse, error)
+	// Defragment defragments a member's backend database to recover storage space.
+	Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error)
+	// Hash computes the hash of the KV's backend.
+	// This is designed for testing; do not use this in production when there
+	// are ongoing transactions.
+	Hash(context.Context, *HashRequest) (*HashResponse, error)
+	// HashKV computes the hash of all MVCC keys up to a given revision.
+	HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error)
+	// Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+	Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error
+	// MoveLeader requests the current leader node to transfer its leadership to the transferee.
+	MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error)
+}
+
+// UnimplementedMaintenanceServer can be embedded to have forward compatible implementations.
+type UnimplementedMaintenanceServer struct {
+}
+
+func (*UnimplementedMaintenanceServer) Alarm(ctx context.Context, req *AlarmRequest) (*AlarmResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Alarm not implemented")
+}
+func (*UnimplementedMaintenanceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Status not implemented")
+}
+func (*UnimplementedMaintenanceServer) Defragment(ctx context.Context, req *DefragmentRequest) (*DefragmentResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Defragment not implemented")
+}
+func (*UnimplementedMaintenanceServer) Hash(ctx context.Context, req *HashRequest) (*HashResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Hash not implemented")
+}
+func (*UnimplementedMaintenanceServer) HashKV(ctx context.Context, req *HashKVRequest) (*HashKVResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method HashKV not implemented")
+}
+func (*UnimplementedMaintenanceServer) Snapshot(req *SnapshotRequest, srv Maintenance_SnapshotServer) error {
+	return status.Errorf(codes.Unimplemented, "method Snapshot not implemented")
+}
+func (*UnimplementedMaintenanceServer) MoveLeader(ctx context.Context, req *MoveLeaderRequest) (*MoveLeaderResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MoveLeader not implemented")
+}
+
+func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) {
+	s.RegisterService(&_Maintenance_serviceDesc, srv)
+}
+
+func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AlarmRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).Alarm(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/Alarm",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(StatusRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).Status(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/Status",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DefragmentRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).Defragment(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/Defragment",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(HashRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).Hash(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/Hash",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(HashKVRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).HashKV(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/HashKV",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(SnapshotRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream})
+}
+
+type Maintenance_SnapshotServer interface {
+	Send(*SnapshotResponse) error
+	grpc.ServerStream
+}
+
+type maintenanceSnapshotServer struct {
+	grpc.ServerStream
+}
+
+func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MoveLeaderRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).MoveLeader(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/MoveLeader",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Maintenance_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Maintenance",
+	HandlerType: (*MaintenanceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Alarm",
+			Handler:    _Maintenance_Alarm_Handler,
+		},
+		{
+			MethodName: "Status",
+			Handler:    _Maintenance_Status_Handler,
+		},
+		{
+			MethodName: "Defragment",
+			Handler:    _Maintenance_Defragment_Handler,
+		},
+		{
+			MethodName: "Hash",
+			Handler:    _Maintenance_Hash_Handler,
+		},
+		{
+			MethodName: "HashKV",
+			Handler:    _Maintenance_HashKV_Handler,
+		},
+		{
+			MethodName: "MoveLeader",
+			Handler:    _Maintenance_MoveLeader_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Snapshot",
+			Handler:       _Maintenance_Snapshot_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "rpc.proto",
+}
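
A sketch of consuming the server-streaming Snapshot RPC generated above, writing the raw backend chunks to an io.Writer. The Blob field name on SnapshotResponse is assumed from the message definition earlier in this file; this needs "io" in addition to the imports from the KV sketch.

```go
// saveSnapshot streams the backend snapshot chunk by chunk and writes it to w.
// The stream ends with io.EOF after the server sends the final chunk.
func saveSnapshot(ctx context.Context, mc pb.MaintenanceClient, w io.Writer) error {
	stream, err := mc.Snapshot(ctx, &pb.SnapshotRequest{})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // server closed the stream after the final chunk
		}
		if err != nil {
			return err
		}
		if _, err := w.Write(resp.Blob); err != nil {
			return err
		}
	}
}
```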
+
+// AuthClient is the client API for Auth service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type AuthClient interface {
+	// AuthEnable enables authentication.
+	AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error)
+	// AuthDisable disables authentication.
+	AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error)
+	// Authenticate processes an authenticate request.
+	Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error)
+	// UserAdd adds a new user.
+	UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error)
+	// UserGet gets detailed user information.
+	UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error)
+	// UserList gets a list of all users.
+	UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error)
+	// UserDelete deletes a specified user.
+	UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error)
+	// UserChangePassword changes the password of a specified user.
+	UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error)
+	// UserGrantRole grants a role to a specified user.
+	UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error)
+	// UserRevokeRole revokes a role of a specified user.
+	UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error)
+	// RoleAdd adds a new role.
+	RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error)
+	// RoleGet gets detailed role information.
+	RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error)
+	// RoleList gets a list of all roles.
+	RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error)
+	// RoleDelete deletes a specified role.
+	RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error)
+	// RoleGrantPermission grants a permission of a specified key or range to a specified role.
+	RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error)
+	// RoleRevokePermission revokes a key or range permission of a specified role.
+	RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error)
+}
+
+type authClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewAuthClient(cc *grpc.ClientConn) AuthClient {
+	return &authClient{cc}
+}
+
+func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) {
+	out := new(AuthEnableResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) {
+	out := new(AuthDisableResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) {
+	out := new(AuthenticateResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) {
+	out := new(AuthUserAddResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) {
+	out := new(AuthUserGetResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) {
+	out := new(AuthUserListResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) {
+	out := new(AuthUserDeleteResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) {
+	out := new(AuthUserChangePasswordResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) {
+	out := new(AuthUserGrantRoleResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) {
+	out := new(AuthUserRevokeRoleResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) {
+	out := new(AuthRoleAddResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) {
+	out := new(AuthRoleGetResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) {
+	out := new(AuthRoleListResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) {
+	out := new(AuthRoleDeleteResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) {
+	out := new(AuthRoleGrantPermissionResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) {
+	out := new(AuthRoleRevokePermissionResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// AuthServer is the server API for Auth service.
+type AuthServer interface {
+	// AuthEnable enables authentication.
+	AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error)
+	// AuthDisable disables authentication.
+	AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error)
+	// Authenticate processes an authenticate request.
+	Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error)
+	// UserAdd adds a new user.
+	UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error)
+	// UserGet gets detailed user information.
+	UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error)
+	// UserList gets a list of all users.
+	UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error)
+	// UserDelete deletes a specified user.
+	UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error)
+	// UserChangePassword changes the password of a specified user.
+	UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error)
+	// UserGrantRole grants a role to a specified user.
+	UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error)
+	// UserRevokeRole revokes a role of a specified user.
+	UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error)
+	// RoleAdd adds a new role.
+	RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error)
+	// RoleGet gets detailed role information.
+	RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error)
+	// RoleList gets a list of all roles.
+	RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error)
+	// RoleDelete deletes a specified role.
+	RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error)
+	// RoleGrantPermission grants a permission of a specified key or range to a specified role.
+	RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error)
+	// RoleRevokePermission revokes a key or range permission of a specified role.
+	RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error)
+}
+
+// UnimplementedAuthServer can be embedded to have forward compatible implementations.
+type UnimplementedAuthServer struct {
+}
+
+func (*UnimplementedAuthServer) AuthEnable(ctx context.Context, req *AuthEnableRequest) (*AuthEnableResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method AuthEnable not implemented")
+}
+func (*UnimplementedAuthServer) AuthDisable(ctx context.Context, req *AuthDisableRequest) (*AuthDisableResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method AuthDisable not implemented")
+}
+func (*UnimplementedAuthServer) Authenticate(ctx context.Context, req *AuthenticateRequest) (*AuthenticateResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Authenticate not implemented")
+}
+func (*UnimplementedAuthServer) UserAdd(ctx context.Context, req *AuthUserAddRequest) (*AuthUserAddResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserAdd not implemented")
+}
+func (*UnimplementedAuthServer) UserGet(ctx context.Context, req *AuthUserGetRequest) (*AuthUserGetResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserGet not implemented")
+}
+func (*UnimplementedAuthServer) UserList(ctx context.Context, req *AuthUserListRequest) (*AuthUserListResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserList not implemented")
+}
+func (*UnimplementedAuthServer) UserDelete(ctx context.Context, req *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserDelete not implemented")
+}
+func (*UnimplementedAuthServer) UserChangePassword(ctx context.Context, req *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserChangePassword not implemented")
+}
+func (*UnimplementedAuthServer) UserGrantRole(ctx context.Context, req *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserGrantRole not implemented")
+}
+func (*UnimplementedAuthServer) UserRevokeRole(ctx context.Context, req *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserRevokeRole not implemented")
+}
+func (*UnimplementedAuthServer) RoleAdd(ctx context.Context, req *AuthRoleAddRequest) (*AuthRoleAddResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleAdd not implemented")
+}
+func (*UnimplementedAuthServer) RoleGet(ctx context.Context, req *AuthRoleGetRequest) (*AuthRoleGetResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleGet not implemented")
+}
+func (*UnimplementedAuthServer) RoleList(ctx context.Context, req *AuthRoleListRequest) (*AuthRoleListResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleList not implemented")
+}
+func (*UnimplementedAuthServer) RoleDelete(ctx context.Context, req *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleDelete not implemented")
+}
+func (*UnimplementedAuthServer) RoleGrantPermission(ctx context.Context, req *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleGrantPermission not implemented")
+}
+func (*UnimplementedAuthServer) RoleRevokePermission(ctx context.Context, req *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleRevokePermission not implemented")
+}
+
+func RegisterAuthServer(s *grpc.Server, srv AuthServer) {
+	s.RegisterService(&_Auth_serviceDesc, srv)
+}
+
+func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthEnableRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).AuthEnable(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/AuthEnable",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthDisableRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).AuthDisable(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/AuthDisable",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthenticateRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).Authenticate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/Authenticate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserAddRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserAdd(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserAdd",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserGetRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserGet(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserGet",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserListRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserList(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserList",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserDeleteRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserDelete(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserDelete",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserChangePasswordRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserChangePassword(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserChangePassword",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserGrantRoleRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserGrantRole(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserGrantRole",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserRevokeRoleRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserRevokeRole(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserRevokeRole",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleAddRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleAdd(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleAdd",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleGetRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleGet(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleGet",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleListRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleList(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleList",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleDeleteRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleDelete(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleDelete",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleGrantPermissionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleGrantPermission(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleGrantPermission",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleRevokePermissionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleRevokePermission(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleRevokePermission",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Auth_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Auth",
+	HandlerType: (*AuthServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "AuthEnable",
+			Handler:    _Auth_AuthEnable_Handler,
+		},
+		{
+			MethodName: "AuthDisable",
+			Handler:    _Auth_AuthDisable_Handler,
+		},
+		{
+			MethodName: "Authenticate",
+			Handler:    _Auth_Authenticate_Handler,
+		},
+		{
+			MethodName: "UserAdd",
+			Handler:    _Auth_UserAdd_Handler,
+		},
+		{
+			MethodName: "UserGet",
+			Handler:    _Auth_UserGet_Handler,
+		},
+		{
+			MethodName: "UserList",
+			Handler:    _Auth_UserList_Handler,
+		},
+		{
+			MethodName: "UserDelete",
+			Handler:    _Auth_UserDelete_Handler,
+		},
+		{
+			MethodName: "UserChangePassword",
+			Handler:    _Auth_UserChangePassword_Handler,
+		},
+		{
+			MethodName: "UserGrantRole",
+			Handler:    _Auth_UserGrantRole_Handler,
+		},
+		{
+			MethodName: "UserRevokeRole",
+			Handler:    _Auth_UserRevokeRole_Handler,
+		},
+		{
+			MethodName: "RoleAdd",
+			Handler:    _Auth_RoleAdd_Handler,
+		},
+		{
+			MethodName: "RoleGet",
+			Handler:    _Auth_RoleGet_Handler,
+		},
+		{
+			MethodName: "RoleList",
+			Handler:    _Auth_RoleList_Handler,
+		},
+		{
+			MethodName: "RoleDelete",
+			Handler:    _Auth_RoleDelete_Handler,
+		},
+		{
+			MethodName: "RoleGrantPermission",
+			Handler:    _Auth_RoleGrantPermission_Handler,
+		},
+		{
+			MethodName: "RoleRevokePermission",
+			Handler:    _Auth_RoleRevokePermission_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "rpc.proto",
+}
+
+func (m *ResponseHeader) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.RaftTerm != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.Revision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.MemberId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MemberId))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.ClusterId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *RangeRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.MaxCreateRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision))
+		i--
+		dAtA[i] = 0x68
+	}
+	if m.MinCreateRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision))
+		i--
+		dAtA[i] = 0x60
+	}
+	if m.MaxModRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision))
+		i--
+		dAtA[i] = 0x58
+	}
+	if m.MinModRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision))
+		i--
+		dAtA[i] = 0x50
+	}
+	if m.CountOnly {
+		i--
+		if m.CountOnly {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x48
+	}
+	if m.KeysOnly {
+		i--
+		if m.KeysOnly {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x40
+	}
+	if m.Serializable {
+		i--
+		if m.Serializable {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x38
+	}
+	if m.SortTarget != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget))
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.SortOrder != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.Revision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.Limit != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Limit))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *RangeResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Count != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Count))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.More {
+		i--
+		if m.More {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Kvs) > 0 {
+		for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PutRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PutRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.IgnoreLease {
+		i--
+		if m.IgnoreLease {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.IgnoreValue {
+		i--
+		if m.IgnoreValue {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.PrevKv {
+		i--
+		if m.PrevKv {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.Lease != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Value) > 0 {
+		i -= len(m.Value)
+		copy(dAtA[i:], m.Value)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PutResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PutResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.PrevKv != nil {
+		{
+			size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.PrevKv {
+		i--
+		if m.PrevKv {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteRangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.PrevKvs) > 0 {
+		for iNdEx := len(m.PrevKvs) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.PrevKvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Deleted != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Deleted))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *RequestOp) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Request != nil {
+		{
+			size := m.Request.Size()
+			i -= size
+			if _, err := m.Request.MarshalTo(dAtA[i:]); err != nil {
+				return 0, err
+			}
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *RequestOp_RequestRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.RequestRange != nil {
+		{
+			size, err := m.RequestRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *RequestOp_RequestPut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.RequestPut != nil {
+		{
+			size, err := m.RequestPut.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	return len(dAtA) - i, nil
+}
+func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *RequestOp_RequestDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.RequestDeleteRange != nil {
+		{
+			size, err := m.RequestDeleteRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *RequestOp_RequestTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.RequestTxn != nil {
+		{
+			size, err := m.RequestTxn.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	return len(dAtA) - i, nil
+}
+func (m *ResponseOp) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Response != nil {
+		{
+			size := m.Response.Size()
+			i -= size
+			if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil {
+				return 0, err
+			}
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *ResponseOp_ResponseRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ResponseRange != nil {
+		{
+			size, err := m.ResponseRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *ResponseOp_ResponsePut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ResponsePut != nil {
+		{
+			size, err := m.ResponsePut.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	return len(dAtA) - i, nil
+}
+func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *ResponseOp_ResponseDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ResponseDeleteRange != nil {
+		{
+			size, err := m.ResponseDeleteRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *ResponseOp_ResponseTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ResponseTxn != nil {
+		{
+			size, err := m.ResponseTxn.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	return len(dAtA) - i, nil
+}
+func (m *Compare) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Compare) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Compare) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x4
+		i--
+		dAtA[i] = 0x82
+	}
+	if m.TargetUnion != nil {
+		{
+			size := m.TargetUnion.Size()
+			i -= size
+			if _, err := m.TargetUnion.MarshalTo(dAtA[i:]); err != nil {
+				return 0, err
+			}
+		}
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Target != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Target))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Result != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Result))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *Compare_Version) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i = encodeVarintRpc(dAtA, i, uint64(m.Version))
+	i--
+	dAtA[i] = 0x20
+	return len(dAtA) - i, nil
+}
+func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *Compare_CreateRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision))
+	i--
+	dAtA[i] = 0x28
+	return len(dAtA) - i, nil
+}
+func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *Compare_ModRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision))
+	i--
+	dAtA[i] = 0x30
+	return len(dAtA) - i, nil
+}
+func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *Compare_Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.Value != nil {
+		i -= len(m.Value)
+		copy(dAtA[i:], m.Value)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
+		i--
+		dAtA[i] = 0x3a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *Compare_Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
+	i--
+	dAtA[i] = 0x40
+	return len(dAtA) - i, nil
+}
+func (m *TxnRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TxnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Failure) > 0 {
+		for iNdEx := len(m.Failure) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Failure[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Success) > 0 {
+		for iNdEx := len(m.Success) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Success[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Compare) > 0 {
+		for iNdEx := len(m.Compare) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Compare[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *TxnResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TxnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Responses) > 0 {
+		for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Succeeded {
+		i--
+		if m.Succeeded {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *CompactionRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CompactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Physical {
+		i--
+		if m.Physical {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Revision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *CompactionResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CompactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *HashRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *HashKVRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashKVRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Revision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *HashKVResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashKVResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.CompactRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.Hash != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *HashResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Hash != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Blob) > 0 {
+		i -= len(m.Blob)
+		copy(dAtA[i:], m.Blob)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.RemainingBytes != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.RequestUnion != nil {
+		{
+			size := m.RequestUnion.Size()
+			i -= size
+			if _, err := m.RequestUnion.MarshalTo(dAtA[i:]); err != nil {
+				return 0, err
+			}
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *WatchRequest_CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.CreateRequest != nil {
+		{
+			size, err := m.CreateRequest.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *WatchRequest_CancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.CancelRequest != nil {
+		{
+			size, err := m.CancelRequest.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	return len(dAtA) - i, nil
+}
+func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *WatchRequest_ProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ProgressRequest != nil {
+		{
+			size, err := m.ProgressRequest.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Fragment {
+		i--
+		if m.Fragment {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x40
+	}
+	if m.WatchId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+		i--
+		dAtA[i] = 0x38
+	}
+	if m.PrevKv {
+		i--
+		if m.PrevKv {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x30
+	}
+	if len(m.Filters) > 0 {
+		dAtA22 := make([]byte, len(m.Filters)*10)
+		var j21 int
+		for _, num := range m.Filters {
+			for num >= 1<<7 {
+				dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80)
+				num >>= 7
+				j21++
+			}
+			dAtA22[j21] = uint8(num)
+			j21++
+		}
+		i -= j21
+		copy(dAtA[i:], dAtA22[:j21])
+		i = encodeVarintRpc(dAtA, i, uint64(j21))
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.ProgressNotify {
+		i--
+		if m.ProgressNotify {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.StartRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchCancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.WatchId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Events) > 0 {
+		for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x5a
+		}
+	}
+	if m.Fragment {
+		i--
+		if m.Fragment {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x38
+	}
+	if len(m.CancelReason) > 0 {
+		i -= len(m.CancelReason)
+		copy(dAtA[i:], m.CancelReason)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason)))
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.CompactRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.Canceled {
+		i--
+		if m.Canceled {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.Created {
+		i--
+		if m.Created {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.WatchId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseGrantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.TTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseGrantResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Error) > 0 {
+		i -= len(m.Error)
+		copy(dAtA[i:], m.Error)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Error)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.TTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseRevokeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseRevokeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseKeepAliveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseKeepAliveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.TTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseTimeToLiveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Keys {
+		i--
+		if m.Keys {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseTimeToLiveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Keys) > 0 {
+		for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Keys[iNdEx])
+			copy(dAtA[i:], m.Keys[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.Keys[iNdEx])))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if m.GrantedTTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.TTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Leases) > 0 {
+		for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Member) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Member) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.ClientURLs) > 0 {
+		for iNdEx := len(m.ClientURLs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ClientURLs[iNdEx])
+			copy(dAtA[i:], m.ClientURLs[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientURLs[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.PeerURLs) > 0 {
+		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.PeerURLs[iNdEx])
+			copy(dAtA[i:], m.PeerURLs[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.PeerURLs) > 0 {
+		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.PeerURLs[iNdEx])
+			copy(dAtA[i:], m.PeerURLs[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Members) > 0 {
+		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Member != nil {
+		{
+			size, err := m.Member.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberRemoveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberRemoveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Members) > 0 {
+		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.PeerURLs) > 0 {
+		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.PeerURLs[iNdEx])
+			copy(dAtA[i:], m.PeerURLs[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberUpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Members) > 0 {
+		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberListRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberListResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Members) > 0 {
+		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DefragmentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DefragmentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MoveLeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.TargetID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TargetID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MoveLeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AlarmRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AlarmRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Alarm != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.MemberID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Action != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Action))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AlarmMember) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AlarmMember) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Alarm != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.MemberID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AlarmResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AlarmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Alarms) > 0 {
+		for iNdEx := len(m.Alarms) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Alarms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.RaftTerm != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.RaftIndex != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.Leader != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Leader))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.DbSize != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.DbSize))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Version) > 0 {
+		i -= len(m.Version)
+		copy(dAtA[i:], m.Version)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthEnableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthDisableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserChangePasswordRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGrantRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.User) > 0 {
+		i -= len(m.User)
+		copy(dAtA[i:], m.User)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.User)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserRevokeRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGrantPermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Perm != nil {
+		{
+			size, err := m.Perm.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleRevokePermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthEnableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthDisableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthenticateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Token) > 0 {
+		i -= len(m.Token)
+		copy(dAtA[i:], m.Token)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Token)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Roles) > 0 {
+		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Roles[iNdEx])
+			copy(dAtA[i:], m.Roles[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserChangePasswordResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGrantRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserRevokeRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Perm) > 0 {
+		for iNdEx := len(m.Perm) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Perm[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Roles) > 0 {
+		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Roles[iNdEx])
+			copy(dAtA[i:], m.Roles[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Users) > 0 {
+		for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Users[iNdEx])
+			copy(dAtA[i:], m.Users[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.Users[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGrantPermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleRevokePermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintRpc(dAtA []byte, offset int, v uint64) int {
+	offset -= sovRpc(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *ResponseHeader) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ClusterId != 0 {
+		n += 1 + sovRpc(uint64(m.ClusterId))
+	}
+	if m.MemberId != 0 {
+		n += 1 + sovRpc(uint64(m.MemberId))
+	}
+	if m.Revision != 0 {
+		n += 1 + sovRpc(uint64(m.Revision))
+	}
+	if m.RaftTerm != 0 {
+		n += 1 + sovRpc(uint64(m.RaftTerm))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *RangeRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Limit != 0 {
+		n += 1 + sovRpc(uint64(m.Limit))
+	}
+	if m.Revision != 0 {
+		n += 1 + sovRpc(uint64(m.Revision))
+	}
+	if m.SortOrder != 0 {
+		n += 1 + sovRpc(uint64(m.SortOrder))
+	}
+	if m.SortTarget != 0 {
+		n += 1 + sovRpc(uint64(m.SortTarget))
+	}
+	if m.Serializable {
+		n += 2
+	}
+	if m.KeysOnly {
+		n += 2
+	}
+	if m.CountOnly {
+		n += 2
+	}
+	if m.MinModRevision != 0 {
+		n += 1 + sovRpc(uint64(m.MinModRevision))
+	}
+	if m.MaxModRevision != 0 {
+		n += 1 + sovRpc(uint64(m.MaxModRevision))
+	}
+	if m.MinCreateRevision != 0 {
+		n += 1 + sovRpc(uint64(m.MinCreateRevision))
+	}
+	if m.MaxCreateRevision != 0 {
+		n += 1 + sovRpc(uint64(m.MaxCreateRevision))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *RangeResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Kvs) > 0 {
+		for _, e := range m.Kvs {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.More {
+		n += 2
+	}
+	if m.Count != 0 {
+		n += 1 + sovRpc(uint64(m.Count))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *PutRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Value)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Lease != 0 {
+		n += 1 + sovRpc(uint64(m.Lease))
+	}
+	if m.PrevKv {
+		n += 2
+	}
+	if m.IgnoreValue {
+		n += 2
+	}
+	if m.IgnoreLease {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *PutResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.PrevKv != nil {
+		l = m.PrevKv.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DeleteRangeRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.PrevKv {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DeleteRangeResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Deleted != 0 {
+		n += 1 + sovRpc(uint64(m.Deleted))
+	}
+	if len(m.PrevKvs) > 0 {
+		for _, e := range m.PrevKvs {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *RequestOp) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Request != nil {
+		n += m.Request.Size()
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *RequestOp_RequestRange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestRange != nil {
+		l = m.RequestRange.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *RequestOp_RequestPut) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestPut != nil {
+		l = m.RequestPut.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *RequestOp_RequestDeleteRange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestDeleteRange != nil {
+		l = m.RequestDeleteRange.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *RequestOp_RequestTxn) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestTxn != nil {
+		l = m.RequestTxn.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *ResponseOp) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Response != nil {
+		n += m.Response.Size()
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *ResponseOp_ResponseRange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ResponseRange != nil {
+		l = m.ResponseRange.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *ResponseOp_ResponsePut) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ResponsePut != nil {
+		l = m.ResponsePut.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *ResponseOp_ResponseDeleteRange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ResponseDeleteRange != nil {
+		l = m.ResponseDeleteRange.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *ResponseOp_ResponseTxn) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ResponseTxn != nil {
+		l = m.ResponseTxn.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *Compare) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Result != 0 {
+		n += 1 + sovRpc(uint64(m.Result))
+	}
+	if m.Target != 0 {
+		n += 1 + sovRpc(uint64(m.Target))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.TargetUnion != nil {
+		n += m.TargetUnion.Size()
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 2 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Compare_Version) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRpc(uint64(m.Version))
+	return n
+}
+func (m *Compare_CreateRevision) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRpc(uint64(m.CreateRevision))
+	return n
+}
+func (m *Compare_ModRevision) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRpc(uint64(m.ModRevision))
+	return n
+}
+func (m *Compare_Value) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Value != nil {
+		l = len(m.Value)
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *Compare_Lease) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRpc(uint64(m.Lease))
+	return n
+}
+func (m *TxnRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Compare) > 0 {
+		for _, e := range m.Compare {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if len(m.Success) > 0 {
+		for _, e := range m.Success {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if len(m.Failure) > 0 {
+		for _, e := range m.Failure {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *TxnResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Succeeded {
+		n += 2
+	}
+	if len(m.Responses) > 0 {
+		for _, e := range m.Responses {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *CompactionRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Revision != 0 {
+		n += 1 + sovRpc(uint64(m.Revision))
+	}
+	if m.Physical {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *CompactionResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HashRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HashKVRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Revision != 0 {
+		n += 1 + sovRpc(uint64(m.Revision))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HashKVResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Hash != 0 {
+		n += 1 + sovRpc(uint64(m.Hash))
+	}
+	if m.CompactRevision != 0 {
+		n += 1 + sovRpc(uint64(m.CompactRevision))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HashResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Hash != 0 {
+		n += 1 + sovRpc(uint64(m.Hash))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *SnapshotRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *SnapshotResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.RemainingBytes != 0 {
+		n += 1 + sovRpc(uint64(m.RemainingBytes))
+	}
+	l = len(m.Blob)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestUnion != nil {
+		n += m.RequestUnion.Size()
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchRequest_CreateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.CreateRequest != nil {
+		l = m.CreateRequest.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *WatchRequest_CancelRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.CancelRequest != nil {
+		l = m.CancelRequest.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *WatchRequest_ProgressRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ProgressRequest != nil {
+		l = m.ProgressRequest.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *WatchCreateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.StartRevision != 0 {
+		n += 1 + sovRpc(uint64(m.StartRevision))
+	}
+	if m.ProgressNotify {
+		n += 2
+	}
+	if len(m.Filters) > 0 {
+		l = 0
+		for _, e := range m.Filters {
+			l += sovRpc(uint64(e))
+		}
+		n += 1 + sovRpc(uint64(l)) + l
+	}
+	if m.PrevKv {
+		n += 2
+	}
+	if m.WatchId != 0 {
+		n += 1 + sovRpc(uint64(m.WatchId))
+	}
+	if m.Fragment {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchCancelRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.WatchId != 0 {
+		n += 1 + sovRpc(uint64(m.WatchId))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchProgressRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.WatchId != 0 {
+		n += 1 + sovRpc(uint64(m.WatchId))
+	}
+	if m.Created {
+		n += 2
+	}
+	if m.Canceled {
+		n += 2
+	}
+	if m.CompactRevision != 0 {
+		n += 1 + sovRpc(uint64(m.CompactRevision))
+	}
+	l = len(m.CancelReason)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Fragment {
+		n += 2
+	}
+	if len(m.Events) > 0 {
+		for _, e := range m.Events {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseGrantRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.TTL != 0 {
+		n += 1 + sovRpc(uint64(m.TTL))
+	}
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseGrantResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.TTL != 0 {
+		n += 1 + sovRpc(uint64(m.TTL))
+	}
+	l = len(m.Error)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseRevokeRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseRevokeResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseKeepAliveRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseKeepAliveResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.TTL != 0 {
+		n += 1 + sovRpc(uint64(m.TTL))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseTimeToLiveRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.Keys {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseTimeToLiveResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.TTL != 0 {
+		n += 1 + sovRpc(uint64(m.TTL))
+	}
+	if m.GrantedTTL != 0 {
+		n += 1 + sovRpc(uint64(m.GrantedTTL))
+	}
+	if len(m.Keys) > 0 {
+		for _, b := range m.Keys {
+			l = len(b)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseLeasesRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseLeasesResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Leases) > 0 {
+		for _, e := range m.Leases {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Member) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.PeerURLs) > 0 {
+		for _, s := range m.PeerURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if len(m.ClientURLs) > 0 {
+		for _, s := range m.ClientURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberAddRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.PeerURLs) > 0 {
+		for _, s := range m.PeerURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberAddResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Member != nil {
+		l = m.Member.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Members) > 0 {
+		for _, e := range m.Members {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberRemoveRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberRemoveResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Members) > 0 {
+		for _, e := range m.Members {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberUpdateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if len(m.PeerURLs) > 0 {
+		for _, s := range m.PeerURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberUpdateResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Members) > 0 {
+		for _, e := range m.Members {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberListRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberListResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Members) > 0 {
+		for _, e := range m.Members {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DefragmentRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DefragmentResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MoveLeaderRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.TargetID != 0 {
+		n += 1 + sovRpc(uint64(m.TargetID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MoveLeaderResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AlarmRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Action != 0 {
+		n += 1 + sovRpc(uint64(m.Action))
+	}
+	if m.MemberID != 0 {
+		n += 1 + sovRpc(uint64(m.MemberID))
+	}
+	if m.Alarm != 0 {
+		n += 1 + sovRpc(uint64(m.Alarm))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AlarmMember) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.MemberID != 0 {
+		n += 1 + sovRpc(uint64(m.MemberID))
+	}
+	if m.Alarm != 0 {
+		n += 1 + sovRpc(uint64(m.Alarm))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AlarmResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Alarms) > 0 {
+		for _, e := range m.Alarms {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *StatusRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *StatusResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Version)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.DbSize != 0 {
+		n += 1 + sovRpc(uint64(m.DbSize))
+	}
+	if m.Leader != 0 {
+		n += 1 + sovRpc(uint64(m.Leader))
+	}
+	if m.RaftIndex != 0 {
+		n += 1 + sovRpc(uint64(m.RaftIndex))
+	}
+	if m.RaftTerm != 0 {
+		n += 1 + sovRpc(uint64(m.RaftTerm))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthEnableRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthDisableRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthenticateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserAddRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserGetRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserDeleteRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserChangePasswordRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserGrantRoleRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.User)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserRevokeRoleRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleAddRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleGetRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserListRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleListRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleDeleteRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleGrantPermissionRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Perm != nil {
+		l = m.Perm.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleRevokePermissionRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthEnableResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthDisableResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthenticateResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Token)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserAddResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserGetResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Roles) > 0 {
+		for _, s := range m.Roles {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserDeleteResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserChangePasswordResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserGrantRoleResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserRevokeRoleResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleAddResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleGetResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Perm) > 0 {
+		for _, e := range m.Perm {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleListResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Roles) > 0 {
+		for _, s := range m.Roles {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserListResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Users) > 0 {
+		for _, s := range m.Users {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleDeleteResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleGrantPermissionResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleRevokePermissionResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovRpc(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozRpc(x uint64) (n int) {
+	return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *ResponseHeader) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+			}
+			m.ClusterId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ClusterId |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType)
+			}
+			m.MemberId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MemberId |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			m.Revision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Revision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
+			}
+			m.RaftTerm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RaftTerm |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *RangeRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+			}
+			m.Limit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Limit |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			m.Revision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Revision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType)
+			}
+			m.SortOrder = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.SortOrder |= RangeRequest_SortOrder(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType)
+			}
+			m.SortTarget = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.SortTarget |= RangeRequest_SortTarget(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Serializable = bool(v != 0)
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.KeysOnly = bool(v != 0)
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.CountOnly = bool(v != 0)
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType)
+			}
+			m.MinModRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MinModRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType)
+			}
+			m.MaxModRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MaxModRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 12:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType)
+			}
+			m.MinCreateRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MinCreateRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 13:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType)
+			}
+			m.MaxCreateRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MaxCreateRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *RangeResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kvs = append(m.Kvs, &mvccpb.KeyValue{})
+			if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field More", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.More = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+			}
+			m.Count = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Count |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PutRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PutRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+			if m.Value == nil {
+				m.Value = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+			}
+			m.Lease = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Lease |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.PrevKv = bool(v != 0)
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.IgnoreValue = bool(v != 0)
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.IgnoreLease = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PutResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PutResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PrevKv == nil {
+				m.PrevKv = &mvccpb.KeyValue{}
+			}
+			if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.PrevKv = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
+			}
+			m.Deleted = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Deleted |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{})
+			if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *RequestOp) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RequestOp: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &RangeRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Request = &RequestOp_RequestRange{v}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &PutRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Request = &RequestOp_RequestPut{v}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &DeleteRangeRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Request = &RequestOp_RequestDeleteRange{v}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &TxnRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Request = &RequestOp_RequestTxn{v}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResponseOp) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &RangeResponse{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Response = &ResponseOp_ResponseRange{v}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &PutResponse{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Response = &ResponseOp_ResponsePut{v}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &DeleteRangeResponse{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Response = &ResponseOp_ResponseDeleteRange{v}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &TxnResponse{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Response = &ResponseOp_ResponseTxn{v}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Compare) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Compare: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
+			}
+			m.Result = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Result |= Compare_CompareResult(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
+			}
+			m.Target = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Target |= Compare_CompareTarget(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TargetUnion = &Compare_Version{v}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TargetUnion = &Compare_CreateRevision{v}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TargetUnion = &Compare_ModRevision{v}
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := make([]byte, postIndex-iNdEx)
+			copy(v, dAtA[iNdEx:postIndex])
+			m.TargetUnion = &Compare_Value{v}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TargetUnion = &Compare_Lease{v}
+		case 64:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TxnRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Compare = append(m.Compare, &Compare{})
+			if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Success = append(m.Success, &RequestOp{})
+			if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Failure = append(m.Failure, &RequestOp{})
+			if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TxnResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Succeeded = bool(v != 0)
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Responses = append(m.Responses, &ResponseOp{})
+			if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CompactionRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			m.Revision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Revision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Physical = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CompactionResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HashRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HashRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HashKVRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			m.Revision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Revision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HashKVResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+			}
+			m.Hash = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Hash |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
+			}
+			m.CompactRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.CompactRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HashResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HashResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+			}
+			m.Hash = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Hash |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SnapshotRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SnapshotResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType)
+			}
+			m.RemainingBytes = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RemainingBytes |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...)
+			if m.Blob == nil {
+				m.Blob = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WatchRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &WatchCreateRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.RequestUnion = &WatchRequest_CreateRequest{v}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &WatchCancelRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.RequestUnion = &WatchRequest_CancelRequest{v}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProgressRequest", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &WatchProgressRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.RequestUnion = &WatchRequest_ProgressRequest{v}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType)
+			}
+			m.StartRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.StartRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ProgressNotify = bool(v != 0)
+		case 5:
+			if wireType == 0 {
+				var v WatchCreateRequest_FilterType
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRpc
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= WatchCreateRequest_FilterType(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.Filters = append(m.Filters, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRpc
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthRpc
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthRpc
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				if elementCount != 0 && len(m.Filters) == 0 {
+					m.Filters = make([]WatchCreateRequest_FilterType, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v WatchCreateRequest_FilterType
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowRpc
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= WatchCreateRequest_FilterType(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.Filters = append(m.Filters, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.PrevKv = bool(v != 0)
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+			}
+			m.WatchId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.WatchId |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Fragment = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+			}
+			m.WatchId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.WatchId |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchProgressRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchProgressRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WatchResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+			}
+			m.WatchId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.WatchId |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Created = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Canceled = bool(v != 0)
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
+			}
+			m.CompactRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.CompactRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CancelReason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Fragment = bool(v != 0)
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Events = append(m.Events, &mvccpb.Event{})
+			if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+			}
+			m.TTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+			}
+			m.TTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Error = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+			}
+			m.TTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Keys = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+			}
+			m.TTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType)
+			}
+			m.GrantedTTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.GrantedTTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
+			copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Leases = append(m.Leases, &LeaseStatus{})
+			if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Member) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Member: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberAddRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberAddResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Member == nil {
+				m.Member = &Member{}
+			}
+			if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Members = append(m.Members, &Member{})
+			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Members = append(m.Members, &Member{})
+			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Members = append(m.Members, &Member{})
+			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberListRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberListResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Members = append(m.Members, &Member{})
+			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DefragmentRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DefragmentResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType)
+			}
+			m.TargetID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TargetID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AlarmRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+			}
+			m.Action = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Action |= AlarmRequest_AlarmAction(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
+			}
+			m.MemberID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MemberID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+			}
+			m.Alarm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Alarm |= AlarmType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AlarmMember) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
+			}
+			m.MemberID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MemberID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+			}
+			m.Alarm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Alarm |= AlarmType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AlarmResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Alarms = append(m.Alarms, &AlarmMember{})
+			if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StatusRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StatusResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Version = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType)
+			}
+			m.DbSize = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.DbSize |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
+			}
+			m.Leader = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Leader |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType)
+			}
+			m.RaftIndex = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RaftIndex |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
+			}
+			m.RaftTerm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RaftTerm |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.User = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Perm == nil {
+				m.Perm = &authpb.Permission{}
+			}
+			if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Token = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Perm = append(m.Perm, &authpb.Permission{})
+			if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Users = append(m.Users, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipRpc(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthRpc
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthRpc
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowRpc
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipRpc(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthRpc
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowRpc   = fmt.Errorf("proto: integer overflow")
+)
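
The loops in the generated decoder above all implement protocol-buffer base-128 varints: each byte contributes its low seven bits, least-significant group first, and a byte below 0x80 terminates the value. A minimal standalone sketch of that decoding, for illustration only (decodeVarint is a hypothetical helper, not part of the generated file):

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the generated loops: each byte contributes its low
// seven bits, least-significant group first; a byte < 0x80 ends the value.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n, nil
		}
	}
	return 0, 0, errors.New("integer overflow")
}

func main() {
	// 300 encodes as 0xAC 0x02: 0x2C | (0x02 << 7) = 44 + 256 = 300.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
```

The key varint packs the field number in the upper bits and the wire type in the lower three, which is why the generated code above computes fieldNum as wire >> 3 and wireType as wire & 0x7.
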
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
new file mode 100644
index 0000000..d9da43c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
@@ -0,0 +1,1075 @@
+syntax = "proto3";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+import "etcd/mvcc/mvccpb/kv.proto";
+import "etcd/auth/authpb/auth.proto";
+
+// for grpc-gateway
+import "google/api/annotations.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+service KV {
+  // Range gets the keys in the range from the key-value store.
+  rpc Range(RangeRequest) returns (RangeResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/range"
+        body: "*"
+    };
+  }
+
+  // Put puts the given key into the key-value store.
+  // A put request increments the revision of the key-value store
+  // and generates one event in the event history.
+  rpc Put(PutRequest) returns (PutResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/put"
+        body: "*"
+    };
+  }
+
+  // DeleteRange deletes the given range from the key-value store.
+  // A delete request increments the revision of the key-value store
+  // and generates a delete event in the event history for every deleted key.
+  rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/deleterange"
+        body: "*"
+    };
+  }
+
+  // Txn processes multiple requests in a single transaction.
+  // A txn request increments the revision of the key-value store
+  // and generates events with the same revision for every completed request.
+  // It is not allowed to modify the same key several times within one txn.
+  rpc Txn(TxnRequest) returns (TxnResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/txn"
+        body: "*"
+    };
+  }
+
+  // Compact compacts the event history in the etcd key-value store. The key-value
+  // store should be periodically compacted or the event history will continue to grow
+  // indefinitely.
+  rpc Compact(CompactionRequest) returns (CompactionResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/compaction"
+        body: "*"
+    };
+  }
+}
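
For reference, a minimal sketch of driving the KV service through the generated Go client in this package; the plaintext endpoint at localhost:2379 and the key/value used are illustrative assumptions, and error handling is reduced to panics:

```go
package main

import (
	"context"
	"fmt"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"google.golang.org/grpc"
)

func main() {
	// Dial the etcd gRPC endpoint (insecure, illustrative only).
	conn, err := grpc.Dial("localhost:2379", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	kv := pb.NewKVClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// Put a key, then read it back with a single-key Range.
	if _, err := kv.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		panic(err)
	}
	resp, err := kv.Range(ctx, &pb.RangeRequest{Key: []byte("foo")})
	if err != nil {
		panic(err)
	}
	for _, kvp := range resp.Kvs {
		fmt.Printf("%s=%s\n", kvp.Key, kvp.Value)
	}
}
```
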
+
+service Watch {
+  // Watch watches for events happening or that have happened. Both input and output
+  // are streams; the input stream is for creating and canceling watchers and the output
+  // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+  // for several watches at once. The entire event history can be watched starting from the
+  // last compaction revision.
+  rpc Watch(stream WatchRequest) returns (stream WatchResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/watch"
+        body: "*"
+    };
+  }
+}
+
+service Lease {
+  // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+  // within a given time to live period. All keys attached to the lease will be expired and
+  // deleted if the lease expires. Each expired key generates a delete event in the event history.
+  rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/lease/grant"
+        body: "*"
+    };
+  }
+
+  // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+  rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/lease/revoke"
+        body: "*"
+    };
+  }
+
+  // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+  // to the server and streaming keep alive responses from the server to the client.
+  rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/lease/keepalive"
+        body: "*"
+    };
+  }
+
+  // LeaseTimeToLive retrieves lease information.
+  rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/lease/timetolive"
+        body: "*"
+    };
+  }
+
+  // LeaseLeases lists all existing leases.
+  rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/lease/leases"
+        body: "*"
+    };
+  }
+}
+
+service Cluster {
+  // MemberAdd adds a member into the cluster.
+  rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/cluster/member/add"
+        body: "*"
+    };
+  }
+
+  // MemberRemove removes an existing member from the cluster.
+  rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/cluster/member/remove"
+        body: "*"
+    };
+  }
+
+  // MemberUpdate updates the member configuration.
+  rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/cluster/member/update"
+        body: "*"
+    };
+  }
+
+  // MemberList lists all the members in the cluster.
+  rpc MemberList(MemberListRequest) returns (MemberListResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/cluster/member/list"
+        body: "*"
+    };
+  }
+}
+
+service Maintenance {
+  // Alarm activates, deactivates, and queries alarms regarding cluster health.
+  rpc Alarm(AlarmRequest) returns (AlarmResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/alarm"
+        body: "*"
+    };
+  }
+
+  // Status gets the status of the member.
+  rpc Status(StatusRequest) returns (StatusResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/status"
+        body: "*"
+    };
+  }
+
+  // Defragment defragments a member's backend database to recover storage space.
+  rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/defragment"
+        body: "*"
+    };
+  }
+
+  // Hash computes the hash of the KV's backend.
+  // This is designed for testing; do not use this in production when there
+  // are ongoing transactions.
+  rpc Hash(HashRequest) returns (HashResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/hash"
+        body: "*"
+    };
+  }
+
+  // HashKV computes the hash of all MVCC keys up to a given revision.
+  rpc HashKV(HashKVRequest) returns (HashKVResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/hash"
+        body: "*"
+    };
+  }
+
+  // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+  rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/snapshot"
+        body: "*"
+    };
+  }
+
+  // MoveLeader requests the current leader node to transfer its leadership to the transferee.
+  rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/transfer-leadership"
+        body: "*"
+    };
+  }
+}
+
+service Auth {
+  // AuthEnable enables authentication.
+  rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/enable"
+        body: "*"
+    };
+  }
+
+  // AuthDisable disables authentication.
+  rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/disable"
+        body: "*"
+    };
+  }
+
+  // Authenticate processes an authenticate request.
+  rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/authenticate"
+        body: "*"
+    };
+  }
+
+  // UserAdd adds a new user.
+  rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/add"
+        body: "*"
+    };
+  }
+
+  // UserGet gets detailed user information.
+  rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/get"
+        body: "*"
+    };
+  }
+
+  // UserList gets a list of all users.
+  rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/list"
+        body: "*"
+    };
+  }
+
+  // UserDelete deletes a specified user.
+  rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/delete"
+        body: "*"
+    };
+  }
+
+  // UserChangePassword changes the password of a specified user.
+  rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/changepw"
+        body: "*"
+    };
+  }
+
+  // UserGrantRole grants a role to a specified user.
+  rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/grant"
+        body: "*"
+    };
+  }
+
+  // UserRevokeRole revokes a role from a specified user.
+  rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/revoke"
+        body: "*"
+    };
+  }
+
+  // RoleAdd adds a new role.
+  rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/add"
+        body: "*"
+    };
+  }
+
+  // RoleGet gets detailed role information.
+  rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/get"
+        body: "*"
+    };
+  }
+
+  // RoleList gets a list of all roles.
+  rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/list"
+        body: "*"
+    };
+  }
+
+  // RoleDelete deletes a specified role.
+  rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/delete"
+        body: "*"
+    };
+  }
+
+  // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+  rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/grant"
+        body: "*"
+    };
+  }
+
+  // RoleRevokePermission revokes a key or range permission of a specified role.
+  rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/revoke"
+        body: "*"
+    };
+  }
+}
+
+message ResponseHeader {
+  // cluster_id is the ID of the cluster which sent the response.
+  uint64 cluster_id = 1;
+  // member_id is the ID of the member which sent the response.
+  uint64 member_id = 2;
+  // revision is the key-value store revision when the request was applied.
+  // For watch progress responses, the header.revision indicates progress. All future events
+  // received in this stream are guaranteed to have a higher revision number than the
+  // header.revision number.
+  int64 revision = 3;
+  // raft_term is the raft term when the request was applied.
+  uint64 raft_term = 4;
+}
+
+message RangeRequest {
+  enum SortOrder {
+	NONE = 0; // default, no sorting
+	ASCEND = 1; // lowest target value first
+	DESCEND = 2; // highest target value first
+  }
+  enum SortTarget {
+	KEY = 0;
+	VERSION = 1;
+	CREATE = 2;
+	MOD = 3;
+	VALUE = 4;
+  }
+
+  // key is the first key for the range. If range_end is not given, the request only looks up key.
+  bytes key = 1;
+  // range_end is the upper bound on the requested range [key, range_end).
+  // If range_end is '\0', the range is all keys >= key.
+  // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+  // then the range request gets all keys prefixed with key.
+  // If both key and range_end are '\0', then the range request returns all keys.
+  bytes range_end = 2;
+  // limit is a limit on the number of keys returned for the request. When limit is set to 0,
+  // it is treated as no limit.
+  int64 limit = 3;
+  // revision is the point-in-time of the key-value store to use for the range.
+  // If revision is less than or equal to zero, the range is over the newest key-value store.
+  // If the revision has been compacted, ErrCompacted is returned as a response.
+  int64 revision = 4;
+
+  // sort_order is the order for returned sorted results.
+  SortOrder sort_order = 5;
+
+  // sort_target is the key-value field to use for sorting.
+  SortTarget sort_target = 6;
+
+  // serializable sets the range request to use serializable member-local reads.
+  // Range requests are linearizable by default; linearizable requests have higher
+  // latency and lower throughput than serializable requests but reflect the current
+  // consensus of the cluster. For better performance, in exchange for possible stale reads,
+  // a serializable range request is served locally without needing to reach consensus
+  // with other nodes in the cluster.
+  bool serializable = 7;
+
+  // keys_only when set returns only the keys and not the values.
+  bool keys_only = 8;
+
+  // count_only when set returns only the count of the keys in the range.
+  bool count_only = 9;
+
+  // min_mod_revision is the lower bound for returned key mod revisions; all keys with
+  // lesser mod revisions will be filtered away.
+  int64 min_mod_revision = 10;
+
+  // max_mod_revision is the upper bound for returned key mod revisions; all keys with
+  // greater mod revisions will be filtered away.
+  int64 max_mod_revision = 11;
+
+  // min_create_revision is the lower bound for returned key create revisions; all keys with
+  // lesser create revisions will be filtered away.
+  int64 min_create_revision = 12;
+
+  // max_create_revision is the upper bound for returned key create revisions; all keys with
+  // greater create revisions will be filtered away.
+  int64 max_create_revision = 13;
+}
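
The range_end conventions above ('\0' for an open range, key plus one for a prefix) can be computed with a few lines of code; a sketch, where prefixEnd is a hypothetical helper name:

```go
package main

import "fmt"

// prefixEnd returns the range_end that pairs with key to form a prefix query:
// the last byte below 0xff is incremented and everything after it dropped,
// e.g. "aa" -> "ab". An all-0xff key falls back to '\0' ("all keys >= key").
func prefixEnd(key []byte) []byte {
	end := make([]byte, len(key))
	copy(end, key)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return []byte{0}
}

func main() {
	fmt.Printf("%q\n", prefixEnd([]byte("aa"))) // "ab"
}
```
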
+
+message RangeResponse {
+  ResponseHeader header = 1;
+  // kvs is the list of key-value pairs matched by the range request.
+  // kvs is empty when count is requested.
+  repeated mvccpb.KeyValue kvs = 2;
+  // more indicates if there are more keys to return in the requested range.
+  bool more = 3;
+  // count is set to the number of keys within the range when requested.
+  int64 count = 4;
+}
+
+message PutRequest {
+  // key is the key, in bytes, to put into the key-value store.
+  bytes key = 1;
+  // value is the value, in bytes, to associate with the key in the key-value store.
+  bytes value = 2;
+  // lease is the lease ID to associate with the key in the key-value store. A lease
+  // value of 0 indicates no lease.
+  int64 lease = 3;
+
+  // If prev_kv is set, etcd gets the previous key-value pair before changing it.
+  // The previous key-value pair will be returned in the put response.
+  bool prev_kv = 4;
+
+  // If ignore_value is set, etcd updates the key using its current value.
+  // Returns an error if the key does not exist.
+  bool ignore_value = 5;
+
+  // If ignore_lease is set, etcd updates the key using its current lease.
+  // Returns an error if the key does not exist.
+  bool ignore_lease = 6;
+}
+
+message PutResponse {
+  ResponseHeader header = 1;
+  // if prev_kv is set in the request, the previous key-value pair will be returned.
+  mvccpb.KeyValue prev_kv = 2;
+}
+
+message DeleteRangeRequest {
+  // key is the first key to delete in the range.
+  bytes key = 1;
+  // range_end is the key following the last key to delete for the range [key, range_end).
+  // If range_end is not given, the range is defined to contain only the key argument.
+  // If range_end is the key plus one (e.g., "aa"+1 == "ab"), then the range is all the keys
+  // with the prefix (the given key).
+  // If range_end is '\0', the range is all keys greater than or equal to the key argument.
+  bytes range_end = 2;
+
+  // If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
+  // The previous key-value pairs will be returned in the delete response.
+  bool prev_kv = 3;
+}
+
+message DeleteRangeResponse {
+  ResponseHeader header = 1;
+  // deleted is the number of keys deleted by the delete range request.
+  int64 deleted = 2;
+  // if prev_kv is set in the request, the previous key-value pairs will be returned.
+  repeated mvccpb.KeyValue prev_kvs = 3;
+}
+
+message RequestOp {
+  // request is a union of request types accepted by a transaction.
+  oneof request {
+    RangeRequest request_range = 1;
+    PutRequest request_put = 2;
+    DeleteRangeRequest request_delete_range = 3;
+    TxnRequest request_txn = 4;
+  }
+}
+
+message ResponseOp {
+  // response is a union of response types returned by a transaction.
+  oneof response {
+    RangeResponse response_range = 1;
+    PutResponse response_put = 2;
+    DeleteRangeResponse response_delete_range = 3;
+    TxnResponse response_txn = 4;
+  }
+}
+
+message Compare {
+  enum CompareResult {
+    EQUAL = 0;
+    GREATER = 1;
+    LESS = 2;
+    NOT_EQUAL = 3;
+  }
+  enum CompareTarget {
+    VERSION = 0;
+    CREATE = 1;
+    MOD = 2;
+    VALUE = 3;
+    LEASE = 4;
+  }
+  // result is logical comparison operation for this comparison.
+  CompareResult result = 1;
+  // target is the key-value field to inspect for the comparison.
+  CompareTarget target = 2;
+  // key is the subject key for the comparison operation.
+  bytes key = 3;
+  oneof target_union {
+    // version is the version of the given key
+    int64 version = 4;
+    // create_revision is the creation revision of the given key
+    int64 create_revision = 5;
+    // mod_revision is the last modified revision of the given key.
+    int64 mod_revision = 6;
+    // value is the value of the given key, in bytes.
+    bytes value = 7;
+    // lease is the lease id of the given key.
+    int64 lease = 8;
+    // leave room for more target_union field tags, jump to 64
+  }
+
+  // range_end compares the given target to all keys in the range [key, range_end).
+  // See RangeRequest for more details on key ranges.
+  bytes range_end = 64;
+  // TODO: fill out with most of the rest of RangeRequest fields when needed.
+}
+
+// From google paxosdb paper:
+// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
+// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
+// and consists of three components:
+// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
+// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
+// may apply to the same or different entries in the database. All tests in the guard are applied and
+// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
+// it executes f op (see item 3 below).
+// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
+// lookup operation, and applies to a single database entry. Two different operations in the list may apply
+// to the same or different entries in the database. These operations are executed
+// if guard evaluates to
+// true.
+// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
+message TxnRequest {
+  // compare is a list of predicates representing a conjunction of terms.
+  // If the comparisons succeed, then the success requests will be processed in order,
+  // and the response will contain their respective responses in order.
+  // If the comparisons fail, then the failure requests will be processed in order,
+  // and the response will contain their respective responses in order.
+  repeated Compare compare = 1;
+  // success is a list of requests which will be applied when compare evaluates to true.
+  repeated RequestOp success = 2;
+  // failure is a list of requests which will be applied when compare evaluates to false.
+  repeated RequestOp failure = 3;
+}
+
+message TxnResponse {
+  ResponseHeader header = 1;
+  // succeeded is set to true if the compare evaluated to true, or false otherwise.
+  bool succeeded = 2;
+  // responses is a list of responses corresponding to the results from applying
+  // success if succeeded is true or failure if succeeded is false.
+  repeated ResponseOp responses = 3;
+}
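
A sketch of how the compare/success/failure lists surface in client code, assuming the higher-level clientv3 package from the same repository; the endpoint and keys are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 3 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// If foo currently equals "bar", overwrite it; otherwise just read it.
	// If fills TxnRequest.compare, Then its success ops, Else its failure ops.
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.Value("foo"), "=", "bar")).
		Then(clientv3.OpPut("foo", "baz")).
		Else(clientv3.OpGet("foo")).
		Commit()
	if err != nil {
		panic(err)
	}
	fmt.Println("succeeded:", resp.Succeeded)
}
```
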
+
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+message CompactionRequest {
+  // revision is the key-value store revision for the compaction operation.
+  int64 revision = 1;
+  // physical is set so the RPC will wait until the compaction is physically
+  // applied to the local database such that compacted entries are totally
+  // removed from the backend database.
+  bool physical = 2;
+}
+
+message CompactionResponse {
+  ResponseHeader header = 1;
+}
+
+message HashRequest {
+}
+
+message HashKVRequest {
+  // revision is the key-value store revision for the hash operation.
+  int64 revision = 1;
+}
+
+message HashKVResponse {
+  ResponseHeader header = 1;
+  // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
+  uint32 hash = 2;
+  // compact_revision is the compacted revision of key-value store when hash begins.
+  int64 compact_revision = 3;
+}
+
+message HashResponse {
+  ResponseHeader header = 1;
+  // hash is the hash value computed from the responding member's KV's backend.
+  uint32 hash = 2;
+}
+
+message SnapshotRequest {
+}
+
+message SnapshotResponse {
+  // header has the current key-value store information. The first header in the snapshot
+  // stream indicates the point in time of the snapshot.
+  ResponseHeader header = 1;
+
+  // remaining_bytes is the number of blob bytes to be sent after this message
+  uint64 remaining_bytes = 2;
+
+  // blob contains the next chunk of the snapshot in the snapshot stream.
+  bytes blob = 3;
+}
+
+message WatchRequest {
+  // request_union is a request to either create a new watcher or cancel an existing watcher.
+  oneof request_union {
+    WatchCreateRequest create_request = 1;
+    WatchCancelRequest cancel_request = 2;
+    WatchProgressRequest progress_request = 3;
+  }
+}
+
+message WatchCreateRequest {
+  // key is the key to register for watching.
+  bytes key = 1;
+  // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
+  // only the key argument is watched. If range_end is equal to '\0', all keys greater than
+  // or equal to the key argument are watched.
+  // If the range_end is the key plus one (e.g., "aa"+1 == "ab"),
+  // then all keys with the prefix (the given key) will be watched.
+  bytes range_end = 2;
+  // start_revision is an optional revision to watch from (inclusive). If unset, watching starts from "now".
+  int64 start_revision = 3;
+  // progress_notify is set so that the etcd server will periodically send a WatchResponse with
+  // no events to the new watcher if there are no recent events. It is useful when clients
+  // wish to recover a disconnected watcher starting from a recent known revision.
+  // The etcd server may decide how often it will send notifications based on current load.
+  bool progress_notify = 4;
+
+  enum FilterType {
+  // filter out put event.
+  NOPUT = 0;
+  // filter out delete event.
+  NODELETE = 1;
+  }
+  // filters filter the events at server side before it sends back to the watcher.
+  repeated FilterType filters = 5;
+
+  // If prev_kv is set, created watcher gets the previous KV before the event happens.
+  // If the previous KV is already compacted, nothing will be returned.
+  bool prev_kv = 6;
+
+  // If watch_id is provided and non-zero, it will be assigned to this watcher.
+  // Since creating a watcher in etcd is not a synchronous operation,
+  // this can be used to ensure that ordering is correct when creating multiple
+  // watchers on the same stream. Creating a watcher with an ID already in
+  // use on the stream will cause an error to be returned.
+  int64 watch_id = 7;
+
+  // fragment enables splitting large revisions into multiple watch responses.
+  bool fragment = 8;
+}
+
+message WatchCancelRequest {
+  // watch_id is the watcher id to cancel so that no more events are transmitted.
+  int64 watch_id = 1;
+}
+
+// Requests that a watch stream progress status be sent in the watch response stream as soon as
+// possible.
+message WatchProgressRequest {
+}
+
+message WatchResponse {
+  ResponseHeader header = 1;
+  // watch_id is the ID of the watcher that corresponds to the response.
+  int64 watch_id = 2;
+  // created is set to true if the response is for a create watch request.
+  // The client should record the watch_id and expect to receive events for
+  // the created watcher from the same stream.
+  // All events sent to the created watcher will attach with the same watch_id.
+  bool created = 3;
+  // canceled is set to true if the response is for a cancel watch request.
+  // No further events will be sent to the canceled watcher.
+  bool canceled = 4;
+  // compact_revision is set to the minimum index if a watcher tries to watch
+  // at a compacted index.
+  //
+  // This happens when creating a watcher at a compacted revision or the watcher cannot
+  // catch up with the progress of the key-value store.
+  //
+  // The client should treat the watcher as canceled and should not try to create any
+  // watcher with the same start_revision again.
+  int64 compact_revision  = 5;
+
+  // cancel_reason indicates the reason for canceling the watcher.
+  string cancel_reason = 6;
+
+  // fragment is true if a large watch response was split over multiple responses.
+  bool fragment = 7;
+
+  repeated mvccpb.Event events = 11;
+}
+
+message LeaseGrantRequest {
+  // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
+  int64 TTL = 1;
+  // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
+  int64 ID = 2;
+}
+
+message LeaseGrantResponse {
+  ResponseHeader header = 1;
+  // ID is the lease ID for the granted lease.
+  int64 ID = 2;
+  // TTL is the server chosen lease time-to-live in seconds.
+  int64 TTL = 3;
+  string error = 4;
+}
+
+message LeaseRevokeRequest {
+  // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
+  int64 ID = 1;
+}
+
+message LeaseRevokeResponse {
+  ResponseHeader header = 1;
+}
+
+message LeaseKeepAliveRequest {
+  // ID is the lease ID for the lease to keep alive.
+  int64 ID = 1;
+}
+
+message LeaseKeepAliveResponse {
+  ResponseHeader header = 1;
+  // ID is the lease ID from the keep alive request.
+  int64 ID = 2;
+  // TTL is the new time-to-live for the lease.
+  int64 TTL = 3;
+}
+
+message LeaseTimeToLiveRequest {
+  // ID is the lease ID for the lease.
+  int64 ID = 1;
+  // keys is true to query all the keys attached to this lease.
+  bool keys = 2;
+}
+
+message LeaseTimeToLiveResponse {
+  ResponseHeader header = 1;
+  // ID is the lease ID of the lease being queried.
+  int64 ID = 2;
+  // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+  int64 TTL = 3;
+  // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+  int64 grantedTTL = 4;
+  // Keys is the list of keys attached to this lease.
+  repeated bytes keys = 5;
+}
+
+message LeaseLeasesRequest {
+}
+
+message LeaseStatus {
+  int64 ID = 1;
+  // TODO: int64 TTL = 2;
+}
+
+message LeaseLeasesResponse {
+  ResponseHeader header = 1;
+  repeated LeaseStatus leases = 2;
+}
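
A short illustrative sketch (not part of the vendored diff) of the lease RPC flow described by the messages above, using the etcd clientv3 package this vendor tree accompanies; the endpoint, key, and value are placeholders:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// LeaseGrantRequest: ask for a 10-second TTL; ID 0 lets the server choose.
	grant, err := cli.Grant(ctx, 10)
	if err != nil {
		log.Fatal(err)
	}

	// Attach a key to the lease; it is deleted when the lease expires or is revoked.
	if _, err := cli.Put(ctx, "demo-key", "demo-value", clientv3.WithLease(grant.ID)); err != nil {
		log.Fatal(err)
	}

	// LeaseKeepAliveRequest: refresh the lease until ctx is cancelled.
	ch, err := cli.KeepAlive(ctx, grant.ID)
	if err != nil {
		log.Fatal(err)
	}
	if ka := <-ch; ka != nil {
		log.Printf("lease %x kept alive, new TTL %ds", ka.ID, ka.TTL)
	}
}
```
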
+
+message Member {
+  // ID is the member ID for this member.
+  uint64 ID = 1;
+  // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
+  string name = 2;
+  // peerURLs is the list of URLs the member exposes to the cluster for communication.
+  repeated string peerURLs = 3;
+  // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
+  repeated string clientURLs = 4;
+}
+
+message MemberAddRequest {
+  // peerURLs is the list of URLs the added member will use to communicate with the cluster.
+  repeated string peerURLs = 1;
+}
+
+message MemberAddResponse {
+  ResponseHeader header = 1;
+  // member is the member information for the added member.
+  Member member = 2;
+  // members is a list of all members after adding the new member.
+  repeated Member members = 3;
+}
+
+message MemberRemoveRequest {
+  // ID is the member ID of the member to remove.
+  uint64 ID = 1;
+}
+
+message MemberRemoveResponse {
+  ResponseHeader header = 1;
+  // members is a list of all members after removing the member.
+  repeated Member members = 2;
+}
+
+message MemberUpdateRequest {
+  // ID is the member ID of the member to update.
+  uint64 ID = 1;
+  // peerURLs is the new list of URLs the member will use to communicate with the cluster.
+  repeated string peerURLs = 2;
+}
+
+message MemberUpdateResponse{
+  ResponseHeader header = 1;
+  // members is a list of all members after updating the member.
+  repeated Member members = 2;
+}
+
+message MemberListRequest {
+}
+
+message MemberListResponse {
+  ResponseHeader header = 1;
+  // members is a list of all members associated with the cluster.
+  repeated Member members = 2;
+}
+
+message DefragmentRequest {
+}
+
+message DefragmentResponse {
+  ResponseHeader header = 1;
+}
+
+message MoveLeaderRequest {
+  // targetID is the node ID for the new leader.
+  uint64 targetID = 1;
+}
+
+message MoveLeaderResponse {
+  ResponseHeader header = 1;
+}
+
+enum AlarmType {
+	NONE = 0; // default, used to query if any alarm is active
+	NOSPACE = 1; // space quota is exhausted
+	CORRUPT = 2; // kv store corruption detected
+}
+
+message AlarmRequest {
+  enum AlarmAction {
+	GET = 0;
+	ACTIVATE = 1;
+	DEACTIVATE = 2;
+  }
+  // action is the kind of alarm request to issue. The action
+  // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
+  // raised alarm.
+  AlarmAction action = 1;
+  // memberID is the ID of the member associated with the alarm. If memberID is 0, the
+  // alarm request covers all members.
+  uint64 memberID = 2;
+  // alarm is the type of alarm to consider for this request.
+  AlarmType alarm = 3;
+}
+
+message AlarmMember {
+  // memberID is the ID of the member associated with the raised alarm.
+  uint64 memberID = 1;
+  // alarm is the type of alarm which has been raised.
+  AlarmType alarm = 2;
+}
+
+message AlarmResponse {
+  ResponseHeader header = 1;
+  // alarms is a list of alarms associated with the alarm request.
+  repeated AlarmMember alarms = 2;
+}
+
+message StatusRequest {
+}
+
+message StatusResponse {
+  ResponseHeader header = 1;
+  // version is the cluster protocol version used by the responding member.
+  string version = 2;
+  // dbSize is the size of the backend database, in bytes, of the responding member.
+  int64 dbSize = 3;
+  // leader is the member ID which the responding member believes is the current leader.
+  uint64 leader = 4;
+  // raftIndex is the current raft index of the responding member.
+  uint64 raftIndex = 5;
+  // raftTerm is the current raft term of the responding member.
+  uint64 raftTerm = 6;
+}
+
+message AuthEnableRequest {
+}
+
+message AuthDisableRequest {
+}
+
+message AuthenticateRequest {
+  string name = 1;
+  string password = 2;
+}
+
+message AuthUserAddRequest {
+  string name = 1;
+  string password = 2;
+}
+
+message AuthUserGetRequest {
+  string name = 1;
+}
+
+message AuthUserDeleteRequest {
+  // name is the name of the user to delete.
+  string name = 1;
+}
+
+message AuthUserChangePasswordRequest {
+  // name is the name of the user whose password is being changed.
+  string name = 1;
+  // password is the new password for the user.
+  string password = 2;
+}
+
+message AuthUserGrantRoleRequest {
+  // user is the name of the user who should be granted the given role.
+  string user = 1;
+  // role is the name of the role to grant to the user.
+  string role = 2;
+}
+
+message AuthUserRevokeRoleRequest {
+  string name = 1;
+  string role = 2;
+}
+
+message AuthRoleAddRequest {
+  // name is the name of the role to add to the authentication system.
+  string name = 1;
+}
+
+message AuthRoleGetRequest {
+  string role = 1;
+}
+
+message AuthUserListRequest {
+}
+
+message AuthRoleListRequest {
+}
+
+message AuthRoleDeleteRequest {
+  string role = 1;
+}
+
+message AuthRoleGrantPermissionRequest {
+  // name is the name of the role which will be granted the permission.
+  string name = 1;
+  // perm is the permission to grant to the role.
+  authpb.Permission perm = 2;
+}
+
+message AuthRoleRevokePermissionRequest {
+  string role = 1;
+  string key = 2;
+  string range_end = 3;
+}
+
+message AuthEnableResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthDisableResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthenticateResponse {
+  ResponseHeader header = 1;
+  // token is an authorized token that can be used in succeeding RPCs.
+  string token = 2;
+}
+
+message AuthUserAddResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthUserGetResponse {
+  ResponseHeader header = 1;
+
+  repeated string roles = 2;
+}
+
+message AuthUserDeleteResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthUserChangePasswordResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthUserGrantRoleResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthUserRevokeRoleResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthRoleAddResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthRoleGetResponse {
+  ResponseHeader header = 1;
+
+  repeated authpb.Permission perm = 2;
+}
+
+message AuthRoleListResponse {
+  ResponseHeader header = 1;
+
+  repeated string roles = 2;
+}
+
+message AuthUserListResponse {
+  ResponseHeader header = 1;
+
+  repeated string users = 2;
+}
+
+message AuthRoleDeleteResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthRoleGrantPermissionResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthRoleRevokePermissionResponse {
+  ResponseHeader header = 1;
+}
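
For illustration only (not part of the vendored diff), a sketch of the typical sequence behind the Auth* messages above, assuming the clientv3 auth surface that matches them; the endpoint, user, role, and key names are placeholders:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// AuthUserAddRequest / AuthRoleAddRequest
	if _, err := cli.UserAdd(ctx, "app", "secret"); err != nil {
		log.Fatal(err)
	}
	if _, err := cli.RoleAdd(ctx, "app-role"); err != nil {
		log.Fatal(err)
	}

	// AuthRoleGrantPermissionRequest: read/write on a single key.
	if _, err := cli.RoleGrantPermission(ctx, "app-role", "app/config", "", clientv3.PermReadWrite); err != nil {
		log.Fatal(err)
	}

	// AuthUserGrantRoleRequest, then AuthEnableRequest to turn auth on.
	if _, err := cli.UserGrantRole(ctx, "app", "app-role"); err != nil {
		log.Fatal(err)
	}
	if _, err := cli.AuthEnable(ctx); err != nil {
		log.Fatal(err)
	}
}
```
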
diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
new file mode 100644
index 0000000..4679da5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
@@ -0,0 +1,830 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: kv.proto
+
+package mvccpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Event_EventType int32
+
+const (
+	PUT    Event_EventType = 0
+	DELETE Event_EventType = 1
+)
+
+var Event_EventType_name = map[int32]string{
+	0: "PUT",
+	1: "DELETE",
+}
+
+var Event_EventType_value = map[string]int32{
+	"PUT":    0,
+	"DELETE": 1,
+}
+
+func (x Event_EventType) String() string {
+	return proto.EnumName(Event_EventType_name, int32(x))
+}
+
+func (Event_EventType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_2216fe83c9c12408, []int{1, 0}
+}
+
+type KeyValue struct {
+	// key is the key in bytes. An empty key is not allowed.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// create_revision is the revision of last creation on this key.
+	CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"`
+	// mod_revision is the revision of last modification on this key.
+	ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"`
+	// version is the version of the key. A deletion resets
+	// the version to zero and any modification of the key
+	// increases its version.
+	Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
+	// value is the value held by the key, in bytes.
+	Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
+	// lease is the ID of the lease that attached to key.
+	// When the attached lease expires, the key will be deleted.
+	// If lease is 0, then no lease is attached to the key.
+	Lease                int64    `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *KeyValue) Reset()         { *m = KeyValue{} }
+func (m *KeyValue) String() string { return proto.CompactTextString(m) }
+func (*KeyValue) ProtoMessage()    {}
+func (*KeyValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2216fe83c9c12408, []int{0}
+}
+func (m *KeyValue) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *KeyValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_KeyValue.Merge(m, src)
+}
+func (m *KeyValue) XXX_Size() int {
+	return m.Size()
+}
+func (m *KeyValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_KeyValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KeyValue proto.InternalMessageInfo
+
+type Event struct {
+	// type is the kind of event. If type is a PUT, it indicates
+	// new data has been stored to the key. If type is a DELETE,
+	// it indicates the key was deleted.
+	Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"`
+	// kv holds the KeyValue for the event.
+	// A PUT event contains current kv pair.
+	// A PUT event with kv.Version=1 indicates the creation of a key.
+	// A DELETE/EXPIRE event contains the deleted key with
+	// its modification revision set to the revision of deletion.
+	Kv *KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"`
+	// prev_kv holds the key-value pair before the event happens.
+	PrevKv               *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *Event) Reset()         { *m = Event{} }
+func (m *Event) String() string { return proto.CompactTextString(m) }
+func (*Event) ProtoMessage()    {}
+func (*Event) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2216fe83c9c12408, []int{1}
+}
+func (m *Event) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Event.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Event) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Event.Merge(m, src)
+}
+func (m *Event) XXX_Size() int {
+	return m.Size()
+}
+func (m *Event) XXX_DiscardUnknown() {
+	xxx_messageInfo_Event.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Event proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
+	proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
+	proto.RegisterType((*Event)(nil), "mvccpb.Event")
+}
+
+func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) }
+
+var fileDescriptor_2216fe83c9c12408 = []byte{
+	// 303 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
+	0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
+	0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
+	0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
+	0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
+	0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
+	0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
+	0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
+	0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
+	0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
+	0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
+	0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
+	0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
+	0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
+	0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
+	0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
+	0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
+	0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
+	0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
+}
+
+func (m *KeyValue) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Lease != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.Lease))
+		i--
+		dAtA[i] = 0x30
+	}
+	if len(m.Value) > 0 {
+		i -= len(m.Value)
+		copy(dAtA[i:], m.Value)
+		i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.Version != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.Version))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.ModRevision != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.CreateRevision != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Event) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Event) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.PrevKv != nil {
+		{
+			size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintKv(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Kv != nil {
+		{
+			size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintKv(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Type != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.Type))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
+	offset -= sovKv(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *KeyValue) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovKv(uint64(l))
+	}
+	if m.CreateRevision != 0 {
+		n += 1 + sovKv(uint64(m.CreateRevision))
+	}
+	if m.ModRevision != 0 {
+		n += 1 + sovKv(uint64(m.ModRevision))
+	}
+	if m.Version != 0 {
+		n += 1 + sovKv(uint64(m.Version))
+	}
+	l = len(m.Value)
+	if l > 0 {
+		n += 1 + l + sovKv(uint64(l))
+	}
+	if m.Lease != 0 {
+		n += 1 + sovKv(uint64(m.Lease))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Event) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Type != 0 {
+		n += 1 + sovKv(uint64(m.Type))
+	}
+	if m.Kv != nil {
+		l = m.Kv.Size()
+		n += 1 + l + sovKv(uint64(l))
+	}
+	if m.PrevKv != nil {
+		l = m.PrevKv.Size()
+		n += 1 + l + sovKv(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovKv(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozKv(x uint64) (n int) {
+	return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *KeyValue) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowKv
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthKv
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthKv
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
+			}
+			m.CreateRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.CreateRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
+			}
+			m.ModRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ModRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			m.Version = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Version |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthKv
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthKv
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+			if m.Value == nil {
+				m.Value = []byte{}
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+			}
+			m.Lease = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Lease |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipKv(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthKv
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthKv
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Event) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowKv
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Event: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= Event_EventType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthKv
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthKv
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Kv == nil {
+				m.Kv = &KeyValue{}
+			}
+			if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthKv
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthKv
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PrevKv == nil {
+				m.PrevKv = &KeyValue{}
+			}
+			if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipKv(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthKv
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthKv
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipKv(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowKv
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthKv
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthKv
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowKv
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipKv(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthKv
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowKv   = fmt.Errorf("proto: integer overflow")
+)
diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto
new file mode 100644
index 0000000..23c911b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto
@@ -0,0 +1,49 @@
+syntax = "proto3";
+package mvccpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+message KeyValue {
+  // key is the key in bytes. An empty key is not allowed.
+  bytes key = 1;
+  // create_revision is the revision of last creation on this key.
+  int64 create_revision = 2;
+  // mod_revision is the revision of last modification on this key.
+  int64 mod_revision = 3;
+  // version is the version of the key. A deletion resets
+  // the version to zero and any modification of the key
+  // increases its version.
+  int64 version = 4;
+  // value is the value held by the key, in bytes.
+  bytes value = 5;
+  // lease is the ID of the lease that attached to key.
+  // When the attached lease expires, the key will be deleted.
+  // If lease is 0, then no lease is attached to the key.
+  int64 lease = 6;
+}
+
+message Event {
+  enum EventType {
+    PUT = 0;
+    DELETE = 1;
+  }
+  // type is the kind of event. If type is a PUT, it indicates
+  // new data has been stored to the key. If type is a DELETE,
+  // it indicates the key was deleted.
+  EventType type = 1;
+  // kv holds the KeyValue for the event.
+  // A PUT event contains current kv pair.
+  // A PUT event with kv.Version=1 indicates the creation of a key.
+  // A DELETE/EXPIRE event contains the deleted key with
+  // its modification revision set to the revision of deletion.
+  KeyValue kv = 2;
+
+  // prev_kv holds the key-value pair before the event happens.
+  KeyValue prev_kv = 3;
+}
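
A small illustrative sketch (not part of the vendored diff) of interpreting `mvccpb.Event` as documented above: a PUT whose `KeyValue.Version` is 1 marks a key creation, and a DELETE carries the deleted key with its `mod_revision` set to the revision of deletion. The key name and revisions are placeholders:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/mvcc/mvccpb"
)

// describe renders an event according to the semantics in kv.proto above.
func describe(ev *mvccpb.Event) string {
	switch {
	case ev.Type == mvccpb.PUT && ev.Kv.Version == 1:
		return fmt.Sprintf("created %q at revision %d", ev.Kv.Key, ev.Kv.CreateRevision)
	case ev.Type == mvccpb.PUT:
		return fmt.Sprintf("updated %q to version %d", ev.Kv.Key, ev.Kv.Version)
	default: // mvccpb.DELETE
		return fmt.Sprintf("deleted %q at revision %d", ev.Kv.Key, ev.Kv.ModRevision)
	}
}

func main() {
	ev := &mvccpb.Event{
		Type: mvccpb.PUT,
		Kv:   &mvccpb.KeyValue{Key: []byte("demo"), Version: 1, CreateRevision: 7},
	}
	fmt.Println(describe(ev))
}
```
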
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go
new file mode 100644
index 0000000..81b0a9d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"log"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+// assert that "discardLogger" satisfy "Logger" interface
+var _ Logger = &discardLogger{}
+
+// NewDiscardLogger returns a new Logger that discards everything except "fatal".
+func NewDiscardLogger() Logger { return &discardLogger{} }
+
+type discardLogger struct{}
+
+func (l *discardLogger) Info(args ...interface{})                    {}
+func (l *discardLogger) Infoln(args ...interface{})                  {}
+func (l *discardLogger) Infof(format string, args ...interface{})    {}
+func (l *discardLogger) Warning(args ...interface{})                 {}
+func (l *discardLogger) Warningln(args ...interface{})               {}
+func (l *discardLogger) Warningf(format string, args ...interface{}) {}
+func (l *discardLogger) Error(args ...interface{})                   {}
+func (l *discardLogger) Errorln(args ...interface{})                 {}
+func (l *discardLogger) Errorf(format string, args ...interface{})   {}
+func (l *discardLogger) Fatal(args ...interface{})                   { log.Fatal(args...) }
+func (l *discardLogger) Fatalln(args ...interface{})                 { log.Fatalln(args...) }
+func (l *discardLogger) Fatalf(format string, args ...interface{})   { log.Fatalf(format, args...) }
+func (l *discardLogger) V(lvl int) bool {
+	return false
+}
+func (l *discardLogger) Lvl(lvl int) grpclog.LoggerV2 { return l }
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/doc.go b/vendor/github.com/coreos/etcd/pkg/logutil/doc.go
new file mode 100644
index 0000000..e919f24
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package logutil includes utilities to facilitate logging.
+package logutil
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go b/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go
new file mode 100644
index 0000000..d57e173
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go
@@ -0,0 +1,70 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"fmt"
+
+	"github.com/coreos/pkg/capnslog"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+var DefaultLogLevel = "info"
+
+// ConvertToZapLevel converts a log level string to a zapcore.Level.
+func ConvertToZapLevel(lvl string) zapcore.Level {
+	switch lvl {
+	case "debug":
+		return zap.DebugLevel
+	case "info":
+		return zap.InfoLevel
+	case "warn":
+		return zap.WarnLevel
+	case "error":
+		return zap.ErrorLevel
+	case "dpanic":
+		return zap.DPanicLevel
+	case "panic":
+		return zap.PanicLevel
+	case "fatal":
+		return zap.FatalLevel
+	default:
+		panic(fmt.Sprintf("unknown level %q", lvl))
+	}
+}
+
+// ConvertToCapnslogLogLevel converts a log level string to a capnslog.LogLevel.
+// TODO: deprecate this in 3.5
+func ConvertToCapnslogLogLevel(lvl string) capnslog.LogLevel {
+	switch lvl {
+	case "debug":
+		return capnslog.DEBUG
+	case "info":
+		return capnslog.INFO
+	case "warn":
+		return capnslog.WARNING
+	case "error":
+		return capnslog.ERROR
+	case "dpanic":
+		return capnslog.CRITICAL
+	case "panic":
+		return capnslog.CRITICAL
+	case "fatal":
+		return capnslog.CRITICAL
+	default:
+		panic(fmt.Sprintf("unknown level %q", lvl))
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/logger.go
new file mode 100644
index 0000000..e7da80e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/logger.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import "google.golang.org/grpc/grpclog"
+
+// Logger defines logging interface.
+// TODO: deprecate in v3.5.
+type Logger interface {
+	grpclog.LoggerV2
+
+	// Lvl returns the logger if its verbosity level is >= "lvl".
+	// Otherwise, it returns a logger that discards everything.
+	Lvl(lvl int) grpclog.LoggerV2
+}
+
+// assert that "defaultLogger" satisfy "Logger" interface
+var _ Logger = &defaultLogger{}
+
+// NewLogger wraps a "grpclog.LoggerV2" so that it implements the "Logger" interface.
+//
+// For example:
+//
+//  var defaultLogger Logger
+//  g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4)
+//  defaultLogger = NewLogger(g)
+//
+func NewLogger(g grpclog.LoggerV2) Logger { return &defaultLogger{g: g} }
+
+type defaultLogger struct {
+	g grpclog.LoggerV2
+}
+
+func (l *defaultLogger) Info(args ...interface{})                    { l.g.Info(args...) }
+func (l *defaultLogger) Infoln(args ...interface{})                  { l.g.Info(args...) }
+func (l *defaultLogger) Infof(format string, args ...interface{})    { l.g.Infof(format, args...) }
+func (l *defaultLogger) Warning(args ...interface{})                 { l.g.Warning(args...) }
+func (l *defaultLogger) Warningln(args ...interface{})               { l.g.Warning(args...) }
+func (l *defaultLogger) Warningf(format string, args ...interface{}) { l.g.Warningf(format, args...) }
+func (l *defaultLogger) Error(args ...interface{})                   { l.g.Error(args...) }
+func (l *defaultLogger) Errorln(args ...interface{})                 { l.g.Error(args...) }
+func (l *defaultLogger) Errorf(format string, args ...interface{})   { l.g.Errorf(format, args...) }
+func (l *defaultLogger) Fatal(args ...interface{})                   { l.g.Fatal(args...) }
+func (l *defaultLogger) Fatalln(args ...interface{})                 { l.g.Fatal(args...) }
+func (l *defaultLogger) Fatalf(format string, args ...interface{})   { l.g.Fatalf(format, args...) }
+func (l *defaultLogger) V(lvl int) bool                              { return l.g.V(lvl) }
+func (l *defaultLogger) Lvl(lvl int) grpclog.LoggerV2 {
+	if l.g.V(lvl) {
+		return l
+	}
+	return &discardLogger{}
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
new file mode 100644
index 0000000..866b6f7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
@@ -0,0 +1,194 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	defaultMergePeriod     = time.Second
+	defaultTimeOutputScale = 10 * time.Millisecond
+
+	outputInterval = time.Second
+)
+
+// line represents a log line that can be printed out
+// through capnslog.PackageLogger.
+type line struct {
+	level capnslog.LogLevel
+	str   string
+}
+
+func (l line) append(s string) line {
+	return line{
+		level: l.level,
+		str:   l.str + " " + s,
+	}
+}
+
+// status represents the merge status of a line.
+type status struct {
+	period time.Duration
+
+	start time.Time // start time of latest merge period
+	count int       // number of merged lines from starting
+}
+
+func (s *status) isInMergePeriod(now time.Time) bool {
+	return s.period == 0 || s.start.Add(s.period).After(now)
+}
+
+func (s *status) isEmpty() bool { return s.count == 0 }
+
+func (s *status) summary(now time.Time) string {
+	ts := s.start.Round(defaultTimeOutputScale)
+	took := now.Round(defaultTimeOutputScale).Sub(ts)
+	return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took)
+}
+
+func (s *status) reset(now time.Time) {
+	s.start = now
+	s.count = 0
+}
+
+// MergeLogger supports merge logging, which merges repeated log lines
+// and prints summary log lines instead.
+//
+// For merge logging, MergeLogger prints out the line when the line appears
+// for the first time. MergeLogger holds the same log line printed within
+// defaultMergePeriod, and prints out a summary log line at the end of defaultMergePeriod.
+// It stops merging when the line doesn't appear within the
+// defaultMergePeriod.
+type MergeLogger struct {
+	*capnslog.PackageLogger
+
+	mu      sync.Mutex // protect statusm
+	statusm map[line]*status
+}
+
+func NewMergeLogger(logger *capnslog.PackageLogger) *MergeLogger {
+	l := &MergeLogger{
+		PackageLogger: logger,
+		statusm:       make(map[line]*status),
+	}
+	go l.outputLoop()
+	return l
+}
+
+func (l *MergeLogger) MergeInfo(entries ...interface{}) {
+	l.merge(line{
+		level: capnslog.INFO,
+		str:   fmt.Sprint(entries...),
+	})
+}
+
+func (l *MergeLogger) MergeInfof(format string, args ...interface{}) {
+	l.merge(line{
+		level: capnslog.INFO,
+		str:   fmt.Sprintf(format, args...),
+	})
+}
+
+func (l *MergeLogger) MergeNotice(entries ...interface{}) {
+	l.merge(line{
+		level: capnslog.NOTICE,
+		str:   fmt.Sprint(entries...),
+	})
+}
+
+func (l *MergeLogger) MergeNoticef(format string, args ...interface{}) {
+	l.merge(line{
+		level: capnslog.NOTICE,
+		str:   fmt.Sprintf(format, args...),
+	})
+}
+
+func (l *MergeLogger) MergeWarning(entries ...interface{}) {
+	l.merge(line{
+		level: capnslog.WARNING,
+		str:   fmt.Sprint(entries...),
+	})
+}
+
+func (l *MergeLogger) MergeWarningf(format string, args ...interface{}) {
+	l.merge(line{
+		level: capnslog.WARNING,
+		str:   fmt.Sprintf(format, args...),
+	})
+}
+
+func (l *MergeLogger) MergeError(entries ...interface{}) {
+	l.merge(line{
+		level: capnslog.ERROR,
+		str:   fmt.Sprint(entries...),
+	})
+}
+
+func (l *MergeLogger) MergeErrorf(format string, args ...interface{}) {
+	l.merge(line{
+		level: capnslog.ERROR,
+		str:   fmt.Sprintf(format, args...),
+	})
+}
+
+func (l *MergeLogger) merge(ln line) {
+	l.mu.Lock()
+
+	// increase count if the logger is merging the line
+	if status, ok := l.statusm[ln]; ok {
+		status.count++
+		l.mu.Unlock()
+		return
+	}
+
+	// initialize status of the line
+	l.statusm[ln] = &status{
+		period: defaultMergePeriod,
+		start:  time.Now(),
+	}
+	// release the lock before IO operation
+	l.mu.Unlock()
+	// print out the line at its first time
+	l.PackageLogger.Logf(ln.level, ln.str)
+}
+
+func (l *MergeLogger) outputLoop() {
+	for now := range time.Tick(outputInterval) {
+		var outputs []line
+
+		l.mu.Lock()
+		for ln, status := range l.statusm {
+			if status.isInMergePeriod(now) {
+				continue
+			}
+			if status.isEmpty() {
+				delete(l.statusm, ln)
+				continue
+			}
+			outputs = append(outputs, ln.append(status.summary(now)))
+			status.reset(now)
+		}
+		l.mu.Unlock()
+
+		for _, o := range outputs {
+			l.PackageLogger.Logf(o.level, o.str)
+		}
+	}
+}
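
An illustrative usage sketch (not part of the vendored diff) of the MergeLogger defined above; the repo/package names are placeholders, and the exact capnslog output depends on its configured formatter and level:

```go
package main

import (
	"time"

	"github.com/coreos/etcd/pkg/logutil"
	"github.com/coreos/pkg/capnslog"
)

func main() {
	ml := logutil.NewMergeLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "example"))

	// The first occurrence is printed immediately; repeats within the merge
	// period are only counted, and the background outputLoop later prints a
	// "[merged N repeated lines in ...]" summary.
	for i := 0; i < 100; i++ {
		ml.MergeWarning("connection to peer lost")
	}
	time.Sleep(3 * time.Second) // give outputLoop a chance to flush the summary
}
```
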
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go
new file mode 100644
index 0000000..378bee0
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go
@@ -0,0 +1,60 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"github.com/coreos/pkg/capnslog"
+	"google.golang.org/grpc/grpclog"
+)
+
+// assert that "packageLogger" satisfy "Logger" interface
+var _ Logger = &packageLogger{}
+
+// NewPackageLogger wraps a "*capnslog.PackageLogger" so that it implements the "Logger" interface.
+//
+// For example:
+//
+//  var defaultLogger Logger
+//  defaultLogger = NewPackageLogger("github.com/coreos/etcd", "snapshot")
+//
+func NewPackageLogger(repo, pkg string) Logger {
+	return &packageLogger{p: capnslog.NewPackageLogger(repo, pkg)}
+}
+
+type packageLogger struct {
+	p *capnslog.PackageLogger
+}
+
+func (l *packageLogger) Info(args ...interface{})                    { l.p.Info(args...) }
+func (l *packageLogger) Infoln(args ...interface{})                  { l.p.Info(args...) }
+func (l *packageLogger) Infof(format string, args ...interface{})    { l.p.Infof(format, args...) }
+func (l *packageLogger) Warning(args ...interface{})                 { l.p.Warning(args...) }
+func (l *packageLogger) Warningln(args ...interface{})               { l.p.Warning(args...) }
+func (l *packageLogger) Warningf(format string, args ...interface{}) { l.p.Warningf(format, args...) }
+func (l *packageLogger) Error(args ...interface{})                   { l.p.Error(args...) }
+func (l *packageLogger) Errorln(args ...interface{})                 { l.p.Error(args...) }
+func (l *packageLogger) Errorf(format string, args ...interface{})   { l.p.Errorf(format, args...) }
+func (l *packageLogger) Fatal(args ...interface{})                   { l.p.Fatal(args...) }
+func (l *packageLogger) Fatalln(args ...interface{})                 { l.p.Fatal(args...) }
+func (l *packageLogger) Fatalf(format string, args ...interface{})   { l.p.Fatalf(format, args...) }
+func (l *packageLogger) V(lvl int) bool {
+	return l.p.LevelAt(capnslog.LogLevel(lvl))
+}
+func (l *packageLogger) Lvl(lvl int) grpclog.LoggerV2 {
+	if l.p.LevelAt(capnslog.LogLevel(lvl)) {
+		return l
+	}
+	return &discardLogger{}
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap.go
new file mode 100644
index 0000000..2f69223
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/zap.go
@@ -0,0 +1,97 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"sort"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// DefaultZapLoggerConfig defines default zap logger configuration.
+var DefaultZapLoggerConfig = zap.Config{
+	Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)),
+
+	Development: false,
+	Sampling: &zap.SamplingConfig{
+		Initial:    100,
+		Thereafter: 100,
+	},
+
+	Encoding: "json",
+
+	// copied from "zap.NewProductionEncoderConfig" with some updates
+	EncoderConfig: zapcore.EncoderConfig{
+		TimeKey:        "ts",
+		LevelKey:       "level",
+		NameKey:        "logger",
+		CallerKey:      "caller",
+		MessageKey:     "msg",
+		StacktraceKey:  "stacktrace",
+		LineEnding:     zapcore.DefaultLineEnding,
+		EncodeLevel:    zapcore.LowercaseLevelEncoder,
+		EncodeTime:     zapcore.ISO8601TimeEncoder,
+		EncodeDuration: zapcore.StringDurationEncoder,
+		EncodeCaller:   zapcore.ShortCallerEncoder,
+	},
+
+	// Use "/dev/null" to discard all
+	OutputPaths:      []string{"stderr"},
+	ErrorOutputPaths: []string{"stderr"},
+}
+
+// AddOutputPaths adds output paths to the existing output paths, resolving conflicts.
+func AddOutputPaths(cfg zap.Config, outputPaths, errorOutputPaths []string) zap.Config {
+	outputs := make(map[string]struct{})
+	for _, v := range cfg.OutputPaths {
+		outputs[v] = struct{}{}
+	}
+	for _, v := range outputPaths {
+		outputs[v] = struct{}{}
+	}
+	outputSlice := make([]string, 0)
+	if _, ok := outputs["/dev/null"]; ok {
+		// "/dev/null" to discard all
+		outputSlice = []string{"/dev/null"}
+	} else {
+		for k := range outputs {
+			outputSlice = append(outputSlice, k)
+		}
+	}
+	cfg.OutputPaths = outputSlice
+	sort.Strings(cfg.OutputPaths)
+
+	errOutputs := make(map[string]struct{})
+	for _, v := range cfg.ErrorOutputPaths {
+		errOutputs[v] = struct{}{}
+	}
+	for _, v := range errorOutputPaths {
+		errOutputs[v] = struct{}{}
+	}
+	errOutputSlice := make([]string, 0)
+	if _, ok := errOutputs["/dev/null"]; ok {
+		// "/dev/null" to discard all
+		errOutputSlice = []string{"/dev/null"}
+	} else {
+		for k := range errOutputs {
+			errOutputSlice = append(errOutputSlice, k)
+		}
+	}
+	cfg.ErrorOutputPaths = errOutputSlice
+	sort.Strings(cfg.ErrorOutputPaths)
+
+	return cfg
+}
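
A minimal sketch (not part of the vendored diff) showing DefaultZapLoggerConfig and AddOutputPaths from this file in use; the extra log file path is a placeholder:

```go
package main

import (
	"github.com/coreos/etcd/pkg/logutil"
	"go.uber.org/zap"
)

func main() {
	// Merge an extra file path into the default stderr outputs.
	cfg := logutil.AddOutputPaths(logutil.DefaultZapLoggerConfig,
		[]string{"/tmp/etcd-demo.log"}, nil)

	lg, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer lg.Sync()

	lg.Info("logging to stderr and the extra output path",
		zap.String("path", "/tmp/etcd-demo.log"))
}
```
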
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go
new file mode 100644
index 0000000..3f48d81
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go
@@ -0,0 +1,111 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"google.golang.org/grpc/grpclog"
+)
+
+// NewGRPCLoggerV2 converts "*zap.Logger" to "grpclog.LoggerV2".
+// It discards all INFO level logging in gRPC, if debug level
+// is not enabled in "*zap.Logger".
+func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) {
+	lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
+	if err != nil {
+		return nil, err
+	}
+	return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}, nil
+}
+
+// NewGRPCLoggerV2FromZapCore creates "grpclog.LoggerV2" from "zap.Core"
+// and "zapcore.WriteSyncer". It discards all INFO level logging in gRPC,
+// if debug level is not enabled in "*zap.Logger".
+func NewGRPCLoggerV2FromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) grpclog.LoggerV2 {
+	// "AddCallerSkip" to annotate caller outside of "logutil"
+	lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
+	return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}
+}
+
+type zapGRPCLogger struct {
+	lg    *zap.Logger
+	sugar *zap.SugaredLogger
+}
+
+func (zl *zapGRPCLogger) Info(args ...interface{}) {
+	if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
+		return
+	}
+	zl.sugar.Info(args...)
+}
+
+func (zl *zapGRPCLogger) Infoln(args ...interface{}) {
+	if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
+		return
+	}
+	zl.sugar.Info(args...)
+}
+
+func (zl *zapGRPCLogger) Infof(format string, args ...interface{}) {
+	if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
+		return
+	}
+	zl.sugar.Infof(format, args...)
+}
+
+func (zl *zapGRPCLogger) Warning(args ...interface{}) {
+	zl.sugar.Warn(args...)
+}
+
+func (zl *zapGRPCLogger) Warningln(args ...interface{}) {
+	zl.sugar.Warn(args...)
+}
+
+func (zl *zapGRPCLogger) Warningf(format string, args ...interface{}) {
+	zl.sugar.Warnf(format, args...)
+}
+
+func (zl *zapGRPCLogger) Error(args ...interface{}) {
+	zl.sugar.Error(args...)
+}
+
+func (zl *zapGRPCLogger) Errorln(args ...interface{}) {
+	zl.sugar.Error(args...)
+}
+
+func (zl *zapGRPCLogger) Errorf(format string, args ...interface{}) {
+	zl.sugar.Errorf(format, args...)
+}
+
+func (zl *zapGRPCLogger) Fatal(args ...interface{}) {
+	zl.sugar.Fatal(args...)
+}
+
+func (zl *zapGRPCLogger) Fatalln(args ...interface{}) {
+	zl.sugar.Fatal(args...)
+}
+
+func (zl *zapGRPCLogger) Fatalf(format string, args ...interface{}) {
+	zl.sugar.Fatalf(format, args...)
+}
+
+func (zl *zapGRPCLogger) V(l int) bool {
+	// infoLog == 0
+	if l <= 0 { // debug level, then we ignore info level in gRPC
+		return !zl.lg.Core().Enabled(zapcore.DebugLevel)
+	}
+	return true
+}
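
An illustrative sketch (not part of the vendored diff) of wiring the zap-backed gRPC logger defined above into grpclog, assuming the default logutil config:

```go
package main

import (
	"github.com/coreos/etcd/pkg/logutil"
	"google.golang.org/grpc/grpclog"
)

func main() {
	lg, err := logutil.NewGRPCLoggerV2(logutil.DefaultZapLoggerConfig)
	if err != nil {
		panic(err)
	}
	// Route gRPC's internal logging through zap; INFO lines are dropped
	// unless the zap config enables debug level, as implemented above.
	grpclog.SetLoggerV2(lg)
}
```
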
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go
new file mode 100644
index 0000000..b1788bc
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go
@@ -0,0 +1,92 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package logutil
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/coreos/etcd/pkg/systemd"
+
+	"github.com/coreos/go-systemd/journal"
+	"go.uber.org/zap/zapcore"
+)
+
+// NewJournalWriter wraps "io.Writer" to redirect log output
+// to the local systemd journal. If the journald send fails, it falls
+// back to writing to the original writer.
+// The decode overhead is only <30µs per write.
+// Reference: https://github.com/coreos/pkg/blob/master/capnslog/journald_formatter.go
+func NewJournalWriter(wr io.Writer) (io.Writer, error) {
+	return &journalWriter{Writer: wr}, systemd.DialJournal()
+}
+
+type journalWriter struct {
+	io.Writer
+}
+
+// WARN: assumes that etcd uses the default field names in the zap encoder config;
+// make sure to keep this up-to-date!
+type logLine struct {
+	Level  string `json:"level"`
+	Caller string `json:"caller"`
+}
+
+func (w *journalWriter) Write(p []byte) (int, error) {
+	line := &logLine{}
+	if err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil {
+		return 0, err
+	}
+
+	var pri journal.Priority
+	switch line.Level {
+	case zapcore.DebugLevel.String():
+		pri = journal.PriDebug
+	case zapcore.InfoLevel.String():
+		pri = journal.PriInfo
+
+	case zapcore.WarnLevel.String():
+		pri = journal.PriWarning
+	case zapcore.ErrorLevel.String():
+		pri = journal.PriErr
+
+	case zapcore.DPanicLevel.String():
+		pri = journal.PriCrit
+	case zapcore.PanicLevel.String():
+		pri = journal.PriCrit
+	case zapcore.FatalLevel.String():
+		pri = journal.PriCrit
+
+	default:
+		panic(fmt.Errorf("unknown log level: %q", line.Level))
+	}
+
+	err := journal.Send(string(p), pri, map[string]string{
+		"PACKAGE":           filepath.Dir(line.Caller),
+		"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
+	})
+	if err != nil {
+		// "journal" also falls back to stderr
+		// "fmt.Fprintln(os.Stderr, s)"
+		return w.Writer.Write(p)
+	}
+	return 0, nil
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go
new file mode 100644
index 0000000..012d688
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go
@@ -0,0 +1,102 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"errors"
+
+	"github.com/coreos/etcd/raft"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// NewRaftLogger builds "raft.Logger" from "*zap.Config".
+func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) {
+	if lcfg == nil {
+		return nil, errors.New("nil zap.Config")
+	}
+	lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
+	if err != nil {
+		return nil, err
+	}
+	return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}, nil
+}
+
+// NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger".
+func NewRaftLoggerZap(lg *zap.Logger) raft.Logger {
+	return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
+}
+
+// NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core"
+// and "zapcore.WriteSyncer".
+func NewRaftLoggerFromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) raft.Logger {
+	// "AddCallerSkip" to annotate caller outside of "logutil"
+	lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
+	return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
+}
+
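+// zapRaftLogger implements raft.Logger by delegating to a zap SugaredLogger.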
+type zapRaftLogger struct {
+	lg    *zap.Logger
+	sugar *zap.SugaredLogger
+}
+
+func (zl *zapRaftLogger) Debug(args ...interface{}) {
+	zl.sugar.Debug(args...)
+}
+
+func (zl *zapRaftLogger) Debugf(format string, args ...interface{}) {
+	zl.sugar.Debugf(format, args...)
+}
+
+func (zl *zapRaftLogger) Error(args ...interface{}) {
+	zl.sugar.Error(args...)
+}
+
+func (zl *zapRaftLogger) Errorf(format string, args ...interface{}) {
+	zl.sugar.Errorf(format, args...)
+}
+
+func (zl *zapRaftLogger) Info(args ...interface{}) {
+	zl.sugar.Info(args...)
+}
+
+func (zl *zapRaftLogger) Infof(format string, args ...interface{}) {
+	zl.sugar.Infof(format, args...)
+}
+
+func (zl *zapRaftLogger) Warning(args ...interface{}) {
+	zl.sugar.Warn(args...)
+}
+
+func (zl *zapRaftLogger) Warningf(format string, args ...interface{}) {
+	zl.sugar.Warnf(format, args...)
+}
+
+func (zl *zapRaftLogger) Fatal(args ...interface{}) {
+	zl.sugar.Fatal(args...)
+}
+
+func (zl *zapRaftLogger) Fatalf(format string, args ...interface{}) {
+	zl.sugar.Fatalf(format, args...)
+}
+
+func (zl *zapRaftLogger) Panic(args ...interface{}) {
+	zl.sugar.Panic(args...)
+}
+
+func (zl *zapRaftLogger) Panicf(format string, args ...interface{}) {
+	zl.sugar.Panicf(format, args...)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/systemd/doc.go b/vendor/github.com/coreos/etcd/pkg/systemd/doc.go
new file mode 100644
index 0000000..30e77ce
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/systemd/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package systemd provides utility functions for systemd.
+package systemd
diff --git a/vendor/github.com/coreos/etcd/pkg/systemd/journal.go b/vendor/github.com/coreos/etcd/pkg/systemd/journal.go
new file mode 100644
index 0000000..b861c69
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/systemd/journal.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package systemd
+
+import "net"
+
+// DialJournal returns no error if the process can dial journal socket.
+// Returns an error if the dial failed, which indicates journald is not available
+// (e.g. when running embedded etcd as a docker daemon).
+// Reference: https://github.com/coreos/go-systemd/blob/master/journal/journal.go.
+func DialJournal() error {
+	conn, err := net.Dial("unixgram", "/run/systemd/journal/socket")
+	if conn != nil {
+		defer conn.Close()
+	}
+	return err
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/github.com/coreos/etcd/pkg/types/doc.go
new file mode 100644
index 0000000..de8ef0b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package types declares various data types and implements type-checking
+// functions.
+package types
diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go
new file mode 100644
index 0000000..1b042d9
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/id.go
@@ -0,0 +1,41 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"strconv"
+)
+
+// ID represents a generic identifier which is canonically
+// stored as a uint64 but is typically represented as a
+// base-16 string for input/output
+type ID uint64
+
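+// String returns the base-16 string representation of the ID.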
+func (i ID) String() string {
+	return strconv.FormatUint(uint64(i), 16)
+}
+
+// IDFromString attempts to create an ID from a base-16 string.
+func IDFromString(s string) (ID, error) {
+	i, err := strconv.ParseUint(s, 16, 64)
+	return ID(i), err
+}
+
+// IDSlice implements the sort interface
+type IDSlice []ID
+
+func (p IDSlice) Len() int           { return len(p) }
+func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
+func (p IDSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go
new file mode 100644
index 0000000..c111b0c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/set.go
@@ -0,0 +1,178 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"reflect"
+	"sort"
+	"sync"
+)
+
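+// Set describes a collection of unique string values.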
+type Set interface {
+	Add(string)
+	Remove(string)
+	Contains(string) bool
+	Equals(Set) bool
+	Length() int
+	Values() []string
+	Copy() Set
+	Sub(Set) Set
+}
+
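+// NewUnsafeSet returns a new set containing the given values. It is not safe
+// for concurrent use.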
+func NewUnsafeSet(values ...string) *unsafeSet {
+	set := &unsafeSet{make(map[string]struct{})}
+	for _, v := range values {
+		set.Add(v)
+	}
+	return set
+}
+
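+// NewThreadsafeSet returns a new set containing the given values, guarded by
+// an RWMutex so it is safe for concurrent use.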
+func NewThreadsafeSet(values ...string) *tsafeSet {
+	us := NewUnsafeSet(values...)
+	return &tsafeSet{us, sync.RWMutex{}}
+}
+
+type unsafeSet struct {
+	d map[string]struct{}
+}
+
+// Add adds a new value to the set (no-op if the value is already present)
+func (us *unsafeSet) Add(value string) {
+	us.d[value] = struct{}{}
+}
+
+// Remove removes the given value from the set
+func (us *unsafeSet) Remove(value string) {
+	delete(us.d, value)
+}
+
+// Contains returns whether the set contains the given value
+func (us *unsafeSet) Contains(value string) (exists bool) {
+	_, exists = us.d[value]
+	return exists
+}
+
+// ContainsAll returns whether the set contains all given values
+func (us *unsafeSet) ContainsAll(values []string) bool {
+	for _, s := range values {
+		if !us.Contains(s) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equals returns whether the contents of two sets are identical
+func (us *unsafeSet) Equals(other Set) bool {
+	v1 := sort.StringSlice(us.Values())
+	v2 := sort.StringSlice(other.Values())
+	v1.Sort()
+	v2.Sort()
+	return reflect.DeepEqual(v1, v2)
+}
+
+// Length returns the number of elements in the set
+func (us *unsafeSet) Length() int {
+	return len(us.d)
+}
+
+// Values returns the values of the Set in an unspecified order.
+func (us *unsafeSet) Values() (values []string) {
+	values = make([]string, 0)
+	for val := range us.d {
+		values = append(values, val)
+	}
+	return values
+}
+
+// Copy creates a new Set containing the values of the original set
+func (us *unsafeSet) Copy() Set {
+	cp := NewUnsafeSet()
+	for val := range us.d {
+		cp.Add(val)
+	}
+
+	return cp
+}
+
+// Sub removes all elements in other from the set
+func (us *unsafeSet) Sub(other Set) Set {
+	oValues := other.Values()
+	result := us.Copy().(*unsafeSet)
+
+	for _, val := range oValues {
+		if _, ok := result.d[val]; !ok {
+			continue
+		}
+		delete(result.d, val)
+	}
+
+	return result
+}
+
+type tsafeSet struct {
+	us *unsafeSet
+	m  sync.RWMutex
+}
+
+func (ts *tsafeSet) Add(value string) {
+	ts.m.Lock()
+	defer ts.m.Unlock()
+	ts.us.Add(value)
+}
+
+func (ts *tsafeSet) Remove(value string) {
+	ts.m.Lock()
+	defer ts.m.Unlock()
+	ts.us.Remove(value)
+}
+
+func (ts *tsafeSet) Contains(value string) (exists bool) {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Contains(value)
+}
+
+func (ts *tsafeSet) Equals(other Set) bool {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Equals(other)
+}
+
+func (ts *tsafeSet) Length() int {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Length()
+}
+
+func (ts *tsafeSet) Values() (values []string) {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Values()
+}
+
+func (ts *tsafeSet) Copy() Set {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	usResult := ts.us.Copy().(*unsafeSet)
+	return &tsafeSet{usResult, sync.RWMutex{}}
+}
+
+func (ts *tsafeSet) Sub(other Set) Set {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	usResult := ts.us.Sub(other).(*unsafeSet)
+	return &tsafeSet{usResult, sync.RWMutex{}}
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/github.com/coreos/etcd/pkg/types/slice.go
new file mode 100644
index 0000000..0dd9ca7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/slice.go
@@ -0,0 +1,22 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+// Uint64Slice implements the sort interface
+type Uint64Slice []uint64
+
+func (p Uint64Slice) Len() int           { return len(p) }
+func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go
new file mode 100644
index 0000000..9e5d03f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/urls.go
@@ -0,0 +1,82 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"sort"
+	"strings"
+)
+
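+// URLs is a sortable slice of url.URL.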
+type URLs []url.URL
+
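+// NewURLs parses the given strings into URLs, requiring each to use an http,
+// https, unix, or unixs scheme, to have a host:port address, and to carry no
+// path; the result is returned in sorted order.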
+func NewURLs(strs []string) (URLs, error) {
+	all := make([]url.URL, len(strs))
+	if len(all) == 0 {
+		return nil, errors.New("no valid URLs given")
+	}
+	for i, in := range strs {
+		in = strings.TrimSpace(in)
+		u, err := url.Parse(in)
+		if err != nil {
+			return nil, err
+		}
+		if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
+			return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
+		}
+		if _, _, err := net.SplitHostPort(u.Host); err != nil {
+			return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
+		}
+		if u.Path != "" {
+			return nil, fmt.Errorf("URL must not contain a path: %s", in)
+		}
+		all[i] = *u
+	}
+	us := URLs(all)
+	us.Sort()
+
+	return us, nil
+}
+
+func MustNewURLs(strs []string) URLs {
+	urls, err := NewURLs(strs)
+	if err != nil {
+		panic(err)
+	}
+	return urls
+}
+
+func (us URLs) String() string {
+	return strings.Join(us.StringSlice(), ",")
+}
+
+func (us *URLs) Sort() {
+	sort.Sort(us)
+}
+func (us URLs) Len() int           { return len(us) }
+func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
+func (us URLs) Swap(i, j int)      { us[i], us[j] = us[j], us[i] }
+
+func (us URLs) StringSlice() []string {
+	out := make([]string, len(us))
+	for i := range us {
+		out[i] = us[i].String()
+	}
+
+	return out
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go
new file mode 100644
index 0000000..47690cc
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// URLsMap is a map from a name to its URLs.
+type URLsMap map[string]URLs
+
+// NewURLsMap returns a URLsMap instantiated from the given string,
+// which consists of discovery-formatted names-to-URLs, like:
+// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2:2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380
+func NewURLsMap(s string) (URLsMap, error) {
+	m := parse(s)
+
+	cl := URLsMap{}
+	for name, urls := range m {
+		us, err := NewURLs(urls)
+		if err != nil {
+			return nil, err
+		}
+		cl[name] = us
+	}
+	return cl, nil
+}
+
+// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The
+// string values in the map can be multiple values separated by the sep string.
+func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) {
+	var err error
+	um := URLsMap{}
+	for k, v := range m {
+		um[k], err = NewURLs(strings.Split(v, sep))
+		if err != nil {
+			return nil, err
+		}
+	}
+	return um, nil
+}
+
+// String turns URLsMap into discovery-formatted name-to-URLs sorted by name.
+func (c URLsMap) String() string {
+	var pairs []string
+	for name, urls := range c {
+		for _, url := range urls {
+			pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String()))
+		}
+	}
+	sort.Strings(pairs)
+	return strings.Join(pairs, ",")
+}
+
+// URLs returns a list of all URLs.
+// The returned list is sorted in ascending lexicographical order.
+func (c URLsMap) URLs() []string {
+	var urls []string
+	for _, us := range c {
+		for _, u := range us {
+			urls = append(urls, u.String())
+		}
+	}
+	sort.Strings(urls)
+	return urls
+}
+
+// Len returns the size of URLsMap.
+func (c URLsMap) Len() int {
+	return len(c)
+}
+
+// parse parses the given string and returns a map listing the values specified for each key.
+func parse(s string) map[string][]string {
+	m := make(map[string][]string)
+	for s != "" {
+		key := s
+		if i := strings.IndexAny(key, ","); i >= 0 {
+			key, s = key[:i], key[i+1:]
+		} else {
+			s = ""
+		}
+		if key == "" {
+			continue
+		}
+		value := ""
+		if i := strings.Index(key, "="); i >= 0 {
+			key, value = key[:i], key[i+1:]
+		}
+		m[key] = append(m[key], value)
+	}
+	return m
+}
diff --git a/vendor/github.com/coreos/etcd/raft/README.md b/vendor/github.com/coreos/etcd/raft/README.md
new file mode 100644
index 0000000..fde22b1
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/README.md
@@ -0,0 +1,196 @@
+# Raft library
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands of clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, and more.
+
+Most Raft implementations have a monolithic design, including storage handling, messaging serialization, and network transport. This library instead follows a minimalistic design philosophy by only implementing the core raft algorithm. This minimalism buys flexibility, determinism, and performance.
+
+To keep the codebase small as well as provide flexibility, the library only implements the Raft algorithm; both network and disk IO are left to the user. Library users must implement their own transportation layer for message passing between Raft peers over the wire. Similarly, users must implement their own storage layer to persist the Raft log and state.
+
+In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine.  The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output.
+
+A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/coreos/etcd/tree/master/contrib/raftexample
+
+# Features
+
+This raft implementation is a full-featured implementation of the Raft protocol. Features include:
+
+- Leader election
+- Log replication
+- Log compaction 
+- Membership changes
+- Leadership transfer extension
+- Efficient linearizable read-only queries served by both the leader and followers
+  - the leader checks with a quorum and bypasses the Raft log before processing read-only queries
+  - followers ask the leader to get a safe read index before processing read-only queries
+- More efficient lease-based linearizable read-only queries served by both the leader and followers
+  - the leader bypasses the Raft log and processes read-only queries locally
+  - followers ask the leader to get a safe read index before processing read-only queries
+  - this approach relies on the clocks of all the machines in the raft group
+
+This raft implementation also includes a few optional enhancements:
+
+- Optimistic pipelining to reduce log replication latency
+- Flow control for log replication
+- Batching Raft messages to reduce synchronized network I/O calls
+- Batching log entries to reduce disk synchronized I/O
+- Writing to leader's disk in parallel
+- Internal proposal redirection from followers to leader
+- Automatic stepping down when the leader loses quorum 
+
+## Notable Users
+
+- [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database
+- [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database
+- [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store
+- [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft
+- [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale.
+- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks
+
+## Usage
+
+The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a three-node cluster
+```go
+  storage := raft.NewMemoryStorage()
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+  // Set peer list to the other nodes in the cluster.
+  // Note that they need to be started separately as well.
+  n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
+```
+
+Start a single node cluster, like so:
+```go
+  // Create storage and config as shown above.
+  // Set peer list to itself, so this node can become the leader of this single-node cluster.
+  peers := []raft.Peer{{ID: 0x01}}
+  n := raft.StartNode(c, peers)
+```
+
+To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so:
+```go
+  // Create storage and config as shown above.
+  n := raft.StartNode(c, nil)
+```
+
+To restart a node from previous state:
+```go
+  storage := raft.NewMemoryStorage()
+
+  // Recover the in-memory storage from persistent snapshot, state and entries.
+  storage.ApplySnapshot(snapshot)
+  storage.SetHardState(state)
+  storage.Append(entries)
+
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+
+  // Restart raft without peer information.
+  // Peer information is already included in the storage.
+  n := raft.RestartNode(c)
+```
+
+After creating a Node, the user has a few responsibilities:
+
+First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2.
+
+1. Write Entries, HardState and Snapshot to persistent storage in order, i.e. Entries first, then HardState and Snapshot if they are not empty. If persistent storage supports atomic writes then all of them can be written together. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied.
+
+Third, after receiving a message from another node, pass it to Node.Step:
+
+```go
+	func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+		n.Step(ctx, m)
+	}
+```
+
+Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+```go
+  for {
+    select {
+    case <-s.Ticker:
+      n.Tick()
+    case rd := <-s.Node.Ready():
+      saveToStorage(rd.State, rd.Entries, rd.Snapshot)
+      send(rd.Messages)
+      if !raft.IsEmptySnap(rd.Snapshot) {
+        processSnapshot(rd.Snapshot)
+      }
+      for _, entry := range rd.CommittedEntries {
+        process(entry)
+        if entry.Type == raftpb.EntryConfChange {
+          var cc raftpb.ConfChange
+          cc.Unmarshal(entry.Data)
+          s.Node.ApplyConfChange(cc)
+        }
+      }
+      s.Node.Advance()
+    case <-s.done:
+      return
+    }
+  }
+```
+
+To propose changes to the state machine from the node, take application data, serialize it into a byte slice, and call:
+
+```go
+	n.Propose(ctx, data)
+```
+
+If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. 
+
+To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
+
+```go
+	n.ProposeConfChange(ctx, cc)
+```
+
+After the config change is committed, a committed entry with type raftpb.EntryConfChange will be returned. This must be applied to the node through:
+
+```go
+	var cc raftpb.ConfChange
+	cc.Unmarshal(data)
+	n.ApplyConfChange(cc)
+```
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+## Implementation notes
+
+This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap.
+
+To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log.
+
+This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster.
diff --git a/vendor/github.com/coreos/etcd/raft/design.md b/vendor/github.com/coreos/etcd/raft/design.md
new file mode 100644
index 0000000..7bc0531
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/design.md
@@ -0,0 +1,57 @@
+## Progress
+
+Progress represents a follower’s progress in the view of the leader. The leader maintains the progress of all followers, and sends a `replication message` to each follower based on its progress.
+
+`replication message` is a `msgApp` with log entries.
+
+A progress has two attributes: `match` and `next`. `match` is the index of the highest known matched entry. If the leader knows nothing about the follower’s replication status, `match` is set to zero. `next` is the index of the first entry that will be replicated to the follower. The leader puts entries from `next` up to its latest one into the next `replication message`.
+
+A progress is in one of three states: `probe`, `replicate`, or `snapshot`.
+
+```
+                            +--------------------------------------------------------+          
+                            |                  send snapshot                         |          
+                            |                                                        |          
+                  +---------+----------+                                  +----------v---------+
+              +--->       probe        |                                  |      snapshot      |
+              |   |  max inflight = 1  <----------------------------------+  max inflight = 0  |
+              |   +---------+----------+                                  +--------------------+
+              |             |            1. snapshot success                                    
+              |             |               (next=snapshot.index + 1)                           
+              |             |            2. snapshot failure                                    
+              |             |               (no change)                                         
+              |             |            3. receives msgAppResp(rej=false&&index>lastsnap.index)
+              |             |               (match=m.index,next=match+1)                        
+receives msgAppResp(rej=true)                                                                   
+(next=match+1)|             |                                                                   
+              |             |                                                                   
+              |             |                                                                   
+              |             |   receives msgAppResp(rej=false&&index>match)                     
+              |             |   (match=m.index,next=match+1)                                    
+              |             |                                                                   
+              |             |                                                                   
+              |             |                                                                   
+              |   +---------v----------+                                                        
+              |   |     replicate      |                                                        
+              +---+  max inflight = n  |                                                        
+                  +--------------------+                                                        
+```
+
+When the progress of a follower is in the `probe` state, the leader sends at most one `replication message` per heartbeat interval. The leader sends `replication message`s slowly, probing the actual progress of the follower. A `msgHeartbeatResp` or a rejecting `msgAppResp` might trigger the sending of the next `replication message`.
+
+When the progress of a follower is in the `replicate` state, the leader sends a `replication message`, then optimistically increases `next` to the latest entry sent. This is an optimized state for quickly replicating log entries to the follower.
+
+When the progress of a follower is in the `snapshot` state, the leader stops sending any `replication message`.
+
+A newly elected leader sets the progress of every follower to the `probe` state with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends a `replication message` to the follower and probes its progress.
+
+A progress changes to `replicate` when the follower replies with a non-rejecting `msgAppResp`, which implies that it has matched the index sent. At this point, the leader starts to stream log entries to the follower quickly. The progress falls back to `probe` when the follower replies with a rejecting `msgAppResp` or the link layer reports the follower is unreachable. We aggressively reset `next` to `match`+1 since, if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in `msgAppResp`. (We might end up sending some duplicate entries when we aggressively reset `next` too low; see the open question.)
+
+A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending `msgSnap`, the leader waits for the success, failure, or abortion of the previous snapshot sent. The progress goes back to `probe` after the sending result is applied.
+
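+To make the state bookkeeping above concrete, here is a minimal Go sketch of per-follower progress tracking. The type, field, and state names are illustrative only and are not the actual identifiers used in this package.
+
+```go
+// progressSketch is an illustrative model of the per-follower state the
+// leader keeps: the highest matched index, the next index to send, and
+// which of the three replication states the follower is in.
+type progressState int
+
+const (
+	stateProbe progressState = iota
+	stateReplicate
+	stateSnapshot
+)
+
+type progressSketch struct {
+	match uint64        // index of the highest known matched entry (0 if unknown)
+	next  uint64        // index of the next entry to replicate
+	state progressState // probe, replicate, or snapshot
+}
+
+// onAppendResp models the transitions described above for a msgAppResp.
+func (p *progressSketch) onAppendResp(index uint64, rejected bool) {
+	if rejected {
+		// Fall back to probe and aggressively reset next.
+		p.state = stateProbe
+		p.next = p.match + 1
+		return
+	}
+	if index > p.match {
+		// A successful append: advance match/next and stream quickly.
+		p.match = index
+		p.next = p.match + 1
+		p.state = stateReplicate
+	}
+}
+```
+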
+### Flow Control
+
+1. Limit the max size of each message sent. The max should be configurable.
+This lowers the cost in the probing state, since we limit the size per message, and lowers the penalty when `next` has been aggressively decreased to too low a value.
+
+2. Limit the number of in-flight messages to fewer than N when in the `replicate` state. N should be configurable. Most implementations will have a sending buffer on top of the actual network transport layer (so the raft node is not blocked). We want to make sure raft does not overflow that buffer, which could cause messages to be dropped and trigger a round of unnecessary resending (see the configuration sketch below).
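+
+Both limits surface as fields on this package's `Config`, as in the README's `StartNode` example. A minimal sketch of tuning them (the values are illustrative):
+
+```go
+  storage := raft.NewMemoryStorage()
+  c := &raft.Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096, // limit 1: max size of each replication (append) message
+    MaxInflightMsgs: 256,  // limit 2: max number of in-flight replication messages per follower
+  }
+```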
diff --git a/vendor/github.com/coreos/etcd/raft/doc.go b/vendor/github.com/coreos/etcd/raft/doc.go
new file mode 100644
index 0000000..b55c591
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/doc.go
@@ -0,0 +1,300 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package raft sends and receives messages in the Protocol Buffer format
+defined in the raftpb package.
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+A simple example application, _raftexample_, is also available to help illustrate
+how to use this package in practice:
+https://github.com/coreos/etcd/tree/master/contrib/raftexample
+
+Usage
+
+The primary object in raft is a Node. You either start a Node from scratch
+using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a node from scratch:
+
+  storage := raft.NewMemoryStorage()
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+  n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
+
+To restart a node from previous state:
+
+  storage := raft.NewMemoryStorage()
+
+  // recover the in-memory storage from persistent
+  // snapshot, state and entries.
+  storage.ApplySnapshot(snapshot)
+  storage.SetHardState(state)
+  storage.Append(entries)
+
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+
+  // restart raft without peer information.
+  // peer information is already included in the storage.
+  n := raft.RestartNode(c)
+
+Now that you are holding onto a Node you have a few responsibilities:
+
+First, you must read from the Node.Ready() channel and process the updates
+it contains. These steps may be performed in parallel, except as noted in step
+2.
+
+1. Write HardState, Entries, and Snapshot to persistent storage if they are
+not empty. Note that when writing an Entry with Index i, any
+previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that
+no messages be sent until the latest HardState has been persisted to disk,
+and all Entries written by any previous Ready batch (Messages may be sent while
+entries from the same batch are being persisted). To reduce the I/O latency, an
+optimization can be applied to make leader write to disk in parallel with its
+followers (as explained at section 10.2.1 in Raft thesis). If any Message has type
+MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
+large).
+
+Note: Marshalling messages is not thread-safe; it is important that you
+make sure that no new entries are persisted while marshalling.
+The easiest way to achieve this is to serialise the messages directly inside
+your main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine.
+If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
+to apply it to the node. The configuration change may be cancelled at this point
+by setting the NodeID field to zero before calling ApplyConfChange
+(but ApplyConfChange must be called one way or the other, and the decision to cancel
+must be based solely on the state machine and not external information such as
+the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates.
+This may be done at any time after step 1, although all updates must be processed
+in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an
+implementation of the Storage interface. The provided MemoryStorage
+type can be used for this (if you repopulate its state upon a
+restart), or you can supply your own disk-backed implementation.
+
+Third, when you receive a message from another node, pass it to Node.Step:
+
+	func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+		n.Step(ctx, m)
+	}
+
+Finally, you need to call Node.Tick() at regular intervals (probably
+via a time.Ticker). Raft has two important timeouts: heartbeat and the
+election timeout. However, internally to the raft package time is
+represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+  for {
+    select {
+    case <-s.Ticker:
+      n.Tick()
+    case rd := <-s.Node.Ready():
+      saveToStorage(rd.State, rd.Entries, rd.Snapshot)
+      send(rd.Messages)
+      if !raft.IsEmptySnap(rd.Snapshot) {
+        processSnapshot(rd.Snapshot)
+      }
+      for _, entry := range rd.CommittedEntries {
+        process(entry)
+        if entry.Type == raftpb.EntryConfChange {
+          var cc raftpb.ConfChange
+          cc.Unmarshal(entry.Data)
+          s.Node.ApplyConfChange(cc)
+        }
+      }
+      s.Node.Advance()
+    case <-s.done:
+      return
+    }
+  }
+
+To propose changes to the state machine from your node, take your application
+data, serialize it into a byte slice and call:
+
+	n.Propose(ctx, data)
+
+If the proposal is committed, data will appear in committed entries with type
+raftpb.EntryNormal. There is no guarantee that a proposed command will be
+committed; you may have to re-propose after a timeout.
+
+To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
+
+	n.ProposeConfChange(ctx, cc)
+
+After the config change is committed, a committed entry with type
+raftpb.EntryConfChange will be returned. You must apply it to the node through:
+
+	var cc raftpb.ConfChange
+	cc.Unmarshal(data)
+	n.ApplyConfChange(cc)
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+Implementation notes
+
+This implementation is up to date with the final Raft thesis
+(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our
+implementation of the membership change protocol differs somewhat from
+that described in chapter 4. The key invariant that membership changes
+happen one node at a time is preserved, but in our implementation the
+membership change takes effect when its entry is applied, not when it
+is added to the log (so the entry is committed under the old
+membership instead of the new). This is equivalent in terms of safety,
+since the old and new configurations are guaranteed to overlap.
+
+To ensure that we do not attempt to commit two membership changes at
+once by matching log positions (which would be unsafe since they
+should have different quorum requirements), we simply disallow any
+proposed membership change while any uncommitted change appears in
+the leader's log.
+
+This approach introduces a problem when you try to remove a member
+from a two-member cluster: If one of the members dies before the
+other one receives the commit of the confchange entry, then the member
+cannot be removed any more since the cluster cannot make progress.
+For this reason it is highly recommended to use three or more nodes in
+every cluster.
+
+MessageType
+
+Package raft sends and receives message in Protocol Buffer format (defined
+in raftpb package). Each state (follower, candidate, leader) implements its
+own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when
+advancing with the given raftpb.Message. Each step is determined by its
+raftpb.MessageType. Note that every step is checked by one common method
+'Step' that safety-checks the terms of node and incoming message to prevent
+stale log entries:
+
+	'MsgHup' is used for election. If a node is a follower or candidate, the
+	'tick' function in 'raft' struct is set as 'tickElection'. If a follower or
+	candidate has not received any heartbeat before the election timeout, it
+	passes 'MsgHup' to its Step method and becomes (or remains) a candidate to
+	start a new election.
+
+	'MsgBeat' is an internal type that signals the leader to send a heartbeat of
+	the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
+	the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to
+	send periodic 'MsgHeartbeat' messages to its followers.
+
+	'MsgProp' proposes to append data to its log entries. This is a special
+	type to redirect proposals to leader. Therefore, send method overwrites
+	raftpb.Message's term with its HardState's term to avoid attaching its
+	local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step'
+	method, the leader first calls the 'appendEntry' method to append entries
+	to its log, and then calls 'bcastAppend' method to send those entries to
+	its peers. When passed to candidate, 'MsgProp' is dropped. When passed to
+	follower, 'MsgProp' is stored in follower's mailbox(msgs) by the send
+	method. It is stored with sender's ID and later forwarded to leader by
+	rafthttp package.
+
+	'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
+	which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp'
+	type. When 'MsgApp' is passed to candidate's Step method, candidate reverts
+	back to follower, because it indicates that there is a valid leader sending
+	'MsgApp' messages. Candidate and follower respond to this message in
+	'MsgAppResp' type.
+
+	'MsgAppResp' is a response to a log replication request ('MsgApp'). When
+	'MsgApp' is passed to candidate or follower's Step method, it responds by
+	calling 'handleAppendEntries' method, which sends 'MsgAppResp' to raft
+	mailbox.
+
+	'MsgVote' requests votes for election. When a node is a follower or
+	candidate and 'MsgHup' is passed to its Step method, then the node calls
+	'campaign' method to campaign itself to become a leader. Once 'campaign'
+	method is called, the node becomes candidate and sends 'MsgVote' to peers
+	in cluster to request votes. When passed to leader or candidate's Step
+	method and the message's Term is lower than leader's or candidate's,
+	'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true).
+	If leader or candidate receives 'MsgVote' with higher term, it will revert
+	back to follower. When 'MsgVote' is passed to follower, it votes for the
+	sender only when sender's last term is greater than MsgVote's term or
+	sender's last term is equal to MsgVote's term but sender's last committed
+	index is greater than or equal to follower's.
+
+	'MsgVoteResp' contains responses from voting request. When 'MsgVoteResp' is
+	passed to candidate, the candidate calculates how many votes it has won. If
+	it's more than majority (quorum), it becomes leader and calls 'bcastAppend'.
+	If candidate receives majority of votes of denials, it reverts back to
+	follower.
+
+	'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election
+	protocol. When Config.PreVote is true, a pre-election is carried out first
+	(using the same rules as a regular election), and no node increases its term
+	number unless the pre-election indicates that the campaigning node would win.
+	This minimizes disruption when a partitioned node rejoins the cluster.
+
+	'MsgSnap' requests to install a snapshot message. When a node has just
+	become a leader or the leader receives 'MsgProp' message, it calls
+	'bcastAppend' method, which then calls 'sendAppend' method to each
+	follower. In 'sendAppend', if a leader fails to get term or entries,
+	the leader requests snapshot by sending 'MsgSnap' type message.
+
+	'MsgSnapStatus' reports the result of a snapshot install message. When a
+	follower rejects 'MsgSnap', it indicates that the snapshot request sent via
+	'MsgSnap' failed, typically because network issues prevented the network
+	layer from delivering the snapshot to the follower. The leader then
+	considers the follower's progress as probe. When 'MsgSnap' is not rejected,
+	it indicates that the snapshot succeeded, and the leader sets the
+	follower's progress to probe and resumes its log replication.
+
+	'MsgHeartbeat' sends heartbeat from leader. When 'MsgHeartbeat' is passed
+	to candidate and message's term is higher than candidate's, the candidate
+	reverts back to follower and updates its committed index from the one in
+	this heartbeat. And it sends the message to its mailbox. When
+	'MsgHeartbeat' is passed to follower's Step method and message's term is
+	higher than follower's, the follower updates its leaderID with the ID
+	from the message.
+
+	'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
+	is passed to leader's Step method, the leader knows which follower
+	responded. Only when the leader's last committed index is greater than the
+	follower's Match index does the leader run the 'sendAppend' method.
+
+	'MsgUnreachable' tells that a request (message) wasn't delivered. When
+	'MsgUnreachable' is passed to leader's Step method, the leader discovers
+	that the follower that sent this 'MsgUnreachable' is not reachable, often
+	indicating 'MsgApp' is lost. When follower's progress state is replicate,
+	the leader sets it back to probe.
+
+*/
+package raft
diff --git a/vendor/github.com/coreos/etcd/raft/log.go b/vendor/github.com/coreos/etcd/raft/log.go
new file mode 100644
index 0000000..c3036d3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/log.go
@@ -0,0 +1,358 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+	"log"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+type raftLog struct {
+	// storage contains all stable entries since the last snapshot.
+	storage Storage
+
+	// unstable contains all unstable entries and snapshot.
+	// they will be saved into storage.
+	unstable unstable
+
+	// committed is the highest log position that is known to be in
+	// stable storage on a quorum of nodes.
+	committed uint64
+	// applied is the highest log position that the application has
+	// been instructed to apply to its state machine.
+	// Invariant: applied <= committed
+	applied uint64
+
+	logger Logger
+}
+
+// newLog returns a log using the given storage. It recovers the log to the
+// state where it has just committed and applied the latest snapshot.
+func newLog(storage Storage, logger Logger) *raftLog {
+	if storage == nil {
+		log.Panic("storage must not be nil")
+	}
+	log := &raftLog{
+		storage: storage,
+		logger:  logger,
+	}
+	firstIndex, err := storage.FirstIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	lastIndex, err := storage.LastIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	log.unstable.offset = lastIndex + 1
+	log.unstable.logger = logger
+	// Initialize our committed and applied pointers to the time of the last compaction.
+	log.committed = firstIndex - 1
+	log.applied = firstIndex - 1
+
+	return log
+}
+
+func (l *raftLog) String() string {
+	return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries))
+}
+
+// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise,
+// it returns (last index of new entries, true).
+func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
+	if l.matchTerm(index, logTerm) {
+		lastnewi = index + uint64(len(ents))
+		ci := l.findConflict(ents)
+		switch {
+		case ci == 0:
+		case ci <= l.committed:
+			l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
+		default:
+			offset := index + 1
+			l.append(ents[ci-offset:]...)
+		}
+		l.commitTo(min(committed, lastnewi))
+		return lastnewi, true
+	}
+	return 0, false
+}
+
+func (l *raftLog) append(ents ...pb.Entry) uint64 {
+	if len(ents) == 0 {
+		return l.lastIndex()
+	}
+	if after := ents[0].Index - 1; after < l.committed {
+		l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed)
+	}
+	l.unstable.truncateAndAppend(ents)
+	return l.lastIndex()
+}
+
+// findConflict finds the index of the conflict.
+// It returns the first pair of conflicting entries between the existing
+// entries and the given entries, if there are any.
+// If there are no conflicting entries, and the existing entries contain
+// all the given entries, zero will be returned.
+// If there are no conflicting entries, but the given entries contain new
+// entries, the index of the first new entry will be returned.
+// An entry is considered to be conflicting if it has the same index but
+// a different term.
+// The first entry MUST have an index equal to the argument 'from'.
+// The index of the given entries MUST be continuously increasing.
+func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
+	for _, ne := range ents {
+		if !l.matchTerm(ne.Index, ne.Term) {
+			if ne.Index <= l.lastIndex() {
+				l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
+					ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term)
+			}
+			return ne.Index
+		}
+	}
+	return 0
+}
+
+func (l *raftLog) unstableEntries() []pb.Entry {
+	if len(l.unstable.entries) == 0 {
+		return nil
+	}
+	return l.unstable.entries
+}
+
+// nextEnts returns all the available entries for execution.
+// If applied is smaller than the index of snapshot, it returns all committed
+// entries after the index of snapshot.
+func (l *raftLog) nextEnts() (ents []pb.Entry) {
+	off := max(l.applied+1, l.firstIndex())
+	if l.committed+1 > off {
+		ents, err := l.slice(off, l.committed+1, noLimit)
+		if err != nil {
+			l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
+		}
+		return ents
+	}
+	return nil
+}
+
+// hasNextEnts returns whether there are any available entries for execution.
+// This is a fast check that avoids the heavier raftLog.slice() call in
+// raftLog.nextEnts().
+func (l *raftLog) hasNextEnts() bool {
+	off := max(l.applied+1, l.firstIndex())
+	return l.committed+1 > off
+}
+
+func (l *raftLog) snapshot() (pb.Snapshot, error) {
+	if l.unstable.snapshot != nil {
+		return *l.unstable.snapshot, nil
+	}
+	return l.storage.Snapshot()
+}
+
+func (l *raftLog) firstIndex() uint64 {
+	if i, ok := l.unstable.maybeFirstIndex(); ok {
+		return i
+	}
+	index, err := l.storage.FirstIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	return index
+}
+
+func (l *raftLog) lastIndex() uint64 {
+	if i, ok := l.unstable.maybeLastIndex(); ok {
+		return i
+	}
+	i, err := l.storage.LastIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	return i
+}
+
+func (l *raftLog) commitTo(tocommit uint64) {
+	// never decrease commit
+	if l.committed < tocommit {
+		if l.lastIndex() < tocommit {
+			l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex())
+		}
+		l.committed = tocommit
+	}
+}
+
+func (l *raftLog) appliedTo(i uint64) {
+	if i == 0 {
+		return
+	}
+	if l.committed < i || i < l.applied {
+		l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
+	}
+	l.applied = i
+}
+
+func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) }
+
+func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) }
+
+func (l *raftLog) lastTerm() uint64 {
+	t, err := l.term(l.lastIndex())
+	if err != nil {
+		l.logger.Panicf("unexpected error when getting the last term (%v)", err)
+	}
+	return t
+}
+
+func (l *raftLog) term(i uint64) (uint64, error) {
+	// the valid term range is [index of dummy entry, last index]
+	dummyIndex := l.firstIndex() - 1
+	if i < dummyIndex || i > l.lastIndex() {
+		// TODO: return an error instead?
+		return 0, nil
+	}
+
+	if t, ok := l.unstable.maybeTerm(i); ok {
+		return t, nil
+	}
+
+	t, err := l.storage.Term(i)
+	if err == nil {
+		return t, nil
+	}
+	if err == ErrCompacted || err == ErrUnavailable {
+		return 0, err
+	}
+	panic(err) // TODO(bdarnell)
+}
+
+func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) {
+	if i > l.lastIndex() {
+		return nil, nil
+	}
+	return l.slice(i, l.lastIndex()+1, maxsize)
+}
+
+// allEntries returns all entries in the log.
+func (l *raftLog) allEntries() []pb.Entry {
+	ents, err := l.entries(l.firstIndex(), noLimit)
+	if err == nil {
+		return ents
+	}
+	if err == ErrCompacted { // try again if there was a racing compaction
+		return l.allEntries()
+	}
+	// TODO (xiangli): handle error?
+	panic(err)
+}
+
+// isUpToDate determines if the given (lastIndex,term) log is more up-to-date
+// by comparing the index and term of the last entries in the existing logs.
+// If the logs have last entries with different terms, then the log with the
+// later term is more up-to-date. If the logs end with the same term, then
+// whichever log has the larger lastIndex is more up-to-date. If the logs are
+// the same, the given log is up-to-date.
+func (l *raftLog) isUpToDate(lasti, term uint64) bool {
+	return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex())
+}
+
+func (l *raftLog) matchTerm(i, term uint64) bool {
+	t, err := l.term(i)
+	if err != nil {
+		return false
+	}
+	return t == term
+}
+
+func (l *raftLog) maybeCommit(maxIndex, term uint64) bool {
+	if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term {
+		l.commitTo(maxIndex)
+		return true
+	}
+	return false
+}
+
+func (l *raftLog) restore(s pb.Snapshot) {
+	l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
+	l.committed = s.Metadata.Index
+	l.unstable.restore(s)
+}
+
+// slice returns a slice of log entries from lo through hi-1, inclusive.
+func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) {
+	err := l.mustCheckOutOfBounds(lo, hi)
+	if err != nil {
+		return nil, err
+	}
+	if lo == hi {
+		return nil, nil
+	}
+	var ents []pb.Entry
+	if lo < l.unstable.offset {
+		storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize)
+		if err == ErrCompacted {
+			return nil, err
+		} else if err == ErrUnavailable {
+			l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset))
+		} else if err != nil {
+			panic(err) // TODO(bdarnell)
+		}
+
+		// check if ents has reached the size limitation
+		if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo {
+			return storedEnts, nil
+		}
+
+		ents = storedEnts
+	}
+	if hi > l.unstable.offset {
+		unstable := l.unstable.slice(max(lo, l.unstable.offset), hi)
+		if len(ents) > 0 {
+			ents = append([]pb.Entry{}, ents...)
+			ents = append(ents, unstable...)
+		} else {
+			ents = unstable
+		}
+	}
+	return limitSize(ents, maxSize), nil
+}
+
+// l.firstIndex <= lo <= hi <= l.lastIndex() + 1
+func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
+	if lo > hi {
+		l.logger.Panicf("invalid slice %d > %d", lo, hi)
+	}
+	fi := l.firstIndex()
+	if lo < fi {
+		return ErrCompacted
+	}
+
+	length := l.lastIndex() + 1 - fi
+	if lo < fi || hi > fi+length {
+		l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
+	}
+	return nil
+}
+
+func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 {
+	if err == nil {
+		return t
+	}
+	if err == ErrCompacted {
+		return 0
+	}
+	l.logger.Panicf("unexpected error (%v)", err)
+	return 0
+}
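+
+// applyAvailableEntries is an illustrative sketch, not part of the upstream
+// file: it shows how the committed/applied window defined above is consumed.
+// The apply callback stands in for a hypothetical application state machine.
+func applyAvailableEntries(l *raftLog, apply func([]pb.Entry)) {
+	if !l.hasNextEnts() {
+		return
+	}
+	ents := l.nextEnts()                 // entries in (applied, committed]
+	apply(ents)                          // hand them to the state machine
+	l.appliedTo(ents[len(ents)-1].Index) // advance the applied pointer
+}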
diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/github.com/coreos/etcd/raft/log_unstable.go
new file mode 100644
index 0000000..263af9c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/log_unstable.go
@@ -0,0 +1,159 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "github.com/coreos/etcd/raft/raftpb"
+
+// unstable.entries[i] has raft log position i+unstable.offset.
+// Note that unstable.offset may be less than the highest log
+// position in storage; this means that the next write to storage
+// might need to truncate the log before persisting unstable.entries.
+type unstable struct {
+	// the incoming unstable snapshot, if any.
+	snapshot *pb.Snapshot
+	// all entries that have not yet been written to storage.
+	entries []pb.Entry
+	offset  uint64
+
+	logger Logger
+}
+
+// maybeFirstIndex returns the index of the first possible entry in entries
+// if it has a snapshot.
+func (u *unstable) maybeFirstIndex() (uint64, bool) {
+	if u.snapshot != nil {
+		return u.snapshot.Metadata.Index + 1, true
+	}
+	return 0, false
+}
+
+// maybeLastIndex returns the last index if it has at least one
+// unstable entry or snapshot.
+func (u *unstable) maybeLastIndex() (uint64, bool) {
+	if l := len(u.entries); l != 0 {
+		return u.offset + uint64(l) - 1, true
+	}
+	if u.snapshot != nil {
+		return u.snapshot.Metadata.Index, true
+	}
+	return 0, false
+}
+
+// maybeTerm returns the term of the entry at index i, if there
+// is any.
+func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
+	if i < u.offset {
+		if u.snapshot == nil {
+			return 0, false
+		}
+		if u.snapshot.Metadata.Index == i {
+			return u.snapshot.Metadata.Term, true
+		}
+		return 0, false
+	}
+
+	last, ok := u.maybeLastIndex()
+	if !ok {
+		return 0, false
+	}
+	if i > last {
+		return 0, false
+	}
+	return u.entries[i-u.offset].Term, true
+}
+
+func (u *unstable) stableTo(i, t uint64) {
+	gt, ok := u.maybeTerm(i)
+	if !ok {
+		return
+	}
+	// if i < offset, term is matched with the snapshot
+	// only update the unstable entries if term is matched with
+	// an unstable entry.
+	if gt == t && i >= u.offset {
+		u.entries = u.entries[i+1-u.offset:]
+		u.offset = i + 1
+		u.shrinkEntriesArray()
+	}
+}
+
+// shrinkEntriesArray discards the underlying array used by the entries slice
+// if most of it isn't being used. This avoids holding references to a bunch of
+// potentially large entries that aren't needed anymore. Simply clearing the
+// entries wouldn't be safe because clients might still be using them.
+func (u *unstable) shrinkEntriesArray() {
+	// We replace the array if we're using less than half of the space in
+	// it. This number is fairly arbitrary, chosen as an attempt to balance
+	// memory usage vs number of allocations. It could probably be improved
+	// with some focused tuning.
+	const lenMultiple = 2
+	if len(u.entries) == 0 {
+		u.entries = nil
+	} else if len(u.entries)*lenMultiple < cap(u.entries) {
+		newEntries := make([]pb.Entry, len(u.entries))
+		copy(newEntries, u.entries)
+		u.entries = newEntries
+	}
+}
+
+func (u *unstable) stableSnapTo(i uint64) {
+	if u.snapshot != nil && u.snapshot.Metadata.Index == i {
+		u.snapshot = nil
+	}
+}
+
+func (u *unstable) restore(s pb.Snapshot) {
+	u.offset = s.Metadata.Index + 1
+	u.entries = nil
+	u.snapshot = &s
+}
+
+func (u *unstable) truncateAndAppend(ents []pb.Entry) {
+	after := ents[0].Index
+	switch {
+	case after == u.offset+uint64(len(u.entries)):
+		// after is the next index in the u.entries
+		// directly append
+		u.entries = append(u.entries, ents...)
+	case after <= u.offset:
+		u.logger.Infof("replace the unstable entries from index %d", after)
+		// The log is being truncated to before our current offset
+		// portion, so set the offset and replace the entries
+		u.offset = after
+		u.entries = ents
+	default:
+		// truncate to after and copy to u.entries
+		// then append
+		u.logger.Infof("truncate the unstable entries before index %d", after)
+		u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...)
+		u.entries = append(u.entries, ents...)
+	}
+}
+
+func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
+	u.mustCheckOutOfBounds(lo, hi)
+	return u.entries[lo-u.offset : hi-u.offset]
+}
+
+// u.offset <= lo <= hi <= u.offset+len(u.entries)
+func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
+	if lo > hi {
+		u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
+	}
+	upper := u.offset + uint64(len(u.entries))
+	if lo < u.offset || hi > upper {
+		u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
+	}
+}
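+
+// exampleTruncateAndAppend is an illustrative sketch, not part of the upstream
+// file, walking the three truncateAndAppend cases above with concrete indexes.
+func exampleTruncateAndAppend() {
+	u := unstable{offset: 5, entries: []pb.Entry{{Index: 5}, {Index: 6}, {Index: 7}}, logger: raftLogger}
+	// after == offset+len(entries): plain append, entries now cover 5..8.
+	u.truncateAndAppend([]pb.Entry{{Index: 8}})
+	// offset < after <= last: keep 5..6, drop 7..8, then append; entries cover 5..7.
+	u.truncateAndAppend([]pb.Entry{{Index: 7}})
+	// after <= offset: replace everything and move offset back to 4.
+	u.truncateAndAppend([]pb.Entry{{Index: 4}, {Index: 5}})
+}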
diff --git a/vendor/github.com/coreos/etcd/raft/logger.go b/vendor/github.com/coreos/etcd/raft/logger.go
new file mode 100644
index 0000000..426a77d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/logger.go
@@ -0,0 +1,126 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+)
+
+type Logger interface {
+	Debug(v ...interface{})
+	Debugf(format string, v ...interface{})
+
+	Error(v ...interface{})
+	Errorf(format string, v ...interface{})
+
+	Info(v ...interface{})
+	Infof(format string, v ...interface{})
+
+	Warning(v ...interface{})
+	Warningf(format string, v ...interface{})
+
+	Fatal(v ...interface{})
+	Fatalf(format string, v ...interface{})
+
+	Panic(v ...interface{})
+	Panicf(format string, v ...interface{})
+}
+
+func SetLogger(l Logger) { raftLogger = l }
+
+var (
+	defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
+	discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
+	raftLogger    = Logger(defaultLogger)
+)
+
+const (
+	calldepth = 2
+)
+
+// DefaultLogger is a default implementation of the Logger interface.
+type DefaultLogger struct {
+	*log.Logger
+	debug bool
+}
+
+func (l *DefaultLogger) EnableTimestamps() {
+	l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
+}
+
+func (l *DefaultLogger) EnableDebug() {
+	l.debug = true
+}
+
+func (l *DefaultLogger) Debug(v ...interface{}) {
+	if l.debug {
+		l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
+	}
+}
+
+func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
+	if l.debug {
+		l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
+	}
+}
+
+func (l *DefaultLogger) Info(v ...interface{}) {
+	l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Infof(format string, v ...interface{}) {
+	l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Error(v ...interface{}) {
+	l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
+	l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Warning(v ...interface{}) {
+	l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
+	l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Fatal(v ...interface{}) {
+	l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
+	os.Exit(1)
+}
+
+func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
+	l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
+	os.Exit(1)
+}
+
+func (l *DefaultLogger) Panic(v ...interface{}) {
+	l.Logger.Panic(v...)
+}
+
+func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
+	l.Logger.Panicf(format, v...)
+}
+
+func header(lvl, msg string) string {
+	return fmt.Sprintf("%s: %s", lvl, msg)
+}
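+
+// exampleSilenceLogging is an illustrative sketch, not part of the upstream
+// file: applications can swap the default stderr logger via SetLogger, for
+// example to discard raft logging entirely (from outside the package this
+// would be raft.SetLogger(&raft.DefaultLogger{...})).
+func exampleSilenceLogging() {
+	SetLogger(&DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)})
+}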
diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/github.com/coreos/etcd/raft/node.go
new file mode 100644
index 0000000..33a9db8
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/node.go
@@ -0,0 +1,539 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"context"
+	"errors"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+type SnapshotStatus int
+
+const (
+	SnapshotFinish  SnapshotStatus = 1
+	SnapshotFailure SnapshotStatus = 2
+)
+
+var (
+	emptyState = pb.HardState{}
+
+	// ErrStopped is returned by methods on Nodes that have been stopped.
+	ErrStopped = errors.New("raft: stopped")
+)
+
+// SoftState provides state that is useful for logging and debugging.
+// The state is volatile and does not need to be persisted to the WAL.
+type SoftState struct {
+	Lead      uint64 // must use atomic operations to access; keep 64-bit aligned.
+	RaftState StateType
+}
+
+func (a *SoftState) equal(b *SoftState) bool {
+	return a.Lead == b.Lead && a.RaftState == b.RaftState
+}
+
+// Ready encapsulates the entries and messages that are ready to read,
+// be saved to stable storage, committed or sent to other peers.
+// All fields in Ready are read-only.
+type Ready struct {
+	// The current volatile state of a Node.
+	// SoftState will be nil if there is no update.
+	// It is not required to consume or store SoftState.
+	*SoftState
+
+	// The current state of a Node to be saved to stable storage BEFORE
+	// Messages are sent.
+	// HardState will be equal to empty state if there is no update.
+	pb.HardState
+
+	// ReadStates can be used for node to serve linearizable read requests locally
+	// when its applied index is greater than the index in ReadState.
+	// Note that the readState will be returned when raft receives msgReadIndex.
+	// The returned state is only valid for the request that requested to read.
+	ReadStates []ReadState
+
+	// Entries specifies entries to be saved to stable storage BEFORE
+	// Messages are sent.
+	Entries []pb.Entry
+
+	// Snapshot specifies the snapshot to be saved to stable storage.
+	Snapshot pb.Snapshot
+
+	// CommittedEntries specifies entries to be committed to a
+	// store/state-machine. These have previously been committed to stable
+	// store.
+	CommittedEntries []pb.Entry
+
+	// Messages specifies outbound messages to be sent AFTER Entries are
+	// committed to stable storage.
+	// If it contains a MsgSnap message, the application MUST report back to raft
+	// when the snapshot has been received or has failed by calling ReportSnapshot.
+	Messages []pb.Message
+
+	// MustSync indicates whether the HardState and Entries must be synchronously
+	// written to disk or if an asynchronous write is permissible.
+	MustSync bool
+}
+
+func isHardStateEqual(a, b pb.HardState) bool {
+	return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
+}
+
+// IsEmptyHardState returns true if the given HardState is empty.
+func IsEmptyHardState(st pb.HardState) bool {
+	return isHardStateEqual(st, emptyState)
+}
+
+// IsEmptySnap returns true if the given Snapshot is empty.
+func IsEmptySnap(sp pb.Snapshot) bool {
+	return sp.Metadata.Index == 0
+}
+
+func (rd Ready) containsUpdates() bool {
+	return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
+		!IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
+		len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
+}
+
+// Node represents a node in a raft cluster.
+type Node interface {
+	// Tick increments the internal logical clock for the Node by a single tick. Election
+	// timeouts and heartbeat timeouts are in units of ticks.
+	Tick()
+	// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
+	Campaign(ctx context.Context) error
+	// Propose proposes that data be appended to the log.
+	Propose(ctx context.Context, data []byte) error
+	// ProposeConfChange proposes config change.
+	// At most one ConfChange can be in the process of going through consensus.
+	// Application needs to call ApplyConfChange when applying EntryConfChange type entry.
+	ProposeConfChange(ctx context.Context, cc pb.ConfChange) error
+	// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
+	Step(ctx context.Context, msg pb.Message) error
+
+	// Ready returns a channel that returns the current point-in-time state.
+	// Users of the Node must call Advance after retrieving the state returned by Ready.
+	//
+	// NOTE: No committed entries from the next Ready may be applied until all committed entries
+	// and snapshots from the previous one have finished.
+	Ready() <-chan Ready
+
+	// Advance notifies the Node that the application has saved progress up to the last Ready.
+	// It prepares the node to return the next available Ready.
+	//
+	// The application should generally call Advance after it applies the entries in last Ready.
+	//
+	// However, as an optimization, the application may call Advance while it is applying the
+	// commands. For example, when the last Ready contains a snapshot, the application might take
+	// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
+	// progress, it can call Advance before finishing applying the last ready.
+	Advance()
+	// ApplyConfChange applies config change to the local node.
+	// Returns an opaque ConfState protobuf which must be recorded
+	// in snapshots. Will never return nil; it returns a pointer only
+	// to match MemoryStorage.Compact.
+	ApplyConfChange(cc pb.ConfChange) *pb.ConfState
+
+	// TransferLeadership attempts to transfer leadership to the given transferee.
+	TransferLeadership(ctx context.Context, lead, transferee uint64)
+
+	// ReadIndex requests a read state. The read state will be set in the ready.
+	// Read state has a read index. Once the application advances further than the read
+	// index, any linearizable read requests issued before the read request can be
+	// processed safely. The read state will have the same rctx attached.
+	ReadIndex(ctx context.Context, rctx []byte) error
+
+	// Status returns the current status of the raft state machine.
+	Status() Status
+	// ReportUnreachable reports that the given node is not reachable for the last send.
+	ReportUnreachable(id uint64)
+	// ReportSnapshot reports the status of the sent snapshot.
+	ReportSnapshot(id uint64, status SnapshotStatus)
+	// Stop performs any necessary termination of the Node.
+	Stop()
+}
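+
+// runReadyLoop is an illustrative sketch, not part of the upstream file, of the
+// Ready/Advance contract documented above: persist HardState/Entries/Snapshot,
+// send messages, apply committed entries, then call Advance. The saveToStorage,
+// send and process parameters are hypothetical application callbacks; tick and
+// done are channels driven by the application.
+func runReadyLoop(n Node, tick <-chan struct{}, done <-chan struct{},
+	saveToStorage func(pb.HardState, []pb.Entry, pb.Snapshot),
+	send func([]pb.Message), process func(pb.Entry)) {
+	for {
+		select {
+		case <-tick:
+			n.Tick()
+		case rd := <-n.Ready():
+			// Persist state BEFORE sending messages, as required above.
+			saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)
+			send(rd.Messages)
+			for _, entry := range rd.CommittedEntries {
+				process(entry)
+				if entry.Type == pb.EntryConfChange {
+					var cc pb.ConfChange
+					cc.Unmarshal(entry.Data)
+					n.ApplyConfChange(cc)
+				}
+			}
+			n.Advance()
+		case <-done:
+			return
+		}
+	}
+}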
+
+type Peer struct {
+	ID      uint64
+	Context []byte
+}
+
+// StartNode returns a new Node given a configuration and a list of raft peers.
+// It appends a ConfChangeAddNode entry for each given peer to the initial log.
+func StartNode(c *Config, peers []Peer) Node {
+	r := newRaft(c)
+	// become the follower at term 1 and apply initial configuration
+	// entries of term 1
+	r.becomeFollower(1, None)
+	for _, peer := range peers {
+		cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+		d, err := cc.Marshal()
+		if err != nil {
+			panic("unexpected marshal error")
+		}
+		e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d}
+		r.raftLog.append(e)
+	}
+	// Mark these initial entries as committed.
+	// TODO(bdarnell): These entries are still unstable; do we need to preserve
+	// the invariant that committed < unstable?
+	r.raftLog.committed = r.raftLog.lastIndex()
+	// Now apply them, mainly so that the application can call Campaign
+	// immediately after StartNode in tests. Note that these nodes will
+	// be added to raft twice: here and when the application's Ready
+	// loop calls ApplyConfChange. The calls to addNode must come after
+	// all calls to raftLog.append so progress.next is set after these
+	// bootstrapping entries (it is an error if we try to append these
+	// entries since they have already been committed).
+	// We do not set raftLog.applied so the application will be able
+	// to observe all conf changes via Ready.CommittedEntries.
+	for _, peer := range peers {
+		r.addNode(peer.ID)
+	}
+
+	n := newNode()
+	n.logger = c.Logger
+	go n.run(r)
+	return &n
+}
+
+// RestartNode is similar to StartNode but does not take a list of peers.
+// The current membership of the cluster will be restored from the Storage.
+// If the caller has an existing state machine, pass in the last log index that
+// has been applied to it; otherwise use zero.
+func RestartNode(c *Config) Node {
+	r := newRaft(c)
+
+	n := newNode()
+	n.logger = c.Logger
+	go n.run(r)
+	return &n
+}
+
+// node is the canonical implementation of the Node interface
+type node struct {
+	propc      chan pb.Message
+	recvc      chan pb.Message
+	confc      chan pb.ConfChange
+	confstatec chan pb.ConfState
+	readyc     chan Ready
+	advancec   chan struct{}
+	tickc      chan struct{}
+	done       chan struct{}
+	stop       chan struct{}
+	status     chan chan Status
+
+	logger Logger
+}
+
+func newNode() node {
+	return node{
+		propc:      make(chan pb.Message),
+		recvc:      make(chan pb.Message),
+		confc:      make(chan pb.ConfChange),
+		confstatec: make(chan pb.ConfState),
+		readyc:     make(chan Ready),
+		advancec:   make(chan struct{}),
+		// make tickc a buffered chan, so raft node can buffer some ticks when the node
+		// is busy processing raft messages. Raft node will resume processing buffered
+		// ticks when it becomes idle.
+		tickc:  make(chan struct{}, 128),
+		done:   make(chan struct{}),
+		stop:   make(chan struct{}),
+		status: make(chan chan Status),
+	}
+}
+
+func (n *node) Stop() {
+	select {
+	case n.stop <- struct{}{}:
+		// Not already stopped, so trigger it
+	case <-n.done:
+		// Node has already been stopped - no need to do anything
+		return
+	}
+	// Block until the stop has been acknowledged by run()
+	<-n.done
+}
+
+func (n *node) run(r *raft) {
+	var propc chan pb.Message
+	var readyc chan Ready
+	var advancec chan struct{}
+	var prevLastUnstablei, prevLastUnstablet uint64
+	var havePrevLastUnstablei bool
+	var prevSnapi uint64
+	var rd Ready
+
+	lead := None
+	prevSoftSt := r.softState()
+	prevHardSt := emptyState
+
+	for {
+		if advancec != nil {
+			readyc = nil
+		} else {
+			rd = newReady(r, prevSoftSt, prevHardSt)
+			if rd.containsUpdates() {
+				readyc = n.readyc
+			} else {
+				readyc = nil
+			}
+		}
+
+		if lead != r.lead {
+			if r.hasLeader() {
+				if lead == None {
+					r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
+				} else {
+					r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
+				}
+				propc = n.propc
+			} else {
+				r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
+				propc = nil
+			}
+			lead = r.lead
+		}
+
+		select {
+		// TODO: maybe buffer the config propose if there exists one (the way
+		// described in raft dissertation)
+		// Currently it is dropped in Step silently.
+		case m := <-propc:
+			m.From = r.id
+			r.Step(m)
+		case m := <-n.recvc:
+			// filter out response message from unknown From.
+			if pr := r.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
+				r.Step(m) // raft never returns an error
+			}
+		case cc := <-n.confc:
+			if cc.NodeID == None {
+				r.resetPendingConf()
+				select {
+				case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
+				case <-n.done:
+				}
+				break
+			}
+			switch cc.Type {
+			case pb.ConfChangeAddNode:
+				r.addNode(cc.NodeID)
+			case pb.ConfChangeAddLearnerNode:
+				r.addLearner(cc.NodeID)
+			case pb.ConfChangeRemoveNode:
+				// block incoming proposal when local node is
+				// removed
+				if cc.NodeID == r.id {
+					propc = nil
+				}
+				r.removeNode(cc.NodeID)
+			case pb.ConfChangeUpdateNode:
+				r.resetPendingConf()
+			default:
+				panic("unexpected conf type")
+			}
+			select {
+			case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
+			case <-n.done:
+			}
+		case <-n.tickc:
+			r.tick()
+		case readyc <- rd:
+			if rd.SoftState != nil {
+				prevSoftSt = rd.SoftState
+			}
+			if len(rd.Entries) > 0 {
+				prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index
+				prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term
+				havePrevLastUnstablei = true
+			}
+			if !IsEmptyHardState(rd.HardState) {
+				prevHardSt = rd.HardState
+			}
+			if !IsEmptySnap(rd.Snapshot) {
+				prevSnapi = rd.Snapshot.Metadata.Index
+			}
+
+			r.msgs = nil
+			r.readStates = nil
+			advancec = n.advancec
+		case <-advancec:
+			if prevHardSt.Commit != 0 {
+				r.raftLog.appliedTo(prevHardSt.Commit)
+			}
+			if havePrevLastUnstablei {
+				r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet)
+				havePrevLastUnstablei = false
+			}
+			r.raftLog.stableSnapTo(prevSnapi)
+			advancec = nil
+		case c := <-n.status:
+			c <- getStatus(r)
+		case <-n.stop:
+			close(n.done)
+			return
+		}
+	}
+}
+
+// Tick increments the internal logical clock for this Node. Election timeouts
+// and heartbeat timeouts are in units of ticks.
+func (n *node) Tick() {
+	select {
+	case n.tickc <- struct{}{}:
+	case <-n.done:
+	default:
+		n.logger.Warningf("A tick missed to fire. Node blocks too long!")
+	}
+}
+
+func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
+
+func (n *node) Propose(ctx context.Context, data []byte) error {
+	return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
+}
+
+func (n *node) Step(ctx context.Context, m pb.Message) error {
+	// ignore unexpected local messages received over the network
+	if IsLocalMsg(m.Type) {
+		// TODO: return an error?
+		return nil
+	}
+	return n.step(ctx, m)
+}
+
+func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error {
+	data, err := cc.Marshal()
+	if err != nil {
+		return err
+	}
+	return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}})
+}
+
+// Step advances the state machine using msgs. The ctx.Err() will be returned,
+// if any.
+func (n *node) step(ctx context.Context, m pb.Message) error {
+	ch := n.recvc
+	if m.Type == pb.MsgProp {
+		ch = n.propc
+	}
+
+	select {
+	case ch <- m:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-n.done:
+		return ErrStopped
+	}
+}
+
+func (n *node) Ready() <-chan Ready { return n.readyc }
+
+func (n *node) Advance() {
+	select {
+	case n.advancec <- struct{}{}:
+	case <-n.done:
+	}
+}
+
+func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
+	var cs pb.ConfState
+	select {
+	case n.confc <- cc:
+	case <-n.done:
+	}
+	select {
+	case cs = <-n.confstatec:
+	case <-n.done:
+	}
+	return &cs
+}
+
+func (n *node) Status() Status {
+	c := make(chan Status)
+	select {
+	case n.status <- c:
+		return <-c
+	case <-n.done:
+		return Status{}
+	}
+}
+
+func (n *node) ReportUnreachable(id uint64) {
+	select {
+	case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
+	case <-n.done:
+	}
+}
+
+func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
+	rej := status == SnapshotFailure
+
+	select {
+	case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
+	case <-n.done:
+	}
+}
+
+func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
+	select {
+	// manually set 'from' and 'to', so that the leader can voluntarily transfer its leadership
+	case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
+	case <-n.done:
+	case <-ctx.Done():
+	}
+}
+
+func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
+	return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
+}
+
+func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
+	rd := Ready{
+		Entries:          r.raftLog.unstableEntries(),
+		CommittedEntries: r.raftLog.nextEnts(),
+		Messages:         r.msgs,
+	}
+	if softSt := r.softState(); !softSt.equal(prevSoftSt) {
+		rd.SoftState = softSt
+	}
+	if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
+		rd.HardState = hardSt
+	}
+	if r.raftLog.unstable.snapshot != nil {
+		rd.Snapshot = *r.raftLog.unstable.snapshot
+	}
+	if len(r.readStates) != 0 {
+		rd.ReadStates = r.readStates
+	}
+	rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries))
+	return rd
+}
+
+// MustSync returns true if the hard state and count of Raft entries indicate
+// that a synchronous write to persistent storage is required.
+func MustSync(st, prevst pb.HardState, entsnum int) bool {
+	// Persistent state on all servers:
+	// (Updated on stable storage before responding to RPCs)
+	// currentTerm
+	// votedFor
+	// log entries[]
+	return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
+}
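+
+// persistBeforeSend is an illustrative sketch, not part of the upstream file,
+// of how an application might honour MustSync: writeFn and syncFn are
+// hypothetical hooks into the application's write-ahead log.
+func persistBeforeSend(rd Ready, writeFn func(pb.HardState, []pb.Entry) error, syncFn func() error) error {
+	if err := writeFn(rd.HardState, rd.Entries); err != nil {
+		return err
+	}
+	if rd.MustSync {
+		// A term or vote change (or new entries) must reach stable storage
+		// before any message in rd.Messages is sent.
+		return syncFn()
+	}
+	return nil
+}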
diff --git a/vendor/github.com/coreos/etcd/raft/progress.go b/vendor/github.com/coreos/etcd/raft/progress.go
new file mode 100644
index 0000000..ef3787d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/progress.go
@@ -0,0 +1,284 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import "fmt"
+
+const (
+	ProgressStateProbe ProgressStateType = iota
+	ProgressStateReplicate
+	ProgressStateSnapshot
+)
+
+type ProgressStateType uint64
+
+var prstmap = [...]string{
+	"ProgressStateProbe",
+	"ProgressStateReplicate",
+	"ProgressStateSnapshot",
+}
+
+func (st ProgressStateType) String() string { return prstmap[uint64(st)] }
+
+// Progress represents a follower’s progress in the view of the leader. The leader maintains
+// the progress of all followers, and sends entries to each follower based on its progress.
+type Progress struct {
+	Match, Next uint64
+	// State defines how the leader should interact with the follower.
+	//
+	// When in ProgressStateProbe, leader sends at most one replication message
+	// per heartbeat interval. It also probes actual progress of the follower.
+	//
+	// When in ProgressStateReplicate, leader optimistically increases next
+	// to the latest entry sent after sending replication message. This is
+	// an optimized state for fast replicating log entries to the follower.
+	//
+	// When in ProgressStateSnapshot, the leader has already sent out a snapshot
+	// and stops sending any replication message to this peer.
+	State ProgressStateType
+
+	// Paused is used in ProgressStateProbe.
+	// When Paused is true, raft should pause sending replication message to this peer.
+	Paused bool
+	// PendingSnapshot is used in ProgressStateSnapshot.
+	// If there is a pending snapshot, the pendingSnapshot will be set to the
+	// index of the snapshot. If pendingSnapshot is set, the replication process of
+	// this Progress will be paused. raft will not resend snapshot until the pending one
+	// is reported to be failed.
+	PendingSnapshot uint64
+
+	// RecentActive is true if the progress is recently active. Receiving any messages
+	// from the corresponding follower indicates the progress is active.
+	// RecentActive can be reset to false after an election timeout.
+	RecentActive bool
+
+	// inflights is a sliding window for the inflight messages.
+	// Each inflight message contains one or more log entries.
+	// The max number of entries per message is defined in raft config as MaxSizePerMsg.
+	// Thus inflight effectively limits both the number of inflight messages
+	// and the bandwidth each Progress can use.
+	// When inflights is full, no more message should be sent.
+	// When a leader sends out a message, the index of the last
+	// entry should be added to inflights. The index MUST be added
+	// into inflights in order.
+	// When a leader receives a reply, the previous inflights should
+	// be freed by calling inflights.freeTo with the index of the last
+	// received entry.
+	ins *inflights
+
+	// IsLearner is true if this progress is tracked for a learner.
+	IsLearner bool
+}
+
+func (pr *Progress) resetState(state ProgressStateType) {
+	pr.Paused = false
+	pr.PendingSnapshot = 0
+	pr.State = state
+	pr.ins.reset()
+}
+
+func (pr *Progress) becomeProbe() {
+	// If the original state is ProgressStateSnapshot, progress knows that
+	// the pending snapshot has been sent to this peer successfully, then
+	// probes from pendingSnapshot + 1.
+	if pr.State == ProgressStateSnapshot {
+		pendingSnapshot := pr.PendingSnapshot
+		pr.resetState(ProgressStateProbe)
+		pr.Next = max(pr.Match+1, pendingSnapshot+1)
+	} else {
+		pr.resetState(ProgressStateProbe)
+		pr.Next = pr.Match + 1
+	}
+}
+
+func (pr *Progress) becomeReplicate() {
+	pr.resetState(ProgressStateReplicate)
+	pr.Next = pr.Match + 1
+}
+
+func (pr *Progress) becomeSnapshot(snapshoti uint64) {
+	pr.resetState(ProgressStateSnapshot)
+	pr.PendingSnapshot = snapshoti
+}
+
+// maybeUpdate returns false if the given index n comes from an outdated message.
+// Otherwise it updates the progress and returns true.
+func (pr *Progress) maybeUpdate(n uint64) bool {
+	var updated bool
+	if pr.Match < n {
+		pr.Match = n
+		updated = true
+		pr.resume()
+	}
+	if pr.Next < n+1 {
+		pr.Next = n + 1
+	}
+	return updated
+}
+
+func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 }
+
+// maybeDecrTo returns false if the given to index comes from an out of order message.
+// Otherwise it decreases the progress next index to min(rejected, last) and returns true.
+func (pr *Progress) maybeDecrTo(rejected, last uint64) bool {
+	if pr.State == ProgressStateReplicate {
+		// the rejection must be stale if the progress has matched and "rejected"
+		// is smaller than "match".
+		if rejected <= pr.Match {
+			return false
+		}
+		// directly decrease next to match + 1
+		pr.Next = pr.Match + 1
+		return true
+	}
+
+	// the rejection must be stale if "rejected" does not match next - 1
+	if pr.Next-1 != rejected {
+		return false
+	}
+
+	if pr.Next = min(rejected, last+1); pr.Next < 1 {
+		pr.Next = 1
+	}
+	pr.resume()
+	return true
+}
+
+func (pr *Progress) pause()  { pr.Paused = true }
+func (pr *Progress) resume() { pr.Paused = false }
+
+// IsPaused returns whether sending log entries to this node has been
+// paused. A node may be paused because it has rejected recent
+// MsgApps, is currently waiting for a snapshot, or has reached the
+// MaxInflightMsgs limit.
+func (pr *Progress) IsPaused() bool {
+	switch pr.State {
+	case ProgressStateProbe:
+		return pr.Paused
+	case ProgressStateReplicate:
+		return pr.ins.full()
+	case ProgressStateSnapshot:
+		return true
+	default:
+		panic("unexpected state")
+	}
+}
+
+func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 }
+
+// needSnapshotAbort returns true if snapshot progress's Match
+// is equal to or higher than the pendingSnapshot.
+func (pr *Progress) needSnapshotAbort() bool {
+	return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot
+}
+
+func (pr *Progress) String() string {
+	return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot)
+}
+
+type inflights struct {
+	// the starting index in the buffer
+	start int
+	// number of inflights in the buffer
+	count int
+
+	// the size of the buffer
+	size int
+
+	// buffer contains the index of the last entry
+	// inside one message.
+	buffer []uint64
+}
+
+func newInflights(size int) *inflights {
+	return &inflights{
+		size: size,
+	}
+}
+
+// add adds an inflight into inflights
+func (in *inflights) add(inflight uint64) {
+	if in.full() {
+		panic("cannot add into a full inflights")
+	}
+	next := in.start + in.count
+	size := in.size
+	if next >= size {
+		next -= size
+	}
+	if next >= len(in.buffer) {
+		in.growBuf()
+	}
+	in.buffer[next] = inflight
+	in.count++
+}
+
+// grow the inflight buffer by doubling up to inflights.size. We grow on demand
+// instead of preallocating to inflights.size to handle systems which have
+// thousands of Raft groups per process.
+func (in *inflights) growBuf() {
+	newSize := len(in.buffer) * 2
+	if newSize == 0 {
+		newSize = 1
+	} else if newSize > in.size {
+		newSize = in.size
+	}
+	newBuffer := make([]uint64, newSize)
+	copy(newBuffer, in.buffer)
+	in.buffer = newBuffer
+}
+
+// freeTo frees the inflights smaller than or equal to the given `to` flight.
+func (in *inflights) freeTo(to uint64) {
+	if in.count == 0 || to < in.buffer[in.start] {
+		// out of the left side of the window
+		return
+	}
+
+	idx := in.start
+	var i int
+	for i = 0; i < in.count; i++ {
+		if to < in.buffer[idx] { // found the first large inflight
+			break
+		}
+
+		// increase index and maybe rotate
+		size := in.size
+		if idx++; idx >= size {
+			idx -= size
+		}
+	}
+	// free i inflights and set new start index
+	in.count -= i
+	in.start = idx
+	if in.count == 0 {
+		// inflights is empty, reset the start index so that we don't grow the
+		// buffer unnecessarily.
+		in.start = 0
+	}
+}
+
+func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) }
+
+// full returns true if the inflights is full.
+func (in *inflights) full() bool {
+	return in.count == in.size
+}
+
+// reset frees all inflights.
+func (in *inflights) reset() {
+	in.count = 0
+	in.start = 0
+}
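+
+// exampleInflightsWindow is an illustrative sketch, not part of the upstream
+// file, of the sliding window above: the leader stops sending MsgApp to a peer
+// once the window is full and resumes once acknowledgements free slots.
+func exampleInflightsWindow() {
+	in := newInflights(3)
+	in.add(10)    // last entry index of the first in-flight message
+	in.add(15)    // second message
+	in.add(20)    // third message; the window is now full
+	_ = in.full() // true: replication to this peer is paused
+	in.freeTo(15) // an ack covering index 15 frees the first two slots
+	_ = in.full() // false: replication can resume
+}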
diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/github.com/coreos/etcd/raft/raft.go
new file mode 100644
index 0000000..22ff138
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/raft.go
@@ -0,0 +1,1407 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math"
+	"math/rand"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// None is a placeholder node ID used when there is no leader.
+const None uint64 = 0
+const noLimit = math.MaxUint64
+
+// Possible values for StateType.
+const (
+	StateFollower StateType = iota
+	StateCandidate
+	StateLeader
+	StatePreCandidate
+	numStates
+)
+
+type ReadOnlyOption int
+
+const (
+	// ReadOnlySafe guarantees the linearizability of the read only request by
+	// communicating with the quorum. It is the default and suggested option.
+	ReadOnlySafe ReadOnlyOption = iota
+	// ReadOnlyLeaseBased ensures linearizability of the read only request by
+	// relying on the leader lease. It can be affected by clock drift.
+	// If the clock drift is unbounded, leader might keep the lease longer than it
+	// should (clock can move backward/pause without any bound). ReadIndex is not safe
+	// in that case.
+	ReadOnlyLeaseBased
+)
+
+// Possible values for CampaignType
+const (
+	// campaignPreElection represents the first phase of a normal election when
+	// Config.PreVote is true.
+	campaignPreElection CampaignType = "CampaignPreElection"
+	// campaignElection represents a normal (time-based) election (the second phase
+	// of the election when Config.PreVote is true).
+	campaignElection CampaignType = "CampaignElection"
+	// campaignTransfer represents the type of leader transfer
+	campaignTransfer CampaignType = "CampaignTransfer"
+)
+
+// lockedRand is a small wrapper around rand.Rand to provide
+// synchronization. Only the methods needed by the code are exposed
+// (e.g. Intn).
+type lockedRand struct {
+	mu   sync.Mutex
+	rand *rand.Rand
+}
+
+func (r *lockedRand) Intn(n int) int {
+	r.mu.Lock()
+	v := r.rand.Intn(n)
+	r.mu.Unlock()
+	return v
+}
+
+var globalRand = &lockedRand{
+	rand: rand.New(rand.NewSource(time.Now().UnixNano())),
+}
+
+// CampaignType represents the type of campaigning.
+// We use a string type instead of uint64 because it is simpler
+// to compare and to fill into raft entries.
+type CampaignType string
+
+// StateType represents the role of a node in a cluster.
+type StateType uint64
+
+var stmap = [...]string{
+	"StateFollower",
+	"StateCandidate",
+	"StateLeader",
+	"StatePreCandidate",
+}
+
+func (st StateType) String() string {
+	return stmap[uint64(st)]
+}
+
+// Config contains the parameters to start a raft.
+type Config struct {
+	// ID is the identity of the local raft. ID cannot be 0.
+	ID uint64
+
+	// peers contains the IDs of all nodes (including self) in the raft cluster. It
+	// should only be set when starting a new raft cluster. Restarting raft from
+	// previous configuration will panic if peers is set. peer is private and only
+	// used for testing right now.
+	peers []uint64
+
+	// learners contains the IDs of all learner nodes (including self if the local node is a learner) in the raft cluster.
+	// Learners only receive entries from the leader node; they do not vote or promote themselves.
+	learners []uint64
+
+	// ElectionTick is the number of Node.Tick invocations that must pass between
+	// elections. That is, if a follower does not receive any message from the
+	// leader of current term before ElectionTick has elapsed, it will become
+	// candidate and start an election. ElectionTick must be greater than
+	// HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
+	// unnecessary leader switching.
+	ElectionTick int
+	// HeartbeatTick is the number of Node.Tick invocations that must pass between
+	// heartbeats. That is, a leader sends heartbeat messages to maintain its
+	// leadership every HeartbeatTick ticks.
+	HeartbeatTick int
+
+	// Storage is the storage for raft. raft generates entries and states to be
+	// stored in storage. raft reads the persisted entries and states out of
+	// Storage when it needs. raft reads out the previous state and configuration
+	// out of storage when restarting.
+	Storage Storage
+	// Applied is the last applied index. It should only be set when restarting
+	// raft. raft will not return entries to the application smaller than or equal to
+	// Applied. If Applied is unset when restarting, raft might return previous
+	// applied entries. This is a very application dependent configuration.
+	Applied uint64
+
+	// MaxSizePerMsg limits the max size of each append message. Smaller value
+	// lowers the raft recovery cost(initial probing and message lost during normal
+	// operation). On the other side, it might affect the throughput during normal
+	// replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per
+	// message.
+	MaxSizePerMsg uint64
+	// MaxInflightMsgs limits the max number of in-flight append messages during
+	// the optimistic replication phase. The application's transport layer usually
+	// has its own sending buffer over TCP/UDP. Set MaxInflightMsgs to avoid
+	// overflowing that sending buffer. TODO (xiangli): feedback to application to
+	// limit the proposal rate?
+	MaxInflightMsgs int
+
+	// CheckQuorum specifies if the leader should check quorum activity. Leader
+	// steps down when quorum is not active for an electionTimeout.
+	CheckQuorum bool
+
+	// PreVote enables the Pre-Vote algorithm described in raft thesis section
+	// 9.6. This prevents disruption when a node that has been partitioned away
+	// rejoins the cluster.
+	PreVote bool
+
+	// ReadOnlyOption specifies how the read only request is processed.
+	//
+	// ReadOnlySafe guarantees the linearizability of the read only request by
+	// communicating with the quorum. It is the default and suggested option.
+	//
+	// ReadOnlyLeaseBased ensures linearizability of the read only request by
+	// relying on the leader lease. It can be affected by clock drift.
+	// If the clock drift is unbounded, leader might keep the lease longer than it
+	// should (clock can move backward/pause without any bound). ReadIndex is not safe
+	// in that case.
+	// CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased.
+	ReadOnlyOption ReadOnlyOption
+
+	// Logger is the logger used for the raft log. For a multinode setup which can
+	// host multiple raft groups, each raft group can have its own logger.
+	Logger Logger
+
+	// DisableProposalForwarding set to true means that followers will drop
+	// proposals, rather than forwarding them to the leader. One use case for
+	// this feature would be in a situation where the Raft leader is used to
+	// compute the data of a proposal, for example, adding a timestamp from a
+	// hybrid logical clock to data in a monotonically increasing way. Forwarding
+	// should be disabled to prevent a follower with an innaccurate hybrid
+	// logical clock from assigning the timestamp and then forwarding the data
+	// to the leader.
+	DisableProposalForwarding bool
+}
+
+func (c *Config) validate() error {
+	if c.ID == None {
+		return errors.New("cannot use none as id")
+	}
+
+	if c.HeartbeatTick <= 0 {
+		return errors.New("heartbeat tick must be greater than 0")
+	}
+
+	if c.ElectionTick <= c.HeartbeatTick {
+		return errors.New("election tick must be greater than heartbeat tick")
+	}
+
+	if c.Storage == nil {
+		return errors.New("storage cannot be nil")
+	}
+
+	if c.MaxInflightMsgs <= 0 {
+		return errors.New("max inflight messages must be greater than 0")
+	}
+
+	if c.Logger == nil {
+		c.Logger = raftLogger
+	}
+
+	if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum {
+		return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased")
+	}
+
+	return nil
+}
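+
+// exampleStartNode is an illustrative sketch, not part of the upstream file,
+// of a Config that passes validate above. NewMemoryStorage is assumed to be
+// the in-memory Storage implementation vendored alongside in storage.go.
+func exampleStartNode() Node {
+	c := &Config{
+		ID:              0x01,
+		ElectionTick:    10, // must be greater than HeartbeatTick
+		HeartbeatTick:   1,
+		Storage:         NewMemoryStorage(),
+		MaxSizePerMsg:   4096,
+		MaxInflightMsgs: 256,
+	}
+	// Bootstrap a three-node cluster; the other members run the same Config
+	// shape with their own IDs.
+	return StartNode(c, []Peer{{ID: 0x01}, {ID: 0x02}, {ID: 0x03}})
+}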
+
+type raft struct {
+	id uint64
+
+	Term uint64
+	Vote uint64
+
+	readStates []ReadState
+
+	// the log
+	raftLog *raftLog
+
+	maxInflight int
+	maxMsgSize  uint64
+	prs         map[uint64]*Progress
+	learnerPrs  map[uint64]*Progress
+
+	state StateType
+
+	// isLearner is true if the local raft node is a learner.
+	isLearner bool
+
+	votes map[uint64]bool
+
+	msgs []pb.Message
+
+	// the leader id
+	lead uint64
+	// leadTransferee is id of the leader transfer target when its value is not zero.
+	// Follow the procedure defined in raft thesis 3.10.
+	leadTransferee uint64
+	// New configuration is ignored if there exists unapplied configuration.
+	pendingConf bool
+
+	readOnly *readOnly
+
+	// number of ticks since it reached last electionTimeout when it is leader
+	// or candidate.
+	// number of ticks since it reached last electionTimeout or received a
+	// valid message from current leader when it is a follower.
+	electionElapsed int
+
+	// number of ticks since it reached last heartbeatTimeout.
+	// only leader keeps heartbeatElapsed.
+	heartbeatElapsed int
+
+	checkQuorum bool
+	preVote     bool
+
+	heartbeatTimeout int
+	electionTimeout  int
+	// randomizedElectionTimeout is a random number between
+	// [electiontimeout, 2 * electiontimeout - 1]. It gets reset
+	// when raft changes its state to follower or candidate.
+	randomizedElectionTimeout int
+	disableProposalForwarding bool
+
+	tick func()
+	step stepFunc
+
+	logger Logger
+}
+
+func newRaft(c *Config) *raft {
+	if err := c.validate(); err != nil {
+		panic(err.Error())
+	}
+	raftlog := newLog(c.Storage, c.Logger)
+	hs, cs, err := c.Storage.InitialState()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	peers := c.peers
+	learners := c.learners
+	if len(cs.Nodes) > 0 || len(cs.Learners) > 0 {
+		if len(peers) > 0 || len(learners) > 0 {
+			// TODO(bdarnell): the peers argument is always nil except in
+			// tests; the argument should be removed and these tests should be
+			// updated to specify their nodes through a snapshot.
+			panic("cannot specify both newRaft(peers, learners) and ConfState.(Nodes, Learners)")
+		}
+		peers = cs.Nodes
+		learners = cs.Learners
+	}
+	r := &raft{
+		id:                        c.ID,
+		lead:                      None,
+		isLearner:                 false,
+		raftLog:                   raftlog,
+		maxMsgSize:                c.MaxSizePerMsg,
+		maxInflight:               c.MaxInflightMsgs,
+		prs:                       make(map[uint64]*Progress),
+		learnerPrs:                make(map[uint64]*Progress),
+		electionTimeout:           c.ElectionTick,
+		heartbeatTimeout:          c.HeartbeatTick,
+		logger:                    c.Logger,
+		checkQuorum:               c.CheckQuorum,
+		preVote:                   c.PreVote,
+		readOnly:                  newReadOnly(c.ReadOnlyOption),
+		disableProposalForwarding: c.DisableProposalForwarding,
+	}
+	for _, p := range peers {
+		r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)}
+	}
+	for _, p := range learners {
+		if _, ok := r.prs[p]; ok {
+			panic(fmt.Sprintf("node %x is in both learner and peer list", p))
+		}
+		r.learnerPrs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight), IsLearner: true}
+		if r.id == p {
+			r.isLearner = true
+		}
+	}
+
+	if !isHardStateEqual(hs, emptyState) {
+		r.loadState(hs)
+	}
+	if c.Applied > 0 {
+		raftlog.appliedTo(c.Applied)
+	}
+	r.becomeFollower(r.Term, None)
+
+	var nodesStrs []string
+	for _, n := range r.nodes() {
+		nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n))
+	}
+
+	r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]",
+		r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm())
+	return r
+}
+
+func (r *raft) hasLeader() bool { return r.lead != None }
+
+func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} }
+
+func (r *raft) hardState() pb.HardState {
+	return pb.HardState{
+		Term:   r.Term,
+		Vote:   r.Vote,
+		Commit: r.raftLog.committed,
+	}
+}
+
+func (r *raft) quorum() int { return len(r.prs)/2 + 1 }
+
+func (r *raft) nodes() []uint64 {
+	nodes := make([]uint64, 0, len(r.prs)+len(r.learnerPrs))
+	for id := range r.prs {
+		nodes = append(nodes, id)
+	}
+	for id := range r.learnerPrs {
+		nodes = append(nodes, id)
+	}
+	sort.Sort(uint64Slice(nodes))
+	return nodes
+}
+
+// send persists state to stable storage and then sends to its mailbox.
+func (r *raft) send(m pb.Message) {
+	m.From = r.id
+	if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp {
+		if m.Term == 0 {
+			// All {pre-,}campaign messages need to have the term set when
+			// sending.
+			// - MsgVote: m.Term is the term the node is campaigning for,
+			//   non-zero as we increment the term when campaigning.
+			// - MsgVoteResp: m.Term is the new r.Term if the MsgVote was
+			//   granted, non-zero for the same reason MsgVote is
+			// - MsgPreVote: m.Term is the term the node will campaign,
+			//   non-zero as we use m.Term to indicate the next term we'll be
+			//   campaigning for
+			// - MsgPreVoteResp: m.Term is the term received in the original
+			//   MsgPreVote if the pre-vote was granted, non-zero for the
+			//   same reasons MsgPreVote is
+			panic(fmt.Sprintf("term should be set when sending %s", m.Type))
+		}
+	} else {
+		if m.Term != 0 {
+			panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term))
+		}
+		// do not attach term to MsgProp, MsgReadIndex
+		// proposals are a way to forward to the leader and
+		// should be treated as local message.
+		// MsgReadIndex is also forwarded to leader.
+		if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex {
+			m.Term = r.Term
+		}
+	}
+	r.msgs = append(r.msgs, m)
+}
+
+func (r *raft) getProgress(id uint64) *Progress {
+	if pr, ok := r.prs[id]; ok {
+		return pr
+	}
+
+	return r.learnerPrs[id]
+}
+
+// sendAppend sends RPC, with entries to the given peer.
+func (r *raft) sendAppend(to uint64) {
+	pr := r.getProgress(to)
+	if pr.IsPaused() {
+		return
+	}
+	m := pb.Message{}
+	m.To = to
+
+	term, errt := r.raftLog.term(pr.Next - 1)
+	ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize)
+
+	if errt != nil || erre != nil { // send snapshot if we failed to get term or entries
+		if !pr.RecentActive {
+			r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
+			return
+		}
+
+		m.Type = pb.MsgSnap
+		snapshot, err := r.raftLog.snapshot()
+		if err != nil {
+			if err == ErrSnapshotTemporarilyUnavailable {
+				r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
+				return
+			}
+			panic(err) // TODO(bdarnell)
+		}
+		if IsEmptySnap(snapshot) {
+			panic("need non-empty snapshot")
+		}
+		m.Snapshot = snapshot
+		sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term
+		r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]",
+			r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr)
+		pr.becomeSnapshot(sindex)
+		r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr)
+	} else {
+		m.Type = pb.MsgApp
+		m.Index = pr.Next - 1
+		m.LogTerm = term
+		m.Entries = ents
+		m.Commit = r.raftLog.committed
+		if n := len(m.Entries); n != 0 {
+			switch pr.State {
+			// optimistically increase the next when in ProgressStateReplicate
+			case ProgressStateReplicate:
+				last := m.Entries[n-1].Index
+				pr.optimisticUpdate(last)
+				pr.ins.add(last)
+			case ProgressStateProbe:
+				pr.pause()
+			default:
+				r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State)
+			}
+		}
+	}
+	r.send(m)
+}
+
+// sendHeartbeat sends an empty MsgApp
+func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
+	// Attach the commit as min(to.matched, r.committed).
+	// When the leader sends out heartbeat message,
+	// the receiver(follower) might not be matched with the leader
+	// or it might not have all the committed entries.
+	// The leader MUST NOT forward the follower's commit to
+	// an unmatched index.
+	commit := min(r.getProgress(to).Match, r.raftLog.committed)
+	m := pb.Message{
+		To:      to,
+		Type:    pb.MsgHeartbeat,
+		Commit:  commit,
+		Context: ctx,
+	}
+
+	r.send(m)
+}
+
+func (r *raft) forEachProgress(f func(id uint64, pr *Progress)) {
+	for id, pr := range r.prs {
+		f(id, pr)
+	}
+
+	for id, pr := range r.learnerPrs {
+		f(id, pr)
+	}
+}
+
+// bcastAppend sends an append RPC with entries to every peer that is not
+// up-to-date according to the progress recorded in r.prs and r.learnerPrs.
+func (r *raft) bcastAppend() {
+	r.forEachProgress(func(id uint64, _ *Progress) {
+		if id == r.id {
+			return
+		}
+
+		r.sendAppend(id)
+	})
+}
+
+// bcastHeartbeat sends a heartbeat RPC (with no entries) to all peers.
+func (r *raft) bcastHeartbeat() {
+	lastCtx := r.readOnly.lastPendingRequestCtx()
+	if len(lastCtx) == 0 {
+		r.bcastHeartbeatWithCtx(nil)
+	} else {
+		r.bcastHeartbeatWithCtx([]byte(lastCtx))
+	}
+}
+
+func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
+	r.forEachProgress(func(id uint64, _ *Progress) {
+		if id == r.id {
+			return
+		}
+		r.sendHeartbeat(id, ctx)
+	})
+}
+
+// maybeCommit attempts to advance the commit index. Returns true if
+// the commit index changed (in which case the caller should call
+// r.bcastAppend).
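+// The candidate index is the quorum()-th largest Match index across the voters,
+// i.e. the highest index known to be replicated on a quorum.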
+func (r *raft) maybeCommit() bool {
+	// TODO(bmizerany): optimize.. Currently naive
+	mis := make(uint64Slice, 0, len(r.prs))
+	for _, p := range r.prs {
+		mis = append(mis, p.Match)
+	}
+	sort.Sort(sort.Reverse(mis))
+	mci := mis[r.quorum()-1]
+	return r.raftLog.maybeCommit(mci, r.Term)
+}
+
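+// reset prepares the node for a new term: it clears the vote when the term changes,
+// drops the current leader, resets election/heartbeat timers and vote tallies,
+// aborts any leader transfer, and reinitializes every peer's Progress.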
+func (r *raft) reset(term uint64) {
+	if r.Term != term {
+		r.Term = term
+		r.Vote = None
+	}
+	r.lead = None
+
+	r.electionElapsed = 0
+	r.heartbeatElapsed = 0
+	r.resetRandomizedElectionTimeout()
+
+	r.abortLeaderTransfer()
+
+	r.votes = make(map[uint64]bool)
+	r.forEachProgress(func(id uint64, pr *Progress) {
+		*pr = Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight), IsLearner: pr.IsLearner}
+		if id == r.id {
+			pr.Match = r.raftLog.lastIndex()
+		}
+	})
+
+	r.pendingConf = false
+	r.readOnly = newReadOnly(r.readOnly.option)
+}
+
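+// appendEntry stamps the proposed entries with the current term and consecutive
+// indexes, appends them to the local log, and advances the leader's own progress.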
+func (r *raft) appendEntry(es ...pb.Entry) {
+	li := r.raftLog.lastIndex()
+	for i := range es {
+		es[i].Term = r.Term
+		es[i].Index = li + 1 + uint64(i)
+	}
+	r.raftLog.append(es...)
+	r.getProgress(r.id).maybeUpdate(r.raftLog.lastIndex())
+	// Regardless of maybeCommit's return, our caller will call bcastAppend.
+	r.maybeCommit()
+}
+
+// tickElection is run by followers and candidates after r.electionTimeout.
+func (r *raft) tickElection() {
+	r.electionElapsed++
+
+	if r.promotable() && r.pastElectionTimeout() {
+		r.electionElapsed = 0
+		r.Step(pb.Message{From: r.id, Type: pb.MsgHup})
+	}
+}
+
+// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout.
+func (r *raft) tickHeartbeat() {
+	r.heartbeatElapsed++
+	r.electionElapsed++
+
+	if r.electionElapsed >= r.electionTimeout {
+		r.electionElapsed = 0
+		if r.checkQuorum {
+			r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
+		}
+		// If the current leader cannot transfer leadership within electionTimeout,
+		// it aborts the transfer and resumes normal leadership.
+		if r.state == StateLeader && r.leadTransferee != None {
+			r.abortLeaderTransfer()
+		}
+	}
+
+	if r.state != StateLeader {
+		return
+	}
+
+	if r.heartbeatElapsed >= r.heartbeatTimeout {
+		r.heartbeatElapsed = 0
+		r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
+	}
+}
+
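+// becomeFollower transitions the node to StateFollower at the given term,
+// following lead.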
+func (r *raft) becomeFollower(term uint64, lead uint64) {
+	r.step = stepFollower
+	r.reset(term)
+	r.tick = r.tickElection
+	r.lead = lead
+	r.state = StateFollower
+	r.logger.Infof("%x became follower at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomeCandidate() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateLeader {
+		panic("invalid transition [leader -> candidate]")
+	}
+	r.step = stepCandidate
+	r.reset(r.Term + 1)
+	r.tick = r.tickElection
+	r.Vote = r.id
+	r.state = StateCandidate
+	r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomePreCandidate() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateLeader {
+		panic("invalid transition [leader -> pre-candidate]")
+	}
+	// Becoming a pre-candidate changes our step functions and state,
+	// but doesn't change anything else. In particular it does not increase
+	// r.Term or change r.Vote.
+	r.step = stepCandidate
+	r.votes = make(map[uint64]bool)
+	r.tick = r.tickElection
+	r.lead = None
+	r.state = StatePreCandidate
+	r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomeLeader() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateFollower {
+		panic("invalid transition [follower -> leader]")
+	}
+	r.step = stepLeader
+	r.reset(r.Term)
+	r.tick = r.tickHeartbeat
+	r.lead = r.id
+	r.state = StateLeader
+	ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit)
+	if err != nil {
+		r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err)
+	}
+
+	nconf := numOfPendingConf(ents)
+	if nconf > 1 {
+		panic("unexpected multiple uncommitted config entry")
+	}
+	if nconf == 1 {
+		r.pendingConf = true
+	}
+
+	r.appendEntry(pb.Entry{Data: nil})
+	r.logger.Infof("%x became leader at term %d", r.id, r.Term)
+}
+
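+// campaign transitions the node to candidate (or pre-candidate) state and
+// requests votes from all other voters; a single-node cluster wins immediately.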
+func (r *raft) campaign(t CampaignType) {
+	var term uint64
+	var voteMsg pb.MessageType
+	if t == campaignPreElection {
+		r.becomePreCandidate()
+		voteMsg = pb.MsgPreVote
+		// PreVote RPCs are sent for the next term before we've incremented r.Term.
+		term = r.Term + 1
+	} else {
+		r.becomeCandidate()
+		voteMsg = pb.MsgVote
+		term = r.Term
+	}
+	if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) {
+		// We won the election after voting for ourselves (which must mean that
+		// this is a single-node cluster). Advance to the next state.
+		if t == campaignPreElection {
+			r.campaign(campaignElection)
+		} else {
+			r.becomeLeader()
+		}
+		return
+	}
+	for id := range r.prs {
+		if id == r.id {
+			continue
+		}
+		r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d",
+			r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term)
+
+		var ctx []byte
+		if t == campaignTransfer {
+			ctx = []byte(t)
+		}
+		r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx})
+	}
+}
+
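+// poll records the vote v from id (only the first response from each node counts)
+// and returns the number of granted votes so far.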
+func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) {
+	if v {
+		r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term)
+	} else {
+		r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term)
+	}
+	if _, ok := r.votes[id]; !ok {
+		r.votes[id] = v
+	}
+	for _, vv := range r.votes {
+		if vv {
+			granted++
+		}
+	}
+	return granted
+}
+
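+// Step is the entrance for handling a message: it first reconciles the message
+// term with the local term, then handles MsgHup and vote requests directly and
+// delegates everything else to the per-state step function.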
+func (r *raft) Step(m pb.Message) error {
+	// Handle the message term, which may result in our stepping down to a follower.
+	switch {
+	case m.Term == 0:
+		// local message
+	case m.Term > r.Term:
+		if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
+			force := bytes.Equal(m.Context, []byte(campaignTransfer))
+			inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
+			if !force && inLease {
+				// If a server receives a RequestVote request within the minimum election timeout
+				// of hearing from a current leader, it does not update its term or grant its vote
+				r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
+					r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
+				return nil
+			}
+		}
+		switch {
+		case m.Type == pb.MsgPreVote:
+			// Never change our term in response to a PreVote
+		case m.Type == pb.MsgPreVoteResp && !m.Reject:
+			// We send pre-vote requests with a term in our future. If the
+			// pre-vote is granted, we will increment our term when we get a
+			// quorum. If it is not, the term comes from the node that
+			// rejected our vote so we should become a follower at the new
+			// term.
+		default:
+			r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
+				r.id, r.Term, m.Type, m.From, m.Term)
+			if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap {
+				r.becomeFollower(m.Term, m.From)
+			} else {
+				r.becomeFollower(m.Term, None)
+			}
+		}
+
+	case m.Term < r.Term:
+		if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
+			// We have received messages from a leader at a lower term. It is possible
+			// that these messages were simply delayed in the network, but this could
+			// also mean that this node has advanced its term number during a network
+			// partition, and it is now unable to either win an election or to rejoin
+			// the majority on the old term. If checkQuorum is false, this will be
+			// handled by incrementing term numbers in response to MsgVote with a
+			// higher term, but if checkQuorum is true we may not advance the term on
+			// MsgVote and must generate other messages to advance the term. The net
+			// result of these two features is to minimize the disruption caused by
+			// nodes that have been removed from the cluster's configuration: a
+			// removed node will send MsgVotes (or MsgPreVotes) which will be ignored,
+			// but it will not receive MsgApp or MsgHeartbeat, so it will not create
+			// disruptive term increases
+			r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
+		} else {
+			// ignore other cases
+			r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
+				r.id, r.Term, m.Type, m.From, m.Term)
+		}
+		return nil
+	}
+
+	switch m.Type {
+	case pb.MsgHup:
+		if r.state != StateLeader {
+			ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
+			if err != nil {
+				r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
+			}
+			if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
+				r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
+				return nil
+			}
+
+			r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
+			if r.preVote {
+				r.campaign(campaignPreElection)
+			} else {
+				r.campaign(campaignElection)
+			}
+		} else {
+			r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
+		}
+
+	case pb.MsgVote, pb.MsgPreVote:
+		if r.isLearner {
+			// TODO: learner may need to vote, in case of node down when confchange.
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: learner can not vote",
+				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+			return nil
+		}
+		// The m.Term > r.Term clause is for MsgPreVote. For MsgVote m.Term should
+		// always equal r.Term.
+		if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
+				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+			// When responding to Msg{Pre,}Vote messages we include the term
+			// from the message, not the local term. To see why consider the
+			// case where a single node was previously partitioned away and
+			// its local term is now out of date. If we include the local term
+			// (recall that for pre-votes we don't update the local term), the
+			// (pre-)campaigning node on the other end will proceed to ignore
+			// the message (it ignores all out of date messages).
+			// The term in the original message and current local term are the
+			// same in the case of regular votes, but different for pre-votes.
+			r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
+			if m.Type == pb.MsgVote {
+				// Only record real votes.
+				r.electionElapsed = 0
+				r.Vote = m.From
+			}
+		} else {
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
+				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+			r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true})
+		}
+
+	default:
+		r.step(r, m)
+	}
+	return nil
+}
+
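+// stepFunc is the per-state message handler installed by the become* transitions.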
+type stepFunc func(r *raft, m pb.Message)
+
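+// stepLeader handles messages received while in StateLeader.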
+func stepLeader(r *raft, m pb.Message) {
+	// These message types do not require any progress for m.From.
+	switch m.Type {
+	case pb.MsgBeat:
+		r.bcastHeartbeat()
+		return
+	case pb.MsgCheckQuorum:
+		if !r.checkQuorumActive() {
+			r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
+			r.becomeFollower(r.Term, None)
+		}
+		return
+	case pb.MsgProp:
+		if len(m.Entries) == 0 {
+			r.logger.Panicf("%x stepped empty MsgProp", r.id)
+		}
+		if _, ok := r.prs[r.id]; !ok {
+			// If we are not currently a member of the range (i.e. this node
+			// was removed from the configuration while serving as leader),
+			// drop any new proposals.
+			return
+		}
+		if r.leadTransferee != None {
+			r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee)
+			return
+		}
+
+		for i, e := range m.Entries {
+			if e.Type == pb.EntryConfChange {
+				if r.pendingConf {
+					r.logger.Infof("propose conf %s ignored since pending unapplied configuration", e.String())
+					m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
+				}
+				r.pendingConf = true
+			}
+		}
+		r.appendEntry(m.Entries...)
+		r.bcastAppend()
+		return
+	case pb.MsgReadIndex:
+		if r.quorum() > 1 {
+			if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term {
+				// Reject read only request when this leader has not committed any log entry at its term.
+				return
+			}
+
+			// thinking: use an internally defined context instead of the user-given context.
+			// We can express this in terms of the term and index instead of a user-supplied value.
+			// This would allow multiple reads to piggyback on the same message.
+			switch r.readOnly.option {
+			case ReadOnlySafe:
+				r.readOnly.addRequest(r.raftLog.committed, m)
+				r.bcastHeartbeatWithCtx(m.Entries[0].Data)
+			case ReadOnlyLeaseBased:
+				ri := r.raftLog.committed
+				if m.From == None || m.From == r.id { // from local member
+					r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
+				} else {
+					r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries})
+				}
+			}
+		} else {
+			r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
+		}
+
+		return
+	}
+
+	// All other message types require a progress for m.From (pr).
+	pr := r.getProgress(m.From)
+	if pr == nil {
+		r.logger.Debugf("%x no progress available for %x", r.id, m.From)
+		return
+	}
+	switch m.Type {
+	case pb.MsgAppResp:
+		pr.RecentActive = true
+
+		if m.Reject {
+			r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d",
+				r.id, m.RejectHint, m.From, m.Index)
+			if pr.maybeDecrTo(m.Index, m.RejectHint) {
+				r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
+				if pr.State == ProgressStateReplicate {
+					pr.becomeProbe()
+				}
+				r.sendAppend(m.From)
+			}
+		} else {
+			oldPaused := pr.IsPaused()
+			if pr.maybeUpdate(m.Index) {
+				switch {
+				case pr.State == ProgressStateProbe:
+					pr.becomeReplicate()
+				case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort():
+					r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+					pr.becomeProbe()
+				case pr.State == ProgressStateReplicate:
+					pr.ins.freeTo(m.Index)
+				}
+
+				if r.maybeCommit() {
+					r.bcastAppend()
+				} else if oldPaused {
+					// update() reset the wait state on this node. If we had delayed sending
+					// an update before, send it now.
+					r.sendAppend(m.From)
+				}
+				// Transfer leadership is in progress.
+				if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
+					r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From)
+					r.sendTimeoutNow(m.From)
+				}
+			}
+		}
+	case pb.MsgHeartbeatResp:
+		pr.RecentActive = true
+		pr.resume()
+
+		// free one slot for the full inflights window to allow progress.
+		if pr.State == ProgressStateReplicate && pr.ins.full() {
+			pr.ins.freeFirstOne()
+		}
+		if pr.Match < r.raftLog.lastIndex() {
+			r.sendAppend(m.From)
+		}
+
+		if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
+			return
+		}
+
+		ackCount := r.readOnly.recvAck(m)
+		if ackCount < r.quorum() {
+			return
+		}
+
+		rss := r.readOnly.advance(m)
+		for _, rs := range rss {
+			req := rs.req
+			if req.From == None || req.From == r.id { // from local member
+				r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data})
+			} else {
+				r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries})
+			}
+		}
+	case pb.MsgSnapStatus:
+		if pr.State != ProgressStateSnapshot {
+			return
+		}
+		if !m.Reject {
+			pr.becomeProbe()
+			r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+		} else {
+			pr.snapshotFailure()
+			pr.becomeProbe()
+			r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+		}
+		// If the snapshot finished, wait for the msgAppResp from the remote node
+		// before sending out the next msgApp.
+		// If the snapshot failed, wait for a heartbeat interval before the next try.
+		pr.pause()
+	case pb.MsgUnreachable:
+		// During optimistic replication, if the remote becomes unreachable,
+		// there is a high probability that a MsgApp was lost.
+		if pr.State == ProgressStateReplicate {
+			pr.becomeProbe()
+		}
+		r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
+	case pb.MsgTransferLeader:
+		if pr.IsLearner {
+			r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id)
+			return
+		}
+		leadTransferee := m.From
+		lastLeadTransferee := r.leadTransferee
+		if lastLeadTransferee != None {
+			if lastLeadTransferee == leadTransferee {
+				r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x",
+					r.id, r.Term, leadTransferee, leadTransferee)
+				return
+			}
+			r.abortLeaderTransfer()
+			r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee)
+		}
+		if leadTransferee == r.id {
+			r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id)
+			return
+		}
+		// Transfer leadership to third party.
+		r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee)
+		// Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed.
+		r.electionElapsed = 0
+		r.leadTransferee = leadTransferee
+		if pr.Match == r.raftLog.lastIndex() {
+			r.sendTimeoutNow(leadTransferee)
+			r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee)
+		} else {
+			r.sendAppend(leadTransferee)
+		}
+	}
+}
+
+// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
+// whether they respond to MsgVoteResp or MsgPreVoteResp.
+func stepCandidate(r *raft, m pb.Message) {
+	// Only handle vote responses corresponding to our candidacy (while in
+	// StateCandidate, we may get stale MsgPreVoteResp messages in this term from
+	// our pre-candidate state).
+	var myVoteRespType pb.MessageType
+	if r.state == StatePreCandidate {
+		myVoteRespType = pb.MsgPreVoteResp
+	} else {
+		myVoteRespType = pb.MsgVoteResp
+	}
+	switch m.Type {
+	case pb.MsgProp:
+		r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+		return
+	case pb.MsgApp:
+		r.becomeFollower(r.Term, m.From)
+		r.handleAppendEntries(m)
+	case pb.MsgHeartbeat:
+		r.becomeFollower(r.Term, m.From)
+		r.handleHeartbeat(m)
+	case pb.MsgSnap:
+		r.becomeFollower(m.Term, m.From)
+		r.handleSnapshot(m)
+	case myVoteRespType:
+		gr := r.poll(m.From, m.Type, !m.Reject)
+		r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr)
+		switch r.quorum() {
+		case gr:
+			if r.state == StatePreCandidate {
+				r.campaign(campaignElection)
+			} else {
+				r.becomeLeader()
+				r.bcastAppend()
+			}
+		case len(r.votes) - gr:
+			r.becomeFollower(r.Term, None)
+		}
+	case pb.MsgTimeoutNow:
+		r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From)
+	}
+}
+
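+// stepFollower handles messages received while in StateFollower, forwarding
+// proposals, read-index requests, and leader-transfer messages to the leader.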
+func stepFollower(r *raft, m pb.Message) {
+	switch m.Type {
+	case pb.MsgProp:
+		if r.lead == None {
+			r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+			return
+		} else if r.disableProposalForwarding {
+			r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term)
+			return
+		}
+		m.To = r.lead
+		r.send(m)
+	case pb.MsgApp:
+		r.electionElapsed = 0
+		r.lead = m.From
+		r.handleAppendEntries(m)
+	case pb.MsgHeartbeat:
+		r.electionElapsed = 0
+		r.lead = m.From
+		r.handleHeartbeat(m)
+	case pb.MsgSnap:
+		r.electionElapsed = 0
+		r.lead = m.From
+		r.handleSnapshot(m)
+	case pb.MsgTransferLeader:
+		if r.lead == None {
+			r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
+			return
+		}
+		m.To = r.lead
+		r.send(m)
+	case pb.MsgTimeoutNow:
+		if r.promotable() {
+			r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
+			// Leadership transfers never use pre-vote even if r.preVote is true; we
+			// know we are not recovering from a partition so there is no need for the
+			// extra round trip.
+			r.campaign(campaignTransfer)
+		} else {
+			r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From)
+		}
+	case pb.MsgReadIndex:
+		if r.lead == None {
+			r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
+			return
+		}
+		m.To = r.lead
+		r.send(m)
+	case pb.MsgReadIndexResp:
+		if len(m.Entries) != 1 {
+			r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
+			return
+		}
+		r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
+	}
+}
+
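+// handleAppendEntries tries to append the entries carried by m to the local log
+// and responds with either the new last index or a rejection hint.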
+func (r *raft) handleAppendEntries(m pb.Message) {
+	if m.Index < r.raftLog.committed {
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+		return
+	}
+
+	if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok {
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
+	} else {
+		r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x",
+			r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()})
+	}
+}
+
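+// handleHeartbeat advances the commit index to the leader-provided value and
+// echoes the read-only context back in a MsgHeartbeatResp.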
+func (r *raft) handleHeartbeat(m pb.Message) {
+	r.raftLog.commitTo(m.Commit)
+	r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
+}
+
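+// handleSnapshot applies the snapshot in m if it is newer than the local log
+// and reports the resulting log position back to the leader.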
+func (r *raft) handleSnapshot(m pb.Message) {
+	sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term
+	if r.restore(m.Snapshot) {
+		r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, sindex, sterm)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
+	} else {
+		r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, sindex, sterm)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+	}
+}
+
+// restore recovers the state machine from a snapshot. It restores the log and the
+// configuration of the state machine.
+func (r *raft) restore(s pb.Snapshot) bool {
+	if s.Metadata.Index <= r.raftLog.committed {
+		return false
+	}
+	if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) {
+		r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
+		r.raftLog.commitTo(s.Metadata.Index)
+		return false
+	}
+
+	// A normal (voting) peer can't become a learner.
+	if !r.isLearner {
+		for _, id := range s.Metadata.ConfState.Learners {
+			if id == r.id {
+				r.logger.Errorf("%x can't become learner when restoring snapshot [index: %d, term: %d]", r.id, s.Metadata.Index, s.Metadata.Term)
+				return false
+			}
+		}
+	}
+
+	r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]",
+		r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
+
+	r.raftLog.restore(s)
+	r.prs = make(map[uint64]*Progress)
+	r.learnerPrs = make(map[uint64]*Progress)
+	r.restoreNode(s.Metadata.ConfState.Nodes, false)
+	r.restoreNode(s.Metadata.ConfState.Learners, true)
+	return true
+}
+
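+// restoreNode rebuilds the Progress entries for the given node ids after a
+// snapshot restore.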
+func (r *raft) restoreNode(nodes []uint64, isLearner bool) {
+	for _, n := range nodes {
+		match, next := uint64(0), r.raftLog.lastIndex()+1
+		if n == r.id {
+			match = next - 1
+			r.isLearner = isLearner
+		}
+		r.setProgress(n, match, next, isLearner)
+		r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.getProgress(n))
+	}
+}
+
+// promotable indicates whether the state machine can be promoted to leader,
+// which is true when its own id is in the progress list.
+func (r *raft) promotable() bool {
+	_, ok := r.prs[r.id]
+	return ok
+}
+
+func (r *raft) addNode(id uint64) {
+	r.addNodeOrLearnerNode(id, false)
+}
+
+func (r *raft) addLearner(id uint64) {
+	r.addNodeOrLearnerNode(id, true)
+}
+
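+// addNodeOrLearnerNode adds id as a voter or learner, promoting an existing
+// learner to voter when needed, and marks the peer as recently active.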
+func (r *raft) addNodeOrLearnerNode(id uint64, isLearner bool) {
+	r.pendingConf = false
+	pr := r.getProgress(id)
+	if pr == nil {
+		r.setProgress(id, 0, r.raftLog.lastIndex()+1, isLearner)
+	} else {
+		if isLearner && !pr.IsLearner {
+			// can only change Learner to Voter
+			r.logger.Infof("%x ignored addLearner: do not support changing %x from raft peer to learner.", r.id, id)
+			return
+		}
+
+		if isLearner == pr.IsLearner {
+			// Ignore any redundant addNode calls (which can happen because the
+			// initial bootstrapping entries are applied twice).
+			return
+		}
+
+		// change Learner to Voter, reusing the original Learner progress
+		delete(r.learnerPrs, id)
+		pr.IsLearner = false
+		r.prs[id] = pr
+	}
+
+	if r.id == id {
+		r.isLearner = isLearner
+	}
+
+	// When a node is first added, we should mark it as recently active.
+	// Otherwise, CheckQuorum may cause us to step down if it is invoked
+	// before the added node has a chance to communicate with us.
+	pr = r.getProgress(id)
+	pr.RecentActive = true
+}
+
+func (r *raft) removeNode(id uint64) {
+	r.delProgress(id)
+	r.pendingConf = false
+
+	// Do not try to commit or abort a leadership transfer if there are no nodes left in the cluster.
+	if len(r.prs) == 0 && len(r.learnerPrs) == 0 {
+		return
+	}
+
+	// The quorum size is now smaller, so see if any pending entries can
+	// be committed.
+	if r.maybeCommit() {
+		r.bcastAppend()
+	}
+	// If the removed node is the leadTransferee, then abort the leadership transferring.
+	if r.state == StateLeader && r.leadTransferee == id {
+		r.abortLeaderTransfer()
+	}
+}
+
+func (r *raft) resetPendingConf() { r.pendingConf = false }
+
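+// setProgress (re)creates the Progress for id as a voter or learner; changing
+// an existing voter into a learner is not allowed and panics.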
+func (r *raft) setProgress(id, match, next uint64, isLearner bool) {
+	if !isLearner {
+		delete(r.learnerPrs, id)
+		r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
+		return
+	}
+
+	if _, ok := r.prs[id]; ok {
+		panic(fmt.Sprintf("%x unexpected changing from voter to learner for %x", r.id, id))
+	}
+	r.learnerPrs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight), IsLearner: true}
+}
+
+func (r *raft) delProgress(id uint64) {
+	delete(r.prs, id)
+	delete(r.learnerPrs, id)
+}
+
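+// loadState restores the term, vote, and commit index from a persisted HardState.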
+func (r *raft) loadState(state pb.HardState) {
+	if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
+		r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
+	}
+	r.raftLog.committed = state.Commit
+	r.Term = state.Term
+	r.Vote = state.Vote
+}
+
+// pastElectionTimeout returns true iff r.electionElapsed is greater
+// than or equal to the randomized election timeout in
+// [electiontimeout, 2 * electiontimeout - 1].
+func (r *raft) pastElectionTimeout() bool {
+	return r.electionElapsed >= r.randomizedElectionTimeout
+}
+
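+// resetRandomizedElectionTimeout picks a new election timeout uniformly at
+// random from [electiontimeout, 2 * electiontimeout - 1].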
+func (r *raft) resetRandomizedElectionTimeout() {
+	r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
+}
+
+// checkQuorumActive returns true if the quorum is active from
+// the view of the local raft state machine. Otherwise, it returns
+// false.
+// checkQuorumActive also resets all RecentActive to false.
+func (r *raft) checkQuorumActive() bool {
+	var act int
+
+	r.forEachProgress(func(id uint64, pr *Progress) {
+		if id == r.id { // self is always active
+			act++
+			return
+		}
+
+		if pr.RecentActive && !pr.IsLearner {
+			act++
+		}
+
+		pr.RecentActive = false
+	})
+
+	return act >= r.quorum()
+}
+
+func (r *raft) sendTimeoutNow(to uint64) {
+	r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow})
+}
+
+func (r *raft) abortLeaderTransfer() {
+	r.leadTransferee = None
+}
+
+func numOfPendingConf(ents []pb.Entry) int {
+	n := 0
+	for i := range ents {
+		if ents[i].Type == pb.EntryConfChange {
+			n++
+		}
+	}
+	return n
+}
diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go
new file mode 100644
index 0000000..753bd84
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go
@@ -0,0 +1,2365 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: raft.proto
+
+package raftpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type EntryType int32
+
+const (
+	EntryNormal     EntryType = 0
+	EntryConfChange EntryType = 1
+)
+
+var EntryType_name = map[int32]string{
+	0: "EntryNormal",
+	1: "EntryConfChange",
+}
+
+var EntryType_value = map[string]int32{
+	"EntryNormal":     0,
+	"EntryConfChange": 1,
+}
+
+func (x EntryType) Enum() *EntryType {
+	p := new(EntryType)
+	*p = x
+	return p
+}
+
+func (x EntryType) String() string {
+	return proto.EnumName(EntryType_name, int32(x))
+}
+
+func (x *EntryType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType")
+	if err != nil {
+		return err
+	}
+	*x = EntryType(value)
+	return nil
+}
+
+func (EntryType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{0}
+}
+
+type MessageType int32
+
+const (
+	MsgHup            MessageType = 0
+	MsgBeat           MessageType = 1
+	MsgProp           MessageType = 2
+	MsgApp            MessageType = 3
+	MsgAppResp        MessageType = 4
+	MsgVote           MessageType = 5
+	MsgVoteResp       MessageType = 6
+	MsgSnap           MessageType = 7
+	MsgHeartbeat      MessageType = 8
+	MsgHeartbeatResp  MessageType = 9
+	MsgUnreachable    MessageType = 10
+	MsgSnapStatus     MessageType = 11
+	MsgCheckQuorum    MessageType = 12
+	MsgTransferLeader MessageType = 13
+	MsgTimeoutNow     MessageType = 14
+	MsgReadIndex      MessageType = 15
+	MsgReadIndexResp  MessageType = 16
+	MsgPreVote        MessageType = 17
+	MsgPreVoteResp    MessageType = 18
+)
+
+var MessageType_name = map[int32]string{
+	0:  "MsgHup",
+	1:  "MsgBeat",
+	2:  "MsgProp",
+	3:  "MsgApp",
+	4:  "MsgAppResp",
+	5:  "MsgVote",
+	6:  "MsgVoteResp",
+	7:  "MsgSnap",
+	8:  "MsgHeartbeat",
+	9:  "MsgHeartbeatResp",
+	10: "MsgUnreachable",
+	11: "MsgSnapStatus",
+	12: "MsgCheckQuorum",
+	13: "MsgTransferLeader",
+	14: "MsgTimeoutNow",
+	15: "MsgReadIndex",
+	16: "MsgReadIndexResp",
+	17: "MsgPreVote",
+	18: "MsgPreVoteResp",
+}
+
+var MessageType_value = map[string]int32{
+	"MsgHup":            0,
+	"MsgBeat":           1,
+	"MsgProp":           2,
+	"MsgApp":            3,
+	"MsgAppResp":        4,
+	"MsgVote":           5,
+	"MsgVoteResp":       6,
+	"MsgSnap":           7,
+	"MsgHeartbeat":      8,
+	"MsgHeartbeatResp":  9,
+	"MsgUnreachable":    10,
+	"MsgSnapStatus":     11,
+	"MsgCheckQuorum":    12,
+	"MsgTransferLeader": 13,
+	"MsgTimeoutNow":     14,
+	"MsgReadIndex":      15,
+	"MsgReadIndexResp":  16,
+	"MsgPreVote":        17,
+	"MsgPreVoteResp":    18,
+}
+
+func (x MessageType) Enum() *MessageType {
+	p := new(MessageType)
+	*p = x
+	return p
+}
+
+func (x MessageType) String() string {
+	return proto.EnumName(MessageType_name, int32(x))
+}
+
+func (x *MessageType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType")
+	if err != nil {
+		return err
+	}
+	*x = MessageType(value)
+	return nil
+}
+
+func (MessageType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{1}
+}
+
+type ConfChangeType int32
+
+const (
+	ConfChangeAddNode        ConfChangeType = 0
+	ConfChangeRemoveNode     ConfChangeType = 1
+	ConfChangeUpdateNode     ConfChangeType = 2
+	ConfChangeAddLearnerNode ConfChangeType = 3
+)
+
+var ConfChangeType_name = map[int32]string{
+	0: "ConfChangeAddNode",
+	1: "ConfChangeRemoveNode",
+	2: "ConfChangeUpdateNode",
+	3: "ConfChangeAddLearnerNode",
+}
+
+var ConfChangeType_value = map[string]int32{
+	"ConfChangeAddNode":        0,
+	"ConfChangeRemoveNode":     1,
+	"ConfChangeUpdateNode":     2,
+	"ConfChangeAddLearnerNode": 3,
+}
+
+func (x ConfChangeType) Enum() *ConfChangeType {
+	p := new(ConfChangeType)
+	*p = x
+	return p
+}
+
+func (x ConfChangeType) String() string {
+	return proto.EnumName(ConfChangeType_name, int32(x))
+}
+
+func (x *ConfChangeType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType")
+	if err != nil {
+		return err
+	}
+	*x = ConfChangeType(value)
+	return nil
+}
+
+func (ConfChangeType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{2}
+}
+
+type Entry struct {
+	Term                 uint64    `protobuf:"varint,2,opt,name=Term" json:"Term"`
+	Index                uint64    `protobuf:"varint,3,opt,name=Index" json:"Index"`
+	Type                 EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"`
+	Data                 []byte    `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *Entry) Reset()         { *m = Entry{} }
+func (m *Entry) String() string { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage()    {}
+func (*Entry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{0}
+}
+func (m *Entry) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Entry.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Entry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Entry.Merge(m, src)
+}
+func (m *Entry) XXX_Size() int {
+	return m.Size()
+}
+func (m *Entry) XXX_DiscardUnknown() {
+	xxx_messageInfo_Entry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Entry proto.InternalMessageInfo
+
+type SnapshotMetadata struct {
+	ConfState            ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"`
+	Index                uint64    `protobuf:"varint,2,opt,name=index" json:"index"`
+	Term                 uint64    `protobuf:"varint,3,opt,name=term" json:"term"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *SnapshotMetadata) Reset()         { *m = SnapshotMetadata{} }
+func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) }
+func (*SnapshotMetadata) ProtoMessage()    {}
+func (*SnapshotMetadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{1}
+}
+func (m *SnapshotMetadata) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SnapshotMetadata.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SnapshotMetadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SnapshotMetadata.Merge(m, src)
+}
+func (m *SnapshotMetadata) XXX_Size() int {
+	return m.Size()
+}
+func (m *SnapshotMetadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_SnapshotMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotMetadata proto.InternalMessageInfo
+
+type Snapshot struct {
+	Data                 []byte           `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
+	Metadata             SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *Snapshot) Reset()         { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage()    {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{2}
+}
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Snapshot) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Snapshot.Merge(m, src)
+}
+func (m *Snapshot) XXX_Size() int {
+	return m.Size()
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+	xxx_messageInfo_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
+
+type Message struct {
+	Type                 MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"`
+	To                   uint64      `protobuf:"varint,2,opt,name=to" json:"to"`
+	From                 uint64      `protobuf:"varint,3,opt,name=from" json:"from"`
+	Term                 uint64      `protobuf:"varint,4,opt,name=term" json:"term"`
+	LogTerm              uint64      `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"`
+	Index                uint64      `protobuf:"varint,6,opt,name=index" json:"index"`
+	Entries              []Entry     `protobuf:"bytes,7,rep,name=entries" json:"entries"`
+	Commit               uint64      `protobuf:"varint,8,opt,name=commit" json:"commit"`
+	Snapshot             Snapshot    `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"`
+	Reject               bool        `protobuf:"varint,10,opt,name=reject" json:"reject"`
+	RejectHint           uint64      `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"`
+	Context              []byte      `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *Message) Reset()         { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage()    {}
+func (*Message) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{3}
+}
+func (m *Message) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Message.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Message) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Message.Merge(m, src)
+}
+func (m *Message) XXX_Size() int {
+	return m.Size()
+}
+func (m *Message) XXX_DiscardUnknown() {
+	xxx_messageInfo_Message.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Message proto.InternalMessageInfo
+
+type HardState struct {
+	Term                 uint64   `protobuf:"varint,1,opt,name=term" json:"term"`
+	Vote                 uint64   `protobuf:"varint,2,opt,name=vote" json:"vote"`
+	Commit               uint64   `protobuf:"varint,3,opt,name=commit" json:"commit"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HardState) Reset()         { *m = HardState{} }
+func (m *HardState) String() string { return proto.CompactTextString(m) }
+func (*HardState) ProtoMessage()    {}
+func (*HardState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{4}
+}
+func (m *HardState) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HardState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HardState.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HardState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HardState.Merge(m, src)
+}
+func (m *HardState) XXX_Size() int {
+	return m.Size()
+}
+func (m *HardState) XXX_DiscardUnknown() {
+	xxx_messageInfo_HardState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HardState proto.InternalMessageInfo
+
+type ConfState struct {
+	Nodes                []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"`
+	Learners             []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ConfState) Reset()         { *m = ConfState{} }
+func (m *ConfState) String() string { return proto.CompactTextString(m) }
+func (*ConfState) ProtoMessage()    {}
+func (*ConfState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{5}
+}
+func (m *ConfState) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ConfState.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ConfState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfState.Merge(m, src)
+}
+func (m *ConfState) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfState) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfState proto.InternalMessageInfo
+
+type ConfChange struct {
+	ID                   uint64         `protobuf:"varint,1,opt,name=ID" json:"ID"`
+	Type                 ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"`
+	NodeID               uint64         `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"`
+	Context              []byte         `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *ConfChange) Reset()         { *m = ConfChange{} }
+func (m *ConfChange) String() string { return proto.CompactTextString(m) }
+func (*ConfChange) ProtoMessage()    {}
+func (*ConfChange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{6}
+}
+func (m *ConfChange) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ConfChange.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ConfChange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfChange.Merge(m, src)
+}
+func (m *ConfChange) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfChange) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfChange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfChange proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value)
+	proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value)
+	proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value)
+	proto.RegisterType((*Entry)(nil), "raftpb.Entry")
+	proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata")
+	proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot")
+	proto.RegisterType((*Message)(nil), "raftpb.Message")
+	proto.RegisterType((*HardState)(nil), "raftpb.HardState")
+	proto.RegisterType((*ConfState)(nil), "raftpb.ConfState")
+	proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange")
+}
+
+func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) }
+
+var fileDescriptor_b042552c306ae59b = []byte{
+	// 816 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0x23, 0x45,
+	0x10, 0xf6, 0x8c, 0xc7, 0x7f, 0x35, 0x8e, 0xd3, 0xa9, 0x35, 0xa8, 0x15, 0x45, 0xc6, 0xb2, 0x38,
+	0x58, 0x41, 0x1b, 0x20, 0x07, 0x0e, 0x48, 0x1c, 0x36, 0x09, 0x52, 0x22, 0xad, 0xa3, 0xc5, 0x9b,
+	0xe5, 0x80, 0x84, 0x50, 0xc7, 0x53, 0x9e, 0x18, 0x32, 0xd3, 0xa3, 0x9e, 0xf6, 0xb2, 0xb9, 0x20,
+	0x1e, 0x80, 0x07, 0xe0, 0xc2, 0xfb, 0xe4, 0xb8, 0x12, 0x77, 0xc4, 0x86, 0x17, 0x41, 0xdd, 0xd3,
+	0x63, 0xcf, 0x24, 0xb7, 0xae, 0xaf, 0x6a, 0xbe, 0xfa, 0xbe, 0xea, 0xea, 0x01, 0x50, 0x62, 0xa9,
+	0x8f, 0x32, 0x25, 0xb5, 0xc4, 0xb6, 0x39, 0x67, 0xd7, 0xfb, 0xc3, 0x58, 0xc6, 0xd2, 0x42, 0x9f,
+	0x9b, 0x53, 0x91, 0x9d, 0xfc, 0x06, 0xad, 0x6f, 0x53, 0xad, 0xee, 0x90, 0x43, 0x70, 0x45, 0x2a,
+	0xe1, 0xfe, 0xd8, 0x9b, 0x06, 0x27, 0xc1, 0xfd, 0x3f, 0x9f, 0x34, 0xe6, 0x16, 0xc1, 0x7d, 0x68,
+	0x5d, 0xa4, 0x11, 0xbd, 0xe3, 0xcd, 0x4a, 0xaa, 0x80, 0xf0, 0x33, 0x08, 0xae, 0xee, 0x32, 0xe2,
+	0xde, 0xd8, 0x9b, 0x0e, 0x8e, 0xf7, 0x8e, 0x8a, 0x5e, 0x47, 0x96, 0xd2, 0x24, 0x36, 0x44, 0x77,
+	0x19, 0x21, 0x42, 0x70, 0x26, 0xb4, 0xe0, 0xc1, 0xd8, 0x9b, 0xf6, 0xe7, 0xf6, 0x3c, 0xf9, 0xdd,
+	0x03, 0xf6, 0x3a, 0x15, 0x59, 0x7e, 0x23, 0xf5, 0x8c, 0xb4, 0x88, 0x84, 0x16, 0xf8, 0x15, 0xc0,
+	0x42, 0xa6, 0xcb, 0x9f, 0x72, 0x2d, 0x74, 0xc1, 0x1d, 0x6e, 0xb9, 0x4f, 0x65, 0xba, 0x7c, 0x6d,
+	0x12, 0x8e, 0xbb, 0xb7, 0x28, 0x01, 0xa3, 0x74, 0x65, 0x95, 0x56, 0x4d, 0x14, 0x90, 0xf1, 0xa7,
+	0x8d, 0xbf, 0xaa, 0x09, 0x8b, 0x4c, 0x7e, 0x80, 0x6e, 0xa9, 0xc0, 0x48, 0x34, 0x0a, 0x6c, 0xcf,
+	0xfe, 0xdc, 0x9e, 0xf1, 0x6b, 0xe8, 0x26, 0x4e, 0x99, 0x25, 0x0e, 0x8f, 0x79, 0xa9, 0xe5, 0xb1,
+	0x72, 0xc7, 0xbb, 0xa9, 0x9f, 0xfc, 0xd5, 0x84, 0xce, 0x8c, 0xf2, 0x5c, 0xc4, 0x84, 0xcf, 0x21,
+	0xd0, 0xdb, 0x59, 0x3d, 0x2b, 0x39, 0x5c, 0xba, 0x3a, 0x2d, 0x53, 0x86, 0x43, 0xf0, 0xb5, 0xac,
+	0x39, 0xf1, 0xb5, 0x34, 0x36, 0x96, 0x4a, 0x3e, 0xb2, 0x61, 0x90, 0x8d, 0xc1, 0xe0, 0xb1, 0x41,
+	0x1c, 0x41, 0xe7, 0x56, 0xc6, 0xf6, 0x76, 0x5b, 0x95, 0x64, 0x09, 0x6e, 0xc7, 0xd6, 0x7e, 0x3a,
+	0xb6, 0xe7, 0xd0, 0xa1, 0x54, 0xab, 0x15, 0xe5, 0xbc, 0x33, 0x6e, 0x4e, 0xc3, 0xe3, 0x9d, 0xda,
+	0x1d, 0x97, 0x54, 0xae, 0x06, 0x0f, 0xa0, 0xbd, 0x90, 0x49, 0xb2, 0xd2, 0xbc, 0x5b, 0xe1, 0x72,
+	0x18, 0x1e, 0x43, 0x37, 0x77, 0x13, 0xe3, 0x3d, 0x3b, 0x49, 0xf6, 0x78, 0x92, 0xe5, 0x04, 0xcb,
+	0x3a, 0xc3, 0xa8, 0xe8, 0x67, 0x5a, 0x68, 0x0e, 0x63, 0x6f, 0xda, 0x2d, 0x19, 0x0b, 0x0c, 0x3f,
+	0x05, 0x28, 0x4e, 0xe7, 0xab, 0x54, 0xf3, 0xb0, 0xd2, 0xb3, 0x82, 0x23, 0x87, 0xce, 0x42, 0xa6,
+	0x9a, 0xde, 0x69, 0xde, 0xb7, 0x17, 0x5b, 0x86, 0x93, 0x1f, 0xa1, 0x77, 0x2e, 0x54, 0x54, 0xac,
+	0x4f, 0x39, 0x41, 0xef, 0xc9, 0x04, 0x39, 0x04, 0x6f, 0xa5, 0xa6, 0xfa, 0xe3, 0x30, 0x48, 0xc5,
+	0x70, 0xf3, 0xa9, 0xe1, 0xc9, 0x37, 0xd0, 0xdb, 0xac, 0x2b, 0x0e, 0xa1, 0x95, 0xca, 0x88, 0x72,
+	0xee, 0x8d, 0x9b, 0xd3, 0x60, 0x5e, 0x04, 0xb8, 0x0f, 0xdd, 0x5b, 0x12, 0x2a, 0x25, 0x95, 0x73,
+	0xdf, 0x26, 0x36, 0xf1, 0xe4, 0x0f, 0x0f, 0xc0, 0x7c, 0x7f, 0x7a, 0x23, 0xd2, 0xd8, 0x6e, 0xc4,
+	0xc5, 0x59, 0x4d, 0x9d, 0x7f, 0x71, 0x86, 0x5f, 0xb8, 0x27, 0xe8, 0xdb, 0xb5, 0xfa, 0xb8, 0xfa,
+	0x4c, 0x8a, 0xef, 0x9e, 0xbc, 0xc3, 0x03, 0x68, 0x5f, 0xca, 0x88, 0x2e, 0xce, 0xea, 0x9a, 0x0b,
+	0xcc, 0x0c, 0xeb, 0xd4, 0x0d, 0xab, 0x78, 0xa8, 0x65, 0x78, 0xf8, 0x25, 0xf4, 0x36, 0x0f, 0x1b,
+	0x77, 0x21, 0xb4, 0xc1, 0xa5, 0x54, 0x89, 0xb8, 0x65, 0x0d, 0x7c, 0x06, 0xbb, 0x16, 0xd8, 0x36,
+	0x66, 0xde, 0xe1, 0xdf, 0x3e, 0x84, 0x95, 0x05, 0x47, 0x80, 0xf6, 0x2c, 0x8f, 0xcf, 0xd7, 0x19,
+	0x6b, 0x60, 0x08, 0x9d, 0x59, 0x1e, 0x9f, 0x90, 0xd0, 0xcc, 0x73, 0xc1, 0x2b, 0x25, 0x33, 0xe6,
+	0xbb, 0xaa, 0x17, 0x59, 0xc6, 0x9a, 0x38, 0x00, 0x28, 0xce, 0x73, 0xca, 0x33, 0x16, 0xb8, 0xc2,
+	0xef, 0xa5, 0x26, 0xd6, 0x32, 0x22, 0x5c, 0x60, 0xb3, 0x6d, 0x97, 0x35, 0xcb, 0xc4, 0x3a, 0xc8,
+	0xa0, 0x6f, 0x9a, 0x91, 0x50, 0xfa, 0xda, 0x74, 0xe9, 0xe2, 0x10, 0x58, 0x15, 0xb1, 0x1f, 0xf5,
+	0x10, 0x61, 0x30, 0xcb, 0xe3, 0x37, 0xa9, 0x22, 0xb1, 0xb8, 0x11, 0xd7, 0xb7, 0xc4, 0x00, 0xf7,
+	0x60, 0xc7, 0x11, 0x99, 0xcb, 0x5b, 0xe7, 0x2c, 0x74, 0x65, 0xa7, 0x37, 0xb4, 0xf8, 0xe5, 0xbb,
+	0xb5, 0x54, 0xeb, 0x84, 0xf5, 0xf1, 0x23, 0xd8, 0x9b, 0xe5, 0xf1, 0x95, 0x12, 0x69, 0xbe, 0x24,
+	0xf5, 0x92, 0x44, 0x44, 0x8a, 0xed, 0xb8, 0xaf, 0xaf, 0x56, 0x09, 0xc9, 0xb5, 0xbe, 0x94, 0xbf,
+	0xb2, 0x81, 0x13, 0x33, 0x27, 0x11, 0xd9, 0x3f, 0x27, 0xdb, 0x75, 0x62, 0x36, 0x88, 0x15, 0xc3,
+	0x9c, 0xdf, 0x57, 0x8a, 0xac, 0xc5, 0x3d, 0xd7, 0xd5, 0xc5, 0xb6, 0x06, 0x0f, 0xef, 0x60, 0x50,
+	0xbf, 0x5e, 0xa3, 0x63, 0x8b, 0xbc, 0x88, 0x22, 0x73, 0x97, 0xac, 0x81, 0x1c, 0x86, 0x5b, 0x78,
+	0x4e, 0x89, 0x7c, 0x4b, 0x36, 0xe3, 0xd5, 0x33, 0x6f, 0xb2, 0x48, 0xe8, 0x22, 0xe3, 0xe3, 0x01,
+	0xf0, 0x1a, 0xd5, 0xcb, 0x62, 0x1b, 0x6d, 0xb6, 0x79, 0xc2, 0xef, 0x3f, 0x8c, 0x1a, 0xef, 0x3f,
+	0x8c, 0x1a, 0xf7, 0x0f, 0x23, 0xef, 0xfd, 0xc3, 0xc8, 0xfb, 0xf7, 0x61, 0xe4, 0xfd, 0xf9, 0xdf,
+	0xa8, 0xf1, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x30, 0xe1, 0x02, 0x69, 0x74, 0x06, 0x00, 0x00,
+}
+
+func (m *Entry) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Data != nil {
+		i -= len(m.Data)
+		copy(dAtA[i:], m.Data)
+		i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
+		i--
+		dAtA[i] = 0x22
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+	i--
+	dAtA[i] = 0x10
+	{
+		size, err := m.ConfState.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintRaft(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Snapshot) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	{
+		size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintRaft(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	if m.Data != nil {
+		i -= len(m.Data)
+		copy(dAtA[i:], m.Data)
+		i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Message) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Message) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Context != nil {
+		i -= len(m.Context)
+		copy(dAtA[i:], m.Context)
+		i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
+		i--
+		dAtA[i] = 0x62
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint))
+	i--
+	dAtA[i] = 0x58
+	i--
+	if m.Reject {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x50
+	{
+		size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintRaft(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x4a
+	i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
+	i--
+	dAtA[i] = 0x40
+	if len(m.Entries) > 0 {
+		for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRaft(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+	i--
+	dAtA[i] = 0x30
+	i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm))
+	i--
+	dAtA[i] = 0x28
+	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+	i--
+	dAtA[i] = 0x20
+	i = encodeVarintRaft(dAtA, i, uint64(m.From))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.To))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *HardState) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HardState) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HardState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.Vote))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *ConfState) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfState) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Learners) > 0 {
+		for iNdEx := len(m.Learners) - 1; iNdEx >= 0; iNdEx-- {
+			i = encodeVarintRaft(dAtA, i, uint64(m.Learners[iNdEx]))
+			i--
+			dAtA[i] = 0x10
+		}
+	}
+	if len(m.Nodes) > 0 {
+		for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- {
+			i = encodeVarintRaft(dAtA, i, uint64(m.Nodes[iNdEx]))
+			i--
+			dAtA[i] = 0x8
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ConfChange) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfChange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Context != nil {
+		i -= len(m.Context)
+		copy(dAtA[i:], m.Context)
+		i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
+		i--
+		dAtA[i] = 0x22
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.NodeID))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRaft(dAtA, i, uint64(m.ID))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintRaft(dAtA []byte, offset int, v uint64) int {
+	offset -= sovRaft(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
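
The generated MarshalToSizedBuffer methods above fill the buffer from the end backwards: each field's payload is copied first, then its length varint, then a one-byte key equal to fieldNum<<3 | wireType. A small illustrative sketch (not part of the vendored file) showing where constants such as 0x10, 0x18 and 0x62 come from:

```go
package main

import "fmt"

// Wire types used by the generated code: 0 = varint, 2 = length-delimited.
const (
	wireVarint = 0
	wireBytes  = 2
)

// tagByte mirrors how protobuf forms the key byte: the field number shifted
// left by three bits, OR'ed with the wire type.
func tagByte(fieldNum, wireType int) byte {
	return byte(fieldNum<<3 | wireType)
}

func main() {
	fmt.Printf("Entry.Term  (field 2, varint):     %#x\n", tagByte(2, wireVarint))  // 0x10
	fmt.Printf("Entry.Index (field 3, varint):     %#x\n", tagByte(3, wireVarint))  // 0x18
	fmt.Printf("Message.Context (field 12, bytes): %#x\n", tagByte(12, wireBytes)) // 0x62
}
```
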
+func (m *Entry) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.Type))
+	n += 1 + sovRaft(uint64(m.Term))
+	n += 1 + sovRaft(uint64(m.Index))
+	if m.Data != nil {
+		l = len(m.Data)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *SnapshotMetadata) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ConfState.Size()
+	n += 1 + l + sovRaft(uint64(l))
+	n += 1 + sovRaft(uint64(m.Index))
+	n += 1 + sovRaft(uint64(m.Term))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Snapshot) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Data != nil {
+		l = len(m.Data)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	l = m.Metadata.Size()
+	n += 1 + l + sovRaft(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Message) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.Type))
+	n += 1 + sovRaft(uint64(m.To))
+	n += 1 + sovRaft(uint64(m.From))
+	n += 1 + sovRaft(uint64(m.Term))
+	n += 1 + sovRaft(uint64(m.LogTerm))
+	n += 1 + sovRaft(uint64(m.Index))
+	if len(m.Entries) > 0 {
+		for _, e := range m.Entries {
+			l = e.Size()
+			n += 1 + l + sovRaft(uint64(l))
+		}
+	}
+	n += 1 + sovRaft(uint64(m.Commit))
+	l = m.Snapshot.Size()
+	n += 1 + l + sovRaft(uint64(l))
+	n += 2
+	n += 1 + sovRaft(uint64(m.RejectHint))
+	if m.Context != nil {
+		l = len(m.Context)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HardState) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.Term))
+	n += 1 + sovRaft(uint64(m.Vote))
+	n += 1 + sovRaft(uint64(m.Commit))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *ConfState) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Nodes) > 0 {
+		for _, e := range m.Nodes {
+			n += 1 + sovRaft(uint64(e))
+		}
+	}
+	if len(m.Learners) > 0 {
+		for _, e := range m.Learners {
+			n += 1 + sovRaft(uint64(e))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *ConfChange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.ID))
+	n += 1 + sovRaft(uint64(m.Type))
+	n += 1 + sovRaft(uint64(m.NodeID))
+	if m.Context != nil {
+		l = len(m.Context)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovRaft(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozRaft(x uint64) (n int) {
+	return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
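
sovRaft computes how many bytes a value needs as an unsigned varint (7 bits per byte; the x|1 guards the x == 0 case), and sozRaft zig-zag encodes first so that small negative values stay short. A standalone sanity check of the size formula against a hand-rolled encoder (illustrative only, not part of the generated file):

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors sovRaft: ceil(bitlen(x|1) / 7) bytes for an unsigned varint.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encode writes x as a protobuf varint, least-significant 7 bits first,
// setting the continuation bit on every byte except the last.
func encode(x uint64) []byte {
	var out []byte
	for x >= 1<<7 {
		out = append(out, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(out, byte(x))
}

func main() {
	for _, v := range []uint64{0, 1, 127, 128, 300, 1 << 32} {
		fmt.Printf("v=%d size=%d encoded-len=%d\n", v, sov(v), len(encode(v)))
	}
}
```
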
+func (m *Entry) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Entry: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= EntryType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Term |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+			}
+			m.Index = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Index |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+			}
+			m.Index = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Index |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Term |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Snapshot) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Message) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Message: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= MessageType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+			}
+			m.To = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.To |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			m.From = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.From |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Term |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType)
+			}
+			m.LogTerm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.LogTerm |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+			}
+			m.Index = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Index |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Entries = append(m.Entries, Entry{})
+			if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+			}
+			m.Commit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Commit |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Reject = bool(v != 0)
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType)
+			}
+			m.RejectHint = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RejectHint |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
+			if m.Context == nil {
+				m.Context = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HardState) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HardState: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Term |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType)
+			}
+			m.Vote = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Vote |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+			}
+			m.Commit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Commit |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfState) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfState: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.Nodes = append(m.Nodes, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthRaft
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthRaft
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.Nodes) == 0 {
+					m.Nodes = make([]uint64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowRaft
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.Nodes = append(m.Nodes, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
+			}
+		case 2:
+			if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.Learners = append(m.Learners, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthRaft
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthRaft
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.Learners) == 0 {
+					m.Learners = make([]uint64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowRaft
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.Learners = append(m.Learners, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType)
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfChange) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfChange: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= ConfChangeType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+			}
+			m.NodeID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.NodeID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
+			if m.Context == nil {
+				m.Context = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipRaft(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthRaft
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthRaft
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipRaft(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthRaft
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowRaft   = fmt.Errorf("proto: integer overflow")
+)
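
Taken together, the Size/Marshal/Unmarshal methods above let a raftpb message round-trip through a plain byte slice. A minimal sketch, assuming only the generated API shown in this file (field values are illustrative):

```go
package main

import (
	"fmt"
	"log"

	pb "github.com/coreos/etcd/raft/raftpb"
)

func main() {
	// Marshal a HardState using the generated Sizer/Marshaler code.
	hs := pb.HardState{Term: 5, Vote: 2, Commit: 40}
	data, err := hs.Marshal()
	if err != nil {
		log.Fatalf("marshal: %v", err)
	}

	// Unmarshal into a fresh value and confirm the round trip.
	var out pb.HardState
	if err := out.Unmarshal(data); err != nil {
		log.Fatalf("unmarshal: %v", err)
	}
	fmt.Printf("%d bytes, term=%d vote=%d commit=%d\n", len(data), out.Term, out.Vote, out.Commit)
}
```
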
diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto b/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto
new file mode 100644
index 0000000..644ce7b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto
@@ -0,0 +1,95 @@
+syntax = "proto2";
+package raftpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+enum EntryType {
+	EntryNormal     = 0;
+	EntryConfChange = 1;
+}
+
+message Entry {
+	optional uint64     Term  = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
+	optional uint64     Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
+	optional EntryType  Type  = 1 [(gogoproto.nullable) = false];
+	optional bytes      Data  = 4;
+}
+
+message SnapshotMetadata {
+	optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
+	optional uint64    index      = 2 [(gogoproto.nullable) = false];
+	optional uint64    term       = 3 [(gogoproto.nullable) = false];
+}
+
+message Snapshot {
+	optional bytes            data     = 1;
+	optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
+}
+
+enum MessageType {
+	MsgHup             = 0;
+	MsgBeat            = 1;
+	MsgProp            = 2;
+	MsgApp             = 3;
+	MsgAppResp         = 4;
+	MsgVote            = 5;
+	MsgVoteResp        = 6;
+	MsgSnap            = 7;
+	MsgHeartbeat       = 8;
+	MsgHeartbeatResp   = 9;
+	MsgUnreachable     = 10;
+	MsgSnapStatus      = 11;
+	MsgCheckQuorum     = 12;
+	MsgTransferLeader  = 13;
+	MsgTimeoutNow      = 14;
+	MsgReadIndex       = 15;
+	MsgReadIndexResp   = 16;
+	MsgPreVote         = 17;
+	MsgPreVoteResp     = 18;
+}
+
+message Message {
+	optional MessageType type        = 1  [(gogoproto.nullable) = false];
+	optional uint64      to          = 2  [(gogoproto.nullable) = false];
+	optional uint64      from        = 3  [(gogoproto.nullable) = false];
+	optional uint64      term        = 4  [(gogoproto.nullable) = false];
+	optional uint64      logTerm     = 5  [(gogoproto.nullable) = false];
+	optional uint64      index       = 6  [(gogoproto.nullable) = false];
+	repeated Entry       entries     = 7  [(gogoproto.nullable) = false];
+	optional uint64      commit      = 8  [(gogoproto.nullable) = false];
+	optional Snapshot    snapshot    = 9  [(gogoproto.nullable) = false];
+	optional bool        reject      = 10 [(gogoproto.nullable) = false];
+	optional uint64      rejectHint  = 11 [(gogoproto.nullable) = false];
+	optional bytes       context     = 12;
+}
+
+message HardState {
+	optional uint64 term   = 1 [(gogoproto.nullable) = false];
+	optional uint64 vote   = 2 [(gogoproto.nullable) = false];
+	optional uint64 commit = 3 [(gogoproto.nullable) = false];
+}
+
+message ConfState {
+	repeated uint64 nodes    = 1;
+	repeated uint64 learners = 2;
+}
+
+enum ConfChangeType {
+	ConfChangeAddNode        = 0;
+	ConfChangeRemoveNode     = 1;
+	ConfChangeUpdateNode     = 2;
+	ConfChangeAddLearnerNode = 3;
+}
+
+message ConfChange {
+	optional uint64          ID      = 1 [(gogoproto.nullable) = false];
+	optional ConfChangeType  Type    = 2 [(gogoproto.nullable) = false];
+	optional uint64          NodeID  = 3 [(gogoproto.nullable) = false];
+	optional bytes           Context = 4;
+}
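
The schema above is what the generated Go code earlier in this diff implements. One pattern worth noting is how a ConfChange travels: it is marshaled and carried as the Data of an Entry whose Type is EntryConfChange, exactly as NewRawNode does in the file that follows. A hedged sketch of that wrapping (the field values are illustrative):

```go
package main

import (
	"fmt"
	"log"

	pb "github.com/coreos/etcd/raft/raftpb"
)

func main() {
	// A configuration change is itself a protobuf message...
	cc := pb.ConfChange{ID: 1, Type: pb.ConfChangeAddNode, NodeID: 2}
	data, err := cc.Marshal()
	if err != nil {
		log.Fatalf("marshal conf change: %v", err)
	}

	// ...carried as the Data of a log entry whose Type marks it as a ConfChange.
	ent := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: 1, Data: data}
	fmt.Printf("entry type=%v payload=%d bytes\n", ent.Type, len(ent.Data))
}
```
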
diff --git a/vendor/github.com/coreos/etcd/raft/rawnode.go b/vendor/github.com/coreos/etcd/raft/rawnode.go
new file mode 100644
index 0000000..925cb85
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/rawnode.go
@@ -0,0 +1,266 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"errors"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// ErrStepLocalMsg is returned when trying to step a local raft message.
+var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
+
+// ErrStepPeerNotFound is returned when trying to step a response message
+// but no peer is found in raft.prs for that node.
+var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
+
+// RawNode is a thread-unsafe Node.
+// The methods of this struct correspond to the methods of Node and are described
+// more fully there.
+type RawNode struct {
+	raft       *raft
+	prevSoftSt *SoftState
+	prevHardSt pb.HardState
+}
+
+func (rn *RawNode) newReady() Ready {
+	return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
+}
+
+func (rn *RawNode) commitReady(rd Ready) {
+	if rd.SoftState != nil {
+		rn.prevSoftSt = rd.SoftState
+	}
+	if !IsEmptyHardState(rd.HardState) {
+		rn.prevHardSt = rd.HardState
+	}
+	if rn.prevHardSt.Commit != 0 {
+		// In most cases, prevHardSt and rd.HardState will be the same
+		// because when there are new entries to apply we just sent a
+		// HardState with an updated Commit value. However, on initial
+		// startup the two are different because we don't send a HardState
+		// until something changes, but we do send any un-applied but
+		// committed entries (and previously-committed entries may be
+		// incorporated into the snapshot, even if rd.CommittedEntries is
+		// empty). Therefore we mark all committed entries as applied
+		// whether they were included in rd.HardState or not.
+		rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit)
+	}
+	if len(rd.Entries) > 0 {
+		e := rd.Entries[len(rd.Entries)-1]
+		rn.raft.raftLog.stableTo(e.Index, e.Term)
+	}
+	if !IsEmptySnap(rd.Snapshot) {
+		rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
+	}
+	if len(rd.ReadStates) != 0 {
+		rn.raft.readStates = nil
+	}
+}
+
+// NewRawNode returns a new RawNode given a configuration and a list of raft peers.
+func NewRawNode(config *Config, peers []Peer) (*RawNode, error) {
+	if config.ID == 0 {
+		panic("config.ID must not be zero")
+	}
+	r := newRaft(config)
+	rn := &RawNode{
+		raft: r,
+	}
+	lastIndex, err := config.Storage.LastIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	// If the log is empty, this is a new RawNode (like StartNode); otherwise it's
+	// restoring an existing RawNode (like RestartNode).
+	// TODO(bdarnell): rethink RawNode initialization and whether the application needs
+	// to be able to tell us when it expects the RawNode to exist.
+	if lastIndex == 0 {
+		r.becomeFollower(1, None)
+		ents := make([]pb.Entry, len(peers))
+		for i, peer := range peers {
+			cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+			data, err := cc.Marshal()
+			if err != nil {
+				panic("unexpected marshal error")
+			}
+
+			ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
+		}
+		r.raftLog.append(ents...)
+		r.raftLog.committed = uint64(len(ents))
+		for _, peer := range peers {
+			r.addNode(peer.ID)
+		}
+	}
+
+	// Set the initial hard and soft states after performing all initialization.
+	rn.prevSoftSt = r.softState()
+	if lastIndex == 0 {
+		rn.prevHardSt = emptyState
+	} else {
+		rn.prevHardSt = r.hardState()
+	}
+
+	return rn, nil
+}
+
+// Tick advances the internal logical clock by a single tick.
+func (rn *RawNode) Tick() {
+	rn.raft.tick()
+}
+
+// TickQuiesced advances the internal logical clock by a single tick without
+// performing any other state machine processing. It allows the caller to avoid
+// periodic heartbeats and elections when all of the peers in a Raft group are
+// known to be at the same state. Expected usage is to periodically invoke Tick
+// or TickQuiesced depending on whether the group is "active" or "quiesced".
+//
+// WARNING: Be very careful about using this method as it subverts the Raft
+// state machine. You should probably be using Tick instead.
+func (rn *RawNode) TickQuiesced() {
+	rn.raft.electionElapsed++
+}
+
+// Campaign causes this RawNode to transition to candidate state.
+func (rn *RawNode) Campaign() error {
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgHup,
+	})
+}
+
+// Propose proposes data be appended to the raft log.
+func (rn *RawNode) Propose(data []byte) error {
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgProp,
+		From: rn.raft.id,
+		Entries: []pb.Entry{
+			{Data: data},
+		}})
+}
+
+// ProposeConfChange proposes a config change.
+func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error {
+	data, err := cc.Marshal()
+	if err != nil {
+		return err
+	}
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgProp,
+		Entries: []pb.Entry{
+			{Type: pb.EntryConfChange, Data: data},
+		},
+	})
+}
+
+// ApplyConfChange applies a config change to the local node.
+func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
+	if cc.NodeID == None {
+		rn.raft.resetPendingConf()
+		return &pb.ConfState{Nodes: rn.raft.nodes()}
+	}
+	switch cc.Type {
+	case pb.ConfChangeAddNode:
+		rn.raft.addNode(cc.NodeID)
+	case pb.ConfChangeAddLearnerNode:
+		rn.raft.addLearner(cc.NodeID)
+	case pb.ConfChangeRemoveNode:
+		rn.raft.removeNode(cc.NodeID)
+	case pb.ConfChangeUpdateNode:
+		rn.raft.resetPendingConf()
+	default:
+		panic("unexpected conf type")
+	}
+	return &pb.ConfState{Nodes: rn.raft.nodes()}
+}
+
+// Step advances the state machine using the given message.
+func (rn *RawNode) Step(m pb.Message) error {
+	// ignore unexpected local messages received over the network
+	if IsLocalMsg(m.Type) {
+		return ErrStepLocalMsg
+	}
+	if pr := rn.raft.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
+		return rn.raft.Step(m)
+	}
+	return ErrStepPeerNotFound
+}
+
+// Ready returns the current point-in-time state of this RawNode.
+func (rn *RawNode) Ready() Ready {
+	rd := rn.newReady()
+	rn.raft.msgs = nil
+	return rd
+}
+
+// HasReady is called when the RawNode user needs to check whether any Ready is pending.
+// Checking logic in this method should be consistent with Ready.containsUpdates().
+func (rn *RawNode) HasReady() bool {
+	r := rn.raft
+	if !r.softState().equal(rn.prevSoftSt) {
+		return true
+	}
+	if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
+		return true
+	}
+	if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
+		return true
+	}
+	if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
+		return true
+	}
+	if len(r.readStates) != 0 {
+		return true
+	}
+	return false
+}
+
+// Advance notifies the RawNode that the application has applied and saved progress in the
+// last Ready results.
+func (rn *RawNode) Advance(rd Ready) {
+	rn.commitReady(rd)
+}
+
+// Status returns the current status of the given group.
+func (rn *RawNode) Status() *Status {
+	status := getStatus(rn.raft)
+	return &status
+}
+
+// ReportUnreachable reports the given node is not reachable for the last send.
+func (rn *RawNode) ReportUnreachable(id uint64) {
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
+}
+
+// ReportSnapshot reports the status of the sent snapshot.
+func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
+	rej := status == SnapshotFailure
+
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
+}
+
+// TransferLeader tries to transfer leadership to the given transferee.
+func (rn *RawNode) TransferLeader(transferee uint64) {
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee})
+}
+
+// ReadIndex requests a read state. The read state will be set in ready.
+// Read State has a read index. Once the application advances further than the read
+// index, any linearizable read requests issued before the read request can be
+// processed safely. The read state will have the same rctx attached.
+func (rn *RawNode) ReadIndex(rctx []byte) {
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
+}
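
RawNode leaves the driving loop to the caller: tick the logical clock, drain Ready, persist, send, apply, then Advance. A minimal sketch of one iteration, assuming the Config fields, Ready layout and MemoryStorage.Append from the rest of this vendored raft package:

```go
package main

import (
	"log"

	"github.com/coreos/etcd/raft"
)

func main() {
	storage := raft.NewMemoryStorage()
	cfg := &raft.Config{
		ID:              1,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   1 << 20,
		MaxInflightMsgs: 256,
	}

	rn, err := raft.NewRawNode(cfg, []raft.Peer{{ID: 1}})
	if err != nil {
		log.Fatal(err)
	}

	// One iteration of the driving loop: tick, drain Ready, persist, advance.
	rn.Tick()
	if rn.HasReady() {
		rd := rn.Ready()
		// A real application would also persist rd.HardState and rd.Snapshot,
		// send rd.Messages to peers, and apply rd.CommittedEntries.
		if err := storage.Append(rd.Entries); err != nil {
			log.Fatal(err)
		}
		rn.Advance(rd)
	}
}
```
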
diff --git a/vendor/github.com/coreos/etcd/raft/read_only.go b/vendor/github.com/coreos/etcd/raft/read_only.go
new file mode 100644
index 0000000..ae746fa
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/read_only.go
@@ -0,0 +1,118 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "github.com/coreos/etcd/raft/raftpb"
+
+// ReadState provides state for a read-only query.
+// It is the caller's responsibility to call ReadIndex before getting this
+// state from Ready; it is also the caller's duty to differentiate whether
+// this state is the one it requested through RequestCtx, e.g. by using a
+// unique id as RequestCtx.
+type ReadState struct {
+	Index      uint64
+	RequestCtx []byte
+}
+
+type readIndexStatus struct {
+	req   pb.Message
+	index uint64
+	acks  map[uint64]struct{}
+}
+
+type readOnly struct {
+	option           ReadOnlyOption
+	pendingReadIndex map[string]*readIndexStatus
+	readIndexQueue   []string
+}
+
+func newReadOnly(option ReadOnlyOption) *readOnly {
+	return &readOnly{
+		option:           option,
+		pendingReadIndex: make(map[string]*readIndexStatus),
+	}
+}
+
+// addRequest adds a read-only request to the readonly struct.
+// `index` is the commit index of the raft state machine when it received
+// the read only request.
+// `m` is the original read only request message from the local or remote node.
+func (ro *readOnly) addRequest(index uint64, m pb.Message) {
+	ctx := string(m.Entries[0].Data)
+	if _, ok := ro.pendingReadIndex[ctx]; ok {
+		return
+	}
+	ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})}
+	ro.readIndexQueue = append(ro.readIndexQueue, ctx)
+}
+
+// recvAck notifies the readonly struct that the raft state machine received
+// an acknowledgment of the heartbeat that was attached to the read-only
+// request context.
+func (ro *readOnly) recvAck(m pb.Message) int {
+	rs, ok := ro.pendingReadIndex[string(m.Context)]
+	if !ok {
+		return 0
+	}
+
+	rs.acks[m.From] = struct{}{}
+	// add one to include an ack from local node
+	return len(rs.acks) + 1
+}
+
+// advance advances the read only request queue kept by the readonly struct.
+// It dequeues the requests until it finds the read only request that has
+// the same context as the given `m`.
+func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
+	var (
+		i     int
+		found bool
+	)
+
+	ctx := string(m.Context)
+	rss := []*readIndexStatus{}
+
+	for _, okctx := range ro.readIndexQueue {
+		i++
+		rs, ok := ro.pendingReadIndex[okctx]
+		if !ok {
+			panic("cannot find corresponding read state from pending map")
+		}
+		rss = append(rss, rs)
+		if okctx == ctx {
+			found = true
+			break
+		}
+	}
+
+	if found {
+		ro.readIndexQueue = ro.readIndexQueue[i:]
+		for _, rs := range rss {
+			delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data))
+		}
+		return rss
+	}
+
+	return nil
+}
+
+// lastPendingRequestCtx returns the context of the last pending read only
+// request in readonly struct.
+func (ro *readOnly) lastPendingRequestCtx() string {
+	if len(ro.readIndexQueue) == 0 {
+		return ""
+	}
+	return ro.readIndexQueue[len(ro.readIndexQueue)-1]
+}
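
From the application side, the readOnly queue above is exercised through RawNode.ReadIndex: the caller tags the request with a unique context, and once a quorum of heartbeat acks confirms leadership the matching ReadState surfaces in a later Ready. A hedged sketch of that calling pattern (the request id and the serving condition are illustrative):

```go
package main

import (
	"log"

	"github.com/coreos/etcd/raft"
)

// serveRead sketches the ReadIndex flow: tag the request with a unique
// context, then match the ReadState that eventually appears in a Ready.
func serveRead(rn *raft.RawNode, appliedIndex uint64) {
	reqCtx := []byte("read-req-42") // hypothetical unique request id
	rn.ReadIndex(reqCtx)

	// In practice the ReadState arrives in a later Ready, after heartbeat acks;
	// this shows only the matching step.
	if !rn.HasReady() {
		return
	}
	rd := rn.Ready()
	for _, rs := range rd.ReadStates {
		if string(rs.RequestCtx) == string(reqCtx) && appliedIndex >= rs.Index {
			log.Printf("linearizable read can be served at index %d", rs.Index)
		}
	}
	rn.Advance(rd)
}

func main() {} // placeholder; see the RawNode loop sketch above for full setup
```
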
diff --git a/vendor/github.com/coreos/etcd/raft/status.go b/vendor/github.com/coreos/etcd/raft/status.go
new file mode 100644
index 0000000..f4d3d86
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/status.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+type Status struct {
+	ID uint64
+
+	pb.HardState
+	SoftState
+
+	Applied  uint64
+	Progress map[uint64]Progress
+
+	LeadTransferee uint64
+}
+
+// getStatus gets a copy of the current raft status.
+func getStatus(r *raft) Status {
+	s := Status{
+		ID:             r.id,
+		LeadTransferee: r.leadTransferee,
+	}
+
+	s.HardState = r.hardState()
+	s.SoftState = *r.softState()
+
+	s.Applied = r.raftLog.applied
+
+	if s.RaftState == StateLeader {
+		s.Progress = make(map[uint64]Progress)
+		for id, p := range r.prs {
+			s.Progress[id] = *p
+		}
+
+		for id, p := range r.learnerPrs {
+			s.Progress[id] = *p
+		}
+	}
+
+	return s
+}
+
+// MarshalJSON translates the raft status into JSON.
+// TODO: try to simplify this by introducing ID type into raft
+func (s Status) MarshalJSON() ([]byte, error) {
+	j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`,
+		s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied)
+
+	if len(s.Progress) == 0 {
+		j += "},"
+	} else {
+		for k, v := range s.Progress {
+			subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
+			j += subj
+		}
+		// remove the trailing ","
+		j = j[:len(j)-1] + "},"
+	}
+
+	j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee)
+	return []byte(j), nil
+}
+
+func (s Status) String() string {
+	b, err := s.MarshalJSON()
+	if err != nil {
+		raftLogger.Panicf("unexpected error: %v", err)
+	}
+	return string(b)
+}
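
Because MarshalJSON assembles the JSON by hand, the output shape is fixed by the format strings above. A small sketch of how an application might log it; the sample output in the comment is illustrative:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/raft"
)

// logStatus prints the node's raft status; Status() returns a snapshot of
// HardState, SoftState, the applied index and per-peer progress (leader only).
func logStatus(rn *raft.RawNode) {
	st := rn.Status()
	// String() delegates to MarshalJSON above and yields something like:
	// {"id":"1","term":2,"vote":"0","commit":0,"lead":"0","raftState":"StateFollower","applied":0,"progress":{},"leadtransferee":"0"}
	fmt.Println(st.String())
}

func main() {} // placeholder; rn would come from raft.NewRawNode as sketched earlier
```
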
diff --git a/vendor/github.com/coreos/etcd/raft/storage.go b/vendor/github.com/coreos/etcd/raft/storage.go
new file mode 100644
index 0000000..69c3a7d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/storage.go
@@ -0,0 +1,271 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"errors"
+	"sync"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// ErrCompacted is returned by Storage.Entries/Compact when a requested
+// index is unavailable because it predates the last snapshot.
+var ErrCompacted = errors.New("requested index is unavailable due to compaction")
+
+// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested
+// index is older than the existing snapshot.
+var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot")
+
+// ErrUnavailable is returned by Storage interface when the requested log entries
+// are unavailable.
+var ErrUnavailable = errors.New("requested entry at index is unavailable")
+
+// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required
+// snapshot is temporarily unavailable.
+var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")
+
+// Storage is an interface that may be implemented by the application
+// to retrieve log entries from storage.
+//
+// If any Storage method returns an error, the raft instance will
+// become inoperable and refuse to participate in elections; the
+// application is responsible for cleanup and recovery in this case.
+type Storage interface {
+	// InitialState returns the saved HardState and ConfState information.
+	InitialState() (pb.HardState, pb.ConfState, error)
+	// Entries returns a slice of log entries in the range [lo,hi).
+	// MaxSize limits the total size of the log entries returned, but
+	// Entries returns at least one entry if any.
+	Entries(lo, hi, maxSize uint64) ([]pb.Entry, error)
+	// Term returns the term of entry i, which must be in the range
+	// [FirstIndex()-1, LastIndex()]. The term of the entry before
+	// FirstIndex is retained for matching purposes even though the
+	// rest of that entry may not be available.
+	Term(i uint64) (uint64, error)
+	// LastIndex returns the index of the last entry in the log.
+	LastIndex() (uint64, error)
+	// FirstIndex returns the index of the first log entry that is
+	// possibly available via Entries (older entries have been incorporated
+	// into the latest Snapshot; if storage only contains the dummy entry the
+	// first log entry is not available).
+	FirstIndex() (uint64, error)
+	// Snapshot returns the most recent snapshot.
+	// If the snapshot is temporarily unavailable, it should return
+	// ErrSnapshotTemporarilyUnavailable, so the raft state machine knows that
+	// Storage needs some time to prepare the snapshot and will call Snapshot later.
+	Snapshot() (pb.Snapshot, error)
+}
+
+// MemoryStorage implements the Storage interface backed by an
+// in-memory array.
+type MemoryStorage struct {
+	// Protects access to all fields. Most methods of MemoryStorage are
+	// run on the raft goroutine, but Append() is run on an application
+	// goroutine.
+	sync.Mutex
+
+	hardState pb.HardState
+	snapshot  pb.Snapshot
+	// ents[i] has raft log position i+snapshot.Metadata.Index
+	ents []pb.Entry
+}
+
+// NewMemoryStorage creates an empty MemoryStorage.
+func NewMemoryStorage() *MemoryStorage {
+	return &MemoryStorage{
+		// When starting from scratch populate the list with a dummy entry at term zero.
+		ents: make([]pb.Entry, 1),
+	}
+}
+
+// InitialState implements the Storage interface.
+func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) {
+	return ms.hardState, ms.snapshot.Metadata.ConfState, nil
+}
+
+// SetHardState saves the current HardState.
+func (ms *MemoryStorage) SetHardState(st pb.HardState) error {
+	ms.Lock()
+	defer ms.Unlock()
+	ms.hardState = st
+	return nil
+}
+
+// Entries implements the Storage interface.
+func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	offset := ms.ents[0].Index
+	if lo <= offset {
+		return nil, ErrCompacted
+	}
+	if hi > ms.lastIndex()+1 {
+		raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex())
+	}
+	// only contains dummy entries.
+	if len(ms.ents) == 1 {
+		return nil, ErrUnavailable
+	}
+
+	ents := ms.ents[lo-offset : hi-offset]
+	return limitSize(ents, maxSize), nil
+}
+
+// Term implements the Storage interface.
+func (ms *MemoryStorage) Term(i uint64) (uint64, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	offset := ms.ents[0].Index
+	if i < offset {
+		return 0, ErrCompacted
+	}
+	if int(i-offset) >= len(ms.ents) {
+		return 0, ErrUnavailable
+	}
+	return ms.ents[i-offset].Term, nil
+}
+
+// LastIndex implements the Storage interface.
+func (ms *MemoryStorage) LastIndex() (uint64, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	return ms.lastIndex(), nil
+}
+
+func (ms *MemoryStorage) lastIndex() uint64 {
+	return ms.ents[0].Index + uint64(len(ms.ents)) - 1
+}
+
+// FirstIndex implements the Storage interface.
+func (ms *MemoryStorage) FirstIndex() (uint64, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	return ms.firstIndex(), nil
+}
+
+func (ms *MemoryStorage) firstIndex() uint64 {
+	return ms.ents[0].Index + 1
+}
+
+// Snapshot implements the Storage interface.
+func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	return ms.snapshot, nil
+}
+
+// ApplySnapshot overwrites the contents of this Storage object with
+// those of the given snapshot.
+func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error {
+	ms.Lock()
+	defer ms.Unlock()
+
+	// Reject the snapshot if it is not newer than the one already applied.
+	msIndex := ms.snapshot.Metadata.Index
+	snapIndex := snap.Metadata.Index
+	if msIndex >= snapIndex {
+		return ErrSnapOutOfDate
+	}
+
+	ms.snapshot = snap
+	ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}}
+	return nil
+}
+
+// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and
+// can be used to reconstruct the state at that point.
+// If any configuration changes have been made since the last compaction,
+// the result of the last ApplyConfChange must be passed in.
+func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	if i <= ms.snapshot.Metadata.Index {
+		return pb.Snapshot{}, ErrSnapOutOfDate
+	}
+
+	offset := ms.ents[0].Index
+	if i > ms.lastIndex() {
+		raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex())
+	}
+
+	ms.snapshot.Metadata.Index = i
+	ms.snapshot.Metadata.Term = ms.ents[i-offset].Term
+	if cs != nil {
+		ms.snapshot.Metadata.ConfState = *cs
+	}
+	ms.snapshot.Data = data
+	return ms.snapshot, nil
+}
+
+// Compact discards all log entries prior to compactIndex.
+// It is the application's responsibility to not attempt to compact an index
+// greater than raftLog.applied.
+func (ms *MemoryStorage) Compact(compactIndex uint64) error {
+	ms.Lock()
+	defer ms.Unlock()
+	offset := ms.ents[0].Index
+	if compactIndex <= offset {
+		return ErrCompacted
+	}
+	if compactIndex > ms.lastIndex() {
+		raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex())
+	}
+
+	i := compactIndex - offset
+	ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i)
+	ents[0].Index = ms.ents[i].Index
+	ents[0].Term = ms.ents[i].Term
+	ents = append(ents, ms.ents[i+1:]...)
+	ms.ents = ents
+	return nil
+}
+
+// Append appends the new entries to storage.
+// TODO (xiangli): ensure the entries are continuous and
+// entries[0].Index > ms.ents[0].Index
+func (ms *MemoryStorage) Append(entries []pb.Entry) error {
+	if len(entries) == 0 {
+		return nil
+	}
+
+	ms.Lock()
+	defer ms.Unlock()
+
+	first := ms.firstIndex()
+	last := entries[0].Index + uint64(len(entries)) - 1
+
+	// shortcut if there is no new entry.
+	if last < first {
+		return nil
+	}
+	// truncate compacted entries
+	if first > entries[0].Index {
+		entries = entries[first-entries[0].Index:]
+	}
+
+	offset := entries[0].Index - ms.ents[0].Index
+	switch {
+	case uint64(len(ms.ents)) > offset:
+		ms.ents = append([]pb.Entry{}, ms.ents[:offset]...)
+		ms.ents = append(ms.ents, entries...)
+	case uint64(len(ms.ents)) == offset:
+		ms.ents = append(ms.ents, entries...)
+	default:
+		raftLogger.Panicf("missing log entry [last: %d, append at: %d]",
+			ms.lastIndex(), entries[0].Index)
+	}
+	return nil
+}
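
A minimal usage sketch of the vendored MemoryStorage API above (Append, Compact, Term, Entries). The `package main` wrapper, sample indexes, and printed values are illustrative only and assume the vendored import paths shown in this file:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/raft"
	pb "github.com/coreos/etcd/raft/raftpb"
)

func main() {
	ms := raft.NewMemoryStorage()

	// Append entries 1..3; index 0 is the dummy entry created by NewMemoryStorage.
	_ = ms.Append([]pb.Entry{
		{Index: 1, Term: 1},
		{Index: 2, Term: 1},
		{Index: 3, Term: 2},
	})

	last, _ := ms.LastIndex() // 3

	// Discard everything before index 2; index 1 now reports ErrCompacted.
	_ = ms.Compact(2)
	_, err := ms.Term(1)
	fmt.Println(err == raft.ErrCompacted) // true

	first, _ := ms.FirstIndex()                 // 3
	ents, _ := ms.Entries(first, last+1, 1<<20) // one entry: {Index:3 Term:2}
	fmt.Println(first, last, len(ents))
}
```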
diff --git a/vendor/github.com/coreos/etcd/raft/util.go b/vendor/github.com/coreos/etcd/raft/util.go
new file mode 100644
index 0000000..f4141fe
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/util.go
@@ -0,0 +1,129 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"bytes"
+	"fmt"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+func (st StateType) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf("%q", st.String())), nil
+}
+
+// uint64Slice implements sort.Interface for a slice of uint64
+type uint64Slice []uint64
+
+func (p uint64Slice) Len() int           { return len(p) }
+func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func min(a, b uint64) uint64 {
+	if a > b {
+		return b
+	}
+	return a
+}
+
+func max(a, b uint64) uint64 {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func IsLocalMsg(msgt pb.MessageType) bool {
+	return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable ||
+		msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum
+}
+
+func IsResponseMsg(msgt pb.MessageType) bool {
+	return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp
+}
+
+// voteRespMsgType maps vote and prevote message types to their corresponding response types.
+func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
+	switch msgt {
+	case pb.MsgVote:
+		return pb.MsgVoteResp
+	case pb.MsgPreVote:
+		return pb.MsgPreVoteResp
+	default:
+		panic(fmt.Sprintf("not a vote message: %s", msgt))
+	}
+}
+
+// EntryFormatter can be implemented by the application to provide human-readable formatting
+// of entry data. Nil is a valid EntryFormatter and will use a default format.
+type EntryFormatter func([]byte) string
+
+// DescribeMessage returns a concise human-readable description of a
+// Message for debugging.
+func DescribeMessage(m pb.Message, f EntryFormatter) string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index)
+	if m.Reject {
+		fmt.Fprintf(&buf, " Rejected")
+		if m.RejectHint != 0 {
+			fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint)
+		}
+	}
+	if m.Commit != 0 {
+		fmt.Fprintf(&buf, " Commit:%d", m.Commit)
+	}
+	if len(m.Entries) > 0 {
+		fmt.Fprintf(&buf, " Entries:[")
+		for i, e := range m.Entries {
+			if i != 0 {
+				buf.WriteString(", ")
+			}
+			buf.WriteString(DescribeEntry(e, f))
+		}
+		fmt.Fprintf(&buf, "]")
+	}
+	if !IsEmptySnap(m.Snapshot) {
+		fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot)
+	}
+	return buf.String()
+}
+
+// DescribeEntry returns a concise human-readable description of an
+// Entry for debugging.
+func DescribeEntry(e pb.Entry, f EntryFormatter) string {
+	var formatted string
+	if e.Type == pb.EntryNormal && f != nil {
+		formatted = f(e.Data)
+	} else {
+		formatted = fmt.Sprintf("%q", e.Data)
+	}
+	return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted)
+}
+
+func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry {
+	if len(ents) == 0 {
+		return ents
+	}
+	size := ents[0].Size()
+	var limit int
+	for limit = 1; limit < len(ents); limit++ {
+		size += ents[limit].Size()
+		if uint64(size) > maxSize {
+			break
+		}
+	}
+	return ents[:limit]
+}
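
DescribeMessage and DescribeEntry above accept an optional EntryFormatter for rendering entry payloads. A small sketch assuming the same vendored import paths; the payload string is made up:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/raft"
	pb "github.com/coreos/etcd/raft/raftpb"
)

func main() {
	// Render entry data as plain text instead of the default quoted form.
	formatter := func(data []byte) string { return string(data) }

	e := pb.Entry{Term: 2, Index: 5, Type: pb.EntryNormal, Data: []byte("set x=1")}
	fmt.Println(raft.DescribeEntry(e, formatter)) // "2/5 EntryNormal set x=1"

	m := pb.Message{From: 1, To: 2, Type: pb.MsgApp, Term: 2, LogTerm: 2, Index: 4, Commit: 3, Entries: []pb.Entry{e}}
	fmt.Println(raft.DescribeMessage(m, formatter))
}
```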
diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go
new file mode 100644
index 0000000..aa96ffa
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/version/version.go
@@ -0,0 +1,56 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package version implements etcd version parsing and contains latest version
+// information.
+package version
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/coreos/go-semver/semver"
+)
+
+var (
+	// MinClusterVersion is the min cluster version this etcd binary is compatible with.
+	MinClusterVersion = "3.0.0"
+	Version           = "3.3.25"
+	APIVersion        = "unknown"
+
+	// GitSHA is the git SHA value, set during the build
+	GitSHA = "Not provided (use ./build instead of go build)"
+)
+
+func init() {
+	ver, err := semver.NewVersion(Version)
+	if err == nil {
+		APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
+	}
+}
+
+type Versions struct {
+	Server  string `json:"etcdserver"`
+	Cluster string `json:"etcdcluster"`
+	// TODO: raft state machine version
+}
+
+// Cluster returns the cluster version string, keeping only the major.minor fields of v.
+func Cluster(v string) string {
+	vs := strings.Split(v, ".")
+	if len(vs) <= 2 {
+		return v
+	}
+	return fmt.Sprintf("%s.%s", vs[0], vs[1])
+}
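
For reference, init above derives APIVersion from Version, and Cluster trims a version string down to major.minor. A quick illustrative sketch:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/version"
)

func main() {
	fmt.Println(version.Version)    // "3.3.25"
	fmt.Println(version.APIVersion) // "3.3", set in init() via semver

	fmt.Println(version.Cluster("3.3.25")) // "3.3"
	fmt.Println(version.Cluster("3.4"))    // "3.4" (already major.minor)
}
```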
diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE
new file mode 100644
index 0000000..23a0ada
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2018 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go
new file mode 100644
index 0000000..76cf485
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/semver/semver.go
@@ -0,0 +1,296 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semver implements Semantic Versions (http://semver.org).
+package semver
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+type Version struct {
+	Major      int64
+	Minor      int64
+	Patch      int64
+	PreRelease PreRelease
+	Metadata   string
+}
+
+type PreRelease string
+
+func splitOff(input *string, delim string) (val string) {
+	parts := strings.SplitN(*input, delim, 2)
+
+	if len(parts) == 2 {
+		*input = parts[0]
+		val = parts[1]
+	}
+
+	return val
+}
+
+func New(version string) *Version {
+	return Must(NewVersion(version))
+}
+
+func NewVersion(version string) (*Version, error) {
+	v := Version{}
+
+	if err := v.Set(version); err != nil {
+		return nil, err
+	}
+
+	return &v, nil
+}
+
+// Must is a helper for wrapping NewVersion and will panic if err is not nil.
+func Must(v *Version, err error) *Version {
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Set parses and updates v from the given version string. It implements flag.Value.
+func (v *Version) Set(version string) error {
+	metadata := splitOff(&version, "+")
+	preRelease := PreRelease(splitOff(&version, "-"))
+	dotParts := strings.SplitN(version, ".", 3)
+
+	if len(dotParts) != 3 {
+		return fmt.Errorf("%s is not in dotted-tri format", version)
+	}
+
+	if err := validateIdentifier(string(preRelease)); err != nil {
+		return fmt.Errorf("failed to validate pre-release: %v", err)
+	}
+
+	if err := validateIdentifier(metadata); err != nil {
+		return fmt.Errorf("failed to validate metadata: %v", err)
+	}
+
+	parsed := make([]int64, 3)
+
+	for i, v := range dotParts[:3] {
+		val, err := strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			return err
+		}
+		parsed[i] = val
+	}
+
+	v.Metadata = metadata
+	v.PreRelease = preRelease
+	v.Major = parsed[0]
+	v.Minor = parsed[1]
+	v.Patch = parsed[2]
+	return nil
+}
+
+func (v Version) String() string {
+	var buffer bytes.Buffer
+
+	fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
+
+	if v.PreRelease != "" {
+		fmt.Fprintf(&buffer, "-%s", v.PreRelease)
+	}
+
+	if v.Metadata != "" {
+		fmt.Fprintf(&buffer, "+%s", v.Metadata)
+	}
+
+	return buffer.String()
+}
+
+func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var data string
+	if err := unmarshal(&data); err != nil {
+		return err
+	}
+	return v.Set(data)
+}
+
+func (v Version) MarshalJSON() ([]byte, error) {
+	return []byte(`"` + v.String() + `"`), nil
+}
+
+func (v *Version) UnmarshalJSON(data []byte) error {
+	l := len(data)
+	if l == 0 || string(data) == `""` {
+		return nil
+	}
+	if l < 2 || data[0] != '"' || data[l-1] != '"' {
+		return errors.New("invalid semver string")
+	}
+	return v.Set(string(data[1 : l-1]))
+}
+
+// Compare tests if v is less than, equal to, or greater than versionB,
+// returning -1, 0, or +1 respectively.
+func (v Version) Compare(versionB Version) int {
+	if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
+		return cmp
+	}
+	return preReleaseCompare(v, versionB)
+}
+
+// Equal tests if v is equal to versionB.
+func (v Version) Equal(versionB Version) bool {
+	return v.Compare(versionB) == 0
+}
+
+// LessThan tests if v is less than versionB.
+func (v Version) LessThan(versionB Version) bool {
+	return v.Compare(versionB) < 0
+}
+
+// Slice converts the comparable parts of the semver into a slice of integers.
+func (v Version) Slice() []int64 {
+	return []int64{v.Major, v.Minor, v.Patch}
+}
+
+func (p PreRelease) Slice() []string {
+	preRelease := string(p)
+	return strings.Split(preRelease, ".")
+}
+
+func preReleaseCompare(versionA Version, versionB Version) int {
+	a := versionA.PreRelease
+	b := versionB.PreRelease
+
+	/* Handle the case where, if two versions are otherwise equal, it is the
+	 * one without a PreRelease that is greater */
+	if len(a) == 0 && (len(b) > 0) {
+		return 1
+	} else if len(b) == 0 && (len(a) > 0) {
+		return -1
+	}
+
+	// If there is a prerelease, check and compare each part.
+	return recursivePreReleaseCompare(a.Slice(), b.Slice())
+}
+
+func recursiveCompare(versionA []int64, versionB []int64) int {
+	if len(versionA) == 0 {
+		return 0
+	}
+
+	a := versionA[0]
+	b := versionB[0]
+
+	if a > b {
+		return 1
+	} else if a < b {
+		return -1
+	}
+
+	return recursiveCompare(versionA[1:], versionB[1:])
+}
+
+func recursivePreReleaseCompare(versionA []string, versionB []string) int {
+	// A larger set of pre-release fields has a higher precedence than a smaller set,
+	// if all of the preceding identifiers are equal.
+	if len(versionA) == 0 {
+		if len(versionB) > 0 {
+			return -1
+		}
+		return 0
+	} else if len(versionB) == 0 {
+		// We're longer than versionB so return 1.
+		return 1
+	}
+
+	a := versionA[0]
+	b := versionB[0]
+
+	aInt := false
+	bInt := false
+
+	aI, err := strconv.Atoi(versionA[0])
+	if err == nil {
+		aInt = true
+	}
+
+	bI, err := strconv.Atoi(versionB[0])
+	if err == nil {
+		bInt = true
+	}
+
+	// Numeric identifiers always have lower precedence than non-numeric identifiers.
+	if aInt && !bInt {
+		return -1
+	} else if !aInt && bInt {
+		return 1
+	}
+
+	// Handle Integer Comparison
+	if aInt && bInt {
+		if aI > bI {
+			return 1
+		} else if aI < bI {
+			return -1
+		}
+	}
+
+	// Handle String Comparison
+	if a > b {
+		return 1
+	} else if a < b {
+		return -1
+	}
+
+	return recursivePreReleaseCompare(versionA[1:], versionB[1:])
+}
+
+// BumpMajor increments the Major field by 1 and resets all other fields to their default values
+func (v *Version) BumpMajor() {
+	v.Major += 1
+	v.Minor = 0
+	v.Patch = 0
+	v.PreRelease = PreRelease("")
+	v.Metadata = ""
+}
+
+// BumpMinor increments the Minor field by 1 and resets the Patch, PreRelease and Metadata fields to their default values
+func (v *Version) BumpMinor() {
+	v.Minor += 1
+	v.Patch = 0
+	v.PreRelease = PreRelease("")
+	v.Metadata = ""
+}
+
+// BumpPatch increments the Patch field by 1 and resets the PreRelease and Metadata fields to their default values
+func (v *Version) BumpPatch() {
+	v.Patch += 1
+	v.PreRelease = PreRelease("")
+	v.Metadata = ""
+}
+
+// validateIdentifier makes sure the provided identifier satisfies semver spec
+func validateIdentifier(id string) error {
+	if id != "" && !reIdentifier.MatchString(id) {
+		return fmt.Errorf("%s is not a valid semver identifier", id)
+	}
+	return nil
+}
+
+// reIdentifier is a regular expression used to check that pre-release and metadata
+// identifiers satisfy the spec requirements
+var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
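
The semver package above parses, compares, and bumps versions. A minimal sketch of the exported API; the version strings are made up:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	a := semver.New("3.3.25")                       // panics on a malformed version
	b, err := semver.NewVersion("3.4.0-rc.1+build") // returns an error instead of panicking
	if err != nil {
		panic(err)
	}

	fmt.Println(a.LessThan(*b)) // true
	fmt.Println(a.Compare(*b))  // -1
	fmt.Println(b.PreRelease)   // "rc.1"
	fmt.Println(b.Metadata)     // "build"

	b.BumpMinor()
	fmt.Println(b) // "3.5.0": pre-release and metadata are cleared
}
```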
diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go
new file mode 100644
index 0000000..e256b41
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/semver/sort.go
@@ -0,0 +1,38 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semver
+
+import (
+	"sort"
+)
+
+type Versions []*Version
+
+func (s Versions) Len() int {
+	return len(s)
+}
+
+func (s Versions) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s Versions) Less(i, j int) bool {
+	return s[i].LessThan(*s[j])
+}
+
+// Sort sorts the given slice of Version
+func Sort(versions []*Version) {
+	sort.Sort(Versions(versions))
+}
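
And sorting, as provided by sort.go above; note that a pre-release orders before its final release, per the comparison rules in semver.go (versions here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	versions := []*semver.Version{
		semver.New("3.4.0"),
		semver.New("3.3.25"),
		semver.New("3.4.0-rc.1"),
	}

	semver.Sort(versions)
	for _, v := range versions {
		fmt.Println(v) // 3.3.25, 3.4.0-rc.1, 3.4.0
	}
}
```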
diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go
index 44b8cd3..38ce6d2 100644
--- a/vendor/github.com/coreos/pkg/capnslog/init.go
+++ b/vendor/github.com/coreos/pkg/capnslog/init.go
@@ -32,7 +32,7 @@
 func init() {
 	initHijack()
 
-	// Go `log` pacakge uses os.Stderr.
+	// Go `log` package uses os.Stderr.
 	SetFormatter(NewDefaultFormatter(os.Stderr))
 	SetGlobalLogLevel(INFO)
 }
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
index 1ce0be2..f85c0cc 100644
--- a/vendor/github.com/gogo/protobuf/proto/text_parser.go
+++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -318,7 +318,7 @@
 		if i > utf8.MaxRune {
 			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
 		}
-		return string(i), s, nil
+		return string(rune(i)), s, nil
 	}
 	return "", "", fmt.Errorf(`unknown escape \%c`, r)
 }
diff --git a/vendor/github.com/golang/mock/AUTHORS b/vendor/github.com/golang/mock/AUTHORS
new file mode 100644
index 0000000..660b8cc
--- /dev/null
+++ b/vendor/github.com/golang/mock/AUTHORS
@@ -0,0 +1,12 @@
+# This is the official list of GoMock authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Alex Reece <awreece@gmail.com>
+Google Inc.
diff --git a/vendor/github.com/golang/mock/CONTRIBUTORS b/vendor/github.com/golang/mock/CONTRIBUTORS
new file mode 100644
index 0000000..def849c
--- /dev/null
+++ b/vendor/github.com/golang/mock/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute (and typically
+# have contributed) code to the gomock repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people.  For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+#     http://code.google.com/legal/individual-cla-v1.0.html
+#     http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+#     Name <email address>
+#
+# An entry with two email addresses specifies that the
+# first address should be used in the submit logs and
+# that the second address should be recognized as the
+# same person when interacting with Rietveld.
+
+# Please keep the list sorted.
+
+Aaron Jacobs <jacobsa@google.com> <aaronjjacobs@gmail.com>
+Alex Reece <awreece@gmail.com>
+David Symonds <dsymonds@golang.org>
+Ryan Barrett <ryanb@google.com>
diff --git a/vendor/github.com/golang/mock/LICENSE b/vendor/github.com/golang/mock/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/golang/mock/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/golang/mock/gomock/call.go b/vendor/github.com/golang/mock/gomock/call.go
new file mode 100644
index 0000000..13c9f44
--- /dev/null
+++ b/vendor/github.com/golang/mock/gomock/call.go
@@ -0,0 +1,445 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gomock
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// Call represents an expected call to a mock.
+type Call struct {
+	t TestHelper // for triggering test failures on invalid call setup
+
+	receiver   interface{}  // the receiver of the method call
+	method     string       // the name of the method
+	methodType reflect.Type // the type of the method
+	args       []Matcher    // the args
+	origin     string       // file and line number of call setup
+
+	preReqs []*Call // prerequisite calls
+
+	// Expectations
+	minCalls, maxCalls int
+
+	numCalls int // actual number made
+
+	// actions are called when this Call is called. Each action gets the args and
+	// can set the return values by returning a non-nil slice. Actions run in the
+	// order they are created.
+	actions []func([]interface{}) []interface{}
+}
+
+// newCall creates a *Call. It requires the method type in order to support
+// unexported methods.
+func newCall(t TestHelper, receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call {
+	t.Helper()
+
+	// TODO: check arity, types.
+	mArgs := make([]Matcher, len(args))
+	for i, arg := range args {
+		if m, ok := arg.(Matcher); ok {
+			mArgs[i] = m
+		} else if arg == nil {
+			// Handle nil specially so that passing a nil interface value
+			// will match the typed nils of concrete args.
+			mArgs[i] = Nil()
+		} else {
+			mArgs[i] = Eq(arg)
+		}
+	}
+
+	// callerInfo's skip should be updated if the number of calls between the user's test
+	// and this line changes, i.e. this code is wrapped in another anonymous function.
+	// 0 is us, 1 is RecordCallWithMethodType(), 2 is the generated recorder, and 3 is the user's test.
+	origin := callerInfo(3)
+	actions := []func([]interface{}) []interface{}{func([]interface{}) []interface{} {
+		// Synthesize the zero value for each of the return args' types.
+		rets := make([]interface{}, methodType.NumOut())
+		for i := 0; i < methodType.NumOut(); i++ {
+			rets[i] = reflect.Zero(methodType.Out(i)).Interface()
+		}
+		return rets
+	}}
+	return &Call{t: t, receiver: receiver, method: method, methodType: methodType,
+		args: mArgs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions}
+}
+
+// AnyTimes allows the expectation to be called 0 or more times
+func (c *Call) AnyTimes() *Call {
+	c.minCalls, c.maxCalls = 0, 1e8 // close enough to infinity
+	return c
+}
+
+// MinTimes requires the call to occur at least n times. If AnyTimes or MaxTimes have not been called or if MaxTimes
+// was previously called with 1, MinTimes also sets the maximum number of calls to infinity.
+func (c *Call) MinTimes(n int) *Call {
+	c.minCalls = n
+	if c.maxCalls == 1 {
+		c.maxCalls = 1e8
+	}
+	return c
+}
+
+// MaxTimes limits the number of calls to n times. If AnyTimes or MinTimes have not been called or if MinTimes was
+// previously called with 1, MaxTimes also sets the minimum number of calls to 0.
+func (c *Call) MaxTimes(n int) *Call {
+	c.maxCalls = n
+	if c.minCalls == 1 {
+		c.minCalls = 0
+	}
+	return c
+}
+
+// DoAndReturn declares the action to run when the call is matched.
+// The return values from this function are returned by the mocked function.
+// It takes an interface{} argument to support n-arity functions.
+func (c *Call) DoAndReturn(f interface{}) *Call {
+	// TODO: Check arity and types here, rather than dying badly elsewhere.
+	v := reflect.ValueOf(f)
+
+	c.addAction(func(args []interface{}) []interface{} {
+		c.t.Helper()
+		vArgs := make([]reflect.Value, len(args))
+		ft := v.Type()
+		if c.methodType.NumIn() != ft.NumIn() {
+			c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v: got %d, want %d [%s]",
+				c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin)
+			return nil
+		}
+		for i := 0; i < len(args); i++ {
+			if args[i] != nil {
+				vArgs[i] = reflect.ValueOf(args[i])
+			} else {
+				// Use the zero value for the arg.
+				vArgs[i] = reflect.Zero(ft.In(i))
+			}
+		}
+		vRets := v.Call(vArgs)
+		rets := make([]interface{}, len(vRets))
+		for i, ret := range vRets {
+			rets[i] = ret.Interface()
+		}
+		return rets
+	})
+	return c
+}
+
+// Do declares the action to run when the call is matched. The function's
+// return values are ignored to retain backward compatibility. To use the
+// return values call DoAndReturn.
+// It takes an interface{} argument to support n-arity functions.
+func (c *Call) Do(f interface{}) *Call {
+	// TODO: Check arity and types here, rather than dying badly elsewhere.
+	v := reflect.ValueOf(f)
+
+	c.addAction(func(args []interface{}) []interface{} {
+		c.t.Helper()
+		if c.methodType.NumIn() != v.Type().NumIn() {
+			c.t.Fatalf("wrong number of arguments in Do func for %T.%v: got %d, want %d [%s]",
+				c.receiver, c.method, v.Type().NumIn(), c.methodType.NumIn(), c.origin)
+			return nil
+		}
+		vArgs := make([]reflect.Value, len(args))
+		ft := v.Type()
+		for i := 0; i < len(args); i++ {
+			if args[i] != nil {
+				vArgs[i] = reflect.ValueOf(args[i])
+			} else {
+				// Use the zero value for the arg.
+				vArgs[i] = reflect.Zero(ft.In(i))
+			}
+		}
+		v.Call(vArgs)
+		return nil
+	})
+	return c
+}
+
+// Return declares the values to be returned by the mocked function call.
+func (c *Call) Return(rets ...interface{}) *Call {
+	c.t.Helper()
+
+	mt := c.methodType
+	if len(rets) != mt.NumOut() {
+		c.t.Fatalf("wrong number of arguments to Return for %T.%v: got %d, want %d [%s]",
+			c.receiver, c.method, len(rets), mt.NumOut(), c.origin)
+	}
+	for i, ret := range rets {
+		if got, want := reflect.TypeOf(ret), mt.Out(i); got == want {
+			// Identical types; nothing to do.
+		} else if got == nil {
+			// Nil needs special handling.
+			switch want.Kind() {
+			case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+				// ok
+			default:
+				c.t.Fatalf("argument %d to Return for %T.%v is nil, but %v is not nillable [%s]",
+					i, c.receiver, c.method, want, c.origin)
+			}
+		} else if got.AssignableTo(want) {
+			// Assignable type relation. Make the assignment now so that the generated code
+			// can return the values with a type assertion.
+			v := reflect.New(want).Elem()
+			v.Set(reflect.ValueOf(ret))
+			rets[i] = v.Interface()
+		} else {
+			c.t.Fatalf("wrong type of argument %d to Return for %T.%v: %v is not assignable to %v [%s]",
+				i, c.receiver, c.method, got, want, c.origin)
+		}
+	}
+
+	c.addAction(func([]interface{}) []interface{} {
+		return rets
+	})
+
+	return c
+}
+
+// Times declares the exact number of times a function call is expected to be executed.
+func (c *Call) Times(n int) *Call {
+	c.minCalls, c.maxCalls = n, n
+	return c
+}
+
+// SetArg declares an action that will set the nth argument's value,
+// indirected through a pointer. Or, in the case of a slice, SetArg
+// will copy value's elements into the nth argument.
+func (c *Call) SetArg(n int, value interface{}) *Call {
+	c.t.Helper()
+
+	mt := c.methodType
+	// TODO: This will break on variadic methods.
+	// We will need to check those at invocation time.
+	if n < 0 || n >= mt.NumIn() {
+		c.t.Fatalf("SetArg(%d, ...) called for a method with %d args [%s]",
+			n, mt.NumIn(), c.origin)
+	}
+	// Permit setting argument through an interface.
+	// In the interface case, we don't (nay, can't) check the type here.
+	at := mt.In(n)
+	switch at.Kind() {
+	case reflect.Ptr:
+		dt := at.Elem()
+		if vt := reflect.TypeOf(value); !vt.AssignableTo(dt) {
+			c.t.Fatalf("SetArg(%d, ...) argument is a %v, not assignable to %v [%s]",
+				n, vt, dt, c.origin)
+		}
+	case reflect.Interface:
+		// nothing to do
+	case reflect.Slice:
+		// nothing to do
+	default:
+		c.t.Fatalf("SetArg(%d, ...) referring to argument of non-pointer non-interface non-slice type %v [%s]",
+			n, at, c.origin)
+	}
+
+	c.addAction(func(args []interface{}) []interface{} {
+		v := reflect.ValueOf(value)
+		switch reflect.TypeOf(args[n]).Kind() {
+		case reflect.Slice:
+			setSlice(args[n], v)
+		default:
+			reflect.ValueOf(args[n]).Elem().Set(v)
+		}
+		return nil
+	})
+	return c
+}
+
+// isPreReq returns true if other is a direct or indirect prerequisite to c.
+func (c *Call) isPreReq(other *Call) bool {
+	for _, preReq := range c.preReqs {
+		if other == preReq || preReq.isPreReq(other) {
+			return true
+		}
+	}
+	return false
+}
+
+// After declares that the call may only match after preReq has been exhausted.
+func (c *Call) After(preReq *Call) *Call {
+	c.t.Helper()
+
+	if c == preReq {
+		c.t.Fatalf("A call isn't allowed to be its own prerequisite")
+	}
+	if preReq.isPreReq(c) {
+		c.t.Fatalf("Loop in call order: %v is a prerequisite to %v (possibly indirectly).", c, preReq)
+	}
+
+	c.preReqs = append(c.preReqs, preReq)
+	return c
+}
+
+// satisfied returns true if the minimum number of calls has been made.
+func (c *Call) satisfied() bool {
+	return c.numCalls >= c.minCalls
+}
+
+// exhausted returns true if the maximum number of calls has been made.
+func (c *Call) exhausted() bool {
+	return c.numCalls >= c.maxCalls
+}
+
+func (c *Call) String() string {
+	args := make([]string, len(c.args))
+	for i, arg := range c.args {
+		args[i] = arg.String()
+	}
+	arguments := strings.Join(args, ", ")
+	return fmt.Sprintf("%T.%v(%s) %s", c.receiver, c.method, arguments, c.origin)
+}
+
+// matches tests whether the given arguments match this expected call.
+// It returns nil on a match, or an error explaining why the arguments do not match.
+func (c *Call) matches(args []interface{}) error {
+	if !c.methodType.IsVariadic() {
+		if len(args) != len(c.args) {
+			return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d",
+				c.origin, len(args), len(c.args))
+		}
+
+		for i, m := range c.args {
+			if !m.Matches(args[i]) {
+				return fmt.Errorf(
+					"expected call at %s doesn't match the argument at index %d.\nGot: %v\nWant: %v",
+					c.origin, i, formatGottenArg(m, args[i]), m,
+				)
+			}
+		}
+	} else {
+		if len(c.args) < c.methodType.NumIn()-1 {
+			return fmt.Errorf("expected call at %s has the wrong number of matchers. Got: %d, want: %d",
+				c.origin, len(c.args), c.methodType.NumIn()-1)
+		}
+		if len(c.args) != c.methodType.NumIn() && len(args) != len(c.args) {
+			return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d",
+				c.origin, len(args), len(c.args))
+		}
+		if len(args) < len(c.args)-1 {
+			return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: greater than or equal to %d",
+				c.origin, len(args), len(c.args)-1)
+		}
+
+		for i, m := range c.args {
+			if i < c.methodType.NumIn()-1 {
+				// Non-variadic args
+				if !m.Matches(args[i]) {
+					return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
+						c.origin, strconv.Itoa(i), formatGottenArg(m, args[i]), m)
+				}
+				continue
+			}
+			// The last arg has a possibility of a variadic argument, so let it branch
+
+			// sample: Foo(a int, b int, c ...int)
+			if i < len(c.args) && i < len(args) {
+				if m.Matches(args[i]) {
+					// Got Foo(a, b, c) want Foo(matcherA, matcherB, gomock.Any())
+					// Got Foo(a, b, c) want Foo(matcherA, matcherB, someSliceMatcher)
+					// Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC)
+					// Got Foo(a, b) want Foo(matcherA, matcherB)
+					// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD)
+					continue
+				}
+			}
+
+			// The number of actual args doesn't match the number of matchers,
+			// or the last matcher is a slice and the last arg is not.
+			// If this function still matches it is because the last matcher
+			// matches all the remaining arguments or the lack of any.
+			// Convert the remaining arguments, if any, into a slice of the
+			// expected type.
+			vArgsType := c.methodType.In(c.methodType.NumIn() - 1)
+			vArgs := reflect.MakeSlice(vArgsType, 0, len(args)-i)
+			for _, arg := range args[i:] {
+				vArgs = reflect.Append(vArgs, reflect.ValueOf(arg))
+			}
+			if m.Matches(vArgs.Interface()) {
+				// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, gomock.Any())
+				// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, someSliceMatcher)
+				// Got Foo(a, b) want Foo(matcherA, matcherB, gomock.Any())
+				// Got Foo(a, b) want Foo(matcherA, matcherB, someEmptySliceMatcher)
+				break
+			}
+			// Wrong number of matchers or not match. Fail.
+			// Got Foo(a, b) want Foo(matcherA, matcherB, matcherC, matcherD)
+			// Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC, matcherD)
+			// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE)
+			// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD)
+			// Got Foo(a, b, c) want Foo(matcherA, matcherB)
+
+			return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
+				c.origin, strconv.Itoa(i), formatGottenArg(m, args[i:]), c.args[i])
+		}
+	}
+
+	// Check that all prerequisite calls have been satisfied.
+	for _, preReqCall := range c.preReqs {
+		if !preReqCall.satisfied() {
+			return fmt.Errorf("expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v",
+				c.origin, preReqCall, c)
+		}
+	}
+
+	// Check that the call is not exhausted.
+	if c.exhausted() {
+		return fmt.Errorf("expected call at %s has already been called the max number of times", c.origin)
+	}
+
+	return nil
+}
+
+// dropPrereqs tells the expected Call to stop re-checking prerequisite calls
+// and returns its current set.
+func (c *Call) dropPrereqs() (preReqs []*Call) {
+	preReqs = c.preReqs
+	c.preReqs = nil
+	return
+}
+
+func (c *Call) call() []func([]interface{}) []interface{} {
+	c.numCalls++
+	return c.actions
+}
+
+// InOrder declares that the given calls should occur in order.
+func InOrder(calls ...*Call) {
+	for i := 1; i < len(calls); i++ {
+		calls[i].After(calls[i-1])
+	}
+}
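+
+// Example (illustrative sketch; NewMockConn is a hypothetical mockgen-generated mock):
+//
+//   m := NewMockConn(ctrl)
+//   gomock.InOrder(
+//       m.EXPECT().Dial("host:9191"),
+//       m.EXPECT().Send(gomock.Any()),
+//       m.EXPECT().Close(),
+//   )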
+
+func setSlice(arg interface{}, v reflect.Value) {
+	va := reflect.ValueOf(arg)
+	for i := 0; i < v.Len(); i++ {
+		va.Index(i).Set(v.Index(i))
+	}
+}
+
+func (c *Call) addAction(action func([]interface{}) []interface{}) {
+	c.actions = append(c.actions, action)
+}
+
+func formatGottenArg(m Matcher, arg interface{}) string {
+	got := fmt.Sprintf("%v (%T)", arg, arg)
+	if gs, ok := m.(GotFormatter); ok {
+		got = gs.Got(arg)
+	}
+	return got
+}
diff --git a/vendor/github.com/golang/mock/gomock/callset.go b/vendor/github.com/golang/mock/gomock/callset.go
new file mode 100644
index 0000000..49dba78
--- /dev/null
+++ b/vendor/github.com/golang/mock/gomock/callset.go
@@ -0,0 +1,113 @@
+// Copyright 2011 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gomock
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// callSet represents a set of expected calls, indexed by receiver and method
+// name.
+type callSet struct {
+	// Calls that are still expected.
+	expected map[callSetKey][]*Call
+	// Calls that have been exhausted.
+	exhausted map[callSetKey][]*Call
+}
+
+// callSetKey is the key in the maps in callSet
+type callSetKey struct {
+	receiver interface{}
+	fname    string
+}
+
+func newCallSet() *callSet {
+	return &callSet{make(map[callSetKey][]*Call), make(map[callSetKey][]*Call)}
+}
+
+// Add adds a new expected call.
+func (cs callSet) Add(call *Call) {
+	key := callSetKey{call.receiver, call.method}
+	m := cs.expected
+	if call.exhausted() {
+		m = cs.exhausted
+	}
+	m[key] = append(m[key], call)
+}
+
+// Remove removes an expected call.
+func (cs callSet) Remove(call *Call) {
+	key := callSetKey{call.receiver, call.method}
+	calls := cs.expected[key]
+	for i, c := range calls {
+		if c == call {
+			// maintain order for remaining calls
+			cs.expected[key] = append(calls[:i], calls[i+1:]...)
+			cs.exhausted[key] = append(cs.exhausted[key], call)
+			break
+		}
+	}
+}
+
+// FindMatch searches for a matching call. It returns an error with an explanatory message if no call matched.
+func (cs callSet) FindMatch(receiver interface{}, method string, args []interface{}) (*Call, error) {
+	key := callSetKey{receiver, method}
+
+	// Search through the expected calls.
+	expected := cs.expected[key]
+	var callsErrors bytes.Buffer
+	for _, call := range expected {
+		err := call.matches(args)
+		if err != nil {
+			_, _ = fmt.Fprintf(&callsErrors, "\n%v", err)
+		} else {
+			return call, nil
+		}
+	}
+
+	// If we haven't found a match then search through the exhausted calls so we
+	// get useful error messages.
+	exhausted := cs.exhausted[key]
+	for _, call := range exhausted {
+		if err := call.matches(args); err != nil {
+			_, _ = fmt.Fprintf(&callsErrors, "\n%v", err)
+			continue
+		}
+		_, _ = fmt.Fprintf(
+			&callsErrors, "all expected calls for method %q have been exhausted", method,
+		)
+	}
+
+	if len(expected)+len(exhausted) == 0 {
+		_, _ = fmt.Fprintf(&callsErrors, "there are no expected calls of the method %q for that receiver", method)
+	}
+
+	return nil, errors.New(callsErrors.String())
+}
+
+// Failures returns the calls that are not satisfied.
+func (cs callSet) Failures() []*Call {
+	failures := make([]*Call, 0, len(cs.expected))
+	for _, calls := range cs.expected {
+		for _, call := range calls {
+			if !call.satisfied() {
+				failures = append(failures, call)
+			}
+		}
+	}
+	return failures
+}
diff --git a/vendor/github.com/golang/mock/gomock/controller.go b/vendor/github.com/golang/mock/gomock/controller.go
new file mode 100644
index 0000000..f054200
--- /dev/null
+++ b/vendor/github.com/golang/mock/gomock/controller.go
@@ -0,0 +1,336 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package gomock is a mock framework for Go.
+//
+// Standard usage:
+//   (1) Define an interface that you wish to mock.
+//         type MyInterface interface {
+//           SomeMethod(x int64, y string)
+//         }
+//   (2) Use mockgen to generate a mock from the interface.
+//   (3) Use the mock in a test:
+//         func TestMyThing(t *testing.T) {
+//           mockCtrl := gomock.NewController(t)
+//           defer mockCtrl.Finish()
+//
+//           mockObj := something.NewMockMyInterface(mockCtrl)
+//           mockObj.EXPECT().SomeMethod(4, "blah")
+//           // pass mockObj to a real object and play with it.
+//         }
+//
+// By default, expected calls are not enforced to run in any particular order.
+// Call order dependency can be enforced by use of InOrder and/or Call.After.
+// Call.After can create more varied call order dependencies, but InOrder is
+// often more convenient.
+//
+// The following examples create equivalent call order dependencies.
+//
+// Example of using Call.After to chain expected call order:
+//
+//     firstCall := mockObj.EXPECT().SomeMethod(1, "first")
+//     secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall)
+//     mockObj.EXPECT().SomeMethod(3, "third").After(secondCall)
+//
+// Example of using InOrder to declare expected call order:
+//
+//     gomock.InOrder(
+//         mockObj.EXPECT().SomeMethod(1, "first"),
+//         mockObj.EXPECT().SomeMethod(2, "second"),
+//         mockObj.EXPECT().SomeMethod(3, "third"),
+//     )
+package gomock
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"runtime"
+	"sync"
+)
+
+// A TestReporter is something that can be used to report test failures.  It
+// is satisfied by the standard library's *testing.T.
+type TestReporter interface {
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+}
+
+// TestHelper is a TestReporter that has the Helper method.  It is satisfied
+// by the standard library's *testing.T.
+type TestHelper interface {
+	TestReporter
+	Helper()
+}
+
+// cleanuper is used to check if TestHelper also has the `Cleanup` method. A
+// common pattern is to pass in a `*testing.T` to
+// `NewController(t TestReporter)`. In Go 1.14+, `*testing.T` has a cleanup
+// method. This can be utilized to call `Finish()` so the caller of this library
+// does not have to.
+type cleanuper interface {
+	Cleanup(func())
+}
+
+// A Controller represents the top-level control of a mock ecosystem.  It
+// defines the scope and lifetime of mock objects, as well as their
+// expectations.  It is safe to call Controller's methods from multiple
+// goroutines. Each test should create a new Controller and invoke Finish via
+// defer.
+//
+//   func TestFoo(t *testing.T) {
+//     ctrl := gomock.NewController(t)
+//     defer ctrl.Finish()
+//     // ..
+//   }
+//
+//   func TestBar(t *testing.T) {
+//     t.Run("Sub-Test-1", func(st *testing.T) {
+//       ctrl := gomock.NewController(st)
+//       defer ctrl.Finish()
+//       // ..
+//     })
+//     t.Run("Sub-Test-2", func(st *testing.T) {
+//       ctrl := gomock.NewController(st)
+//       defer ctrl.Finish()
+//       // ..
+//     })
+//   }
+type Controller struct {
+	// T should only be called within a generated mock. It is not intended to
+	// be used in user code and may be changed in future versions. T is the
+	// TestReporter passed in when creating the Controller via NewController.
+	// If the TestReporter does not implement a TestHelper it will be wrapped
+	// with a nopTestHelper.
+	T             TestHelper
+	mu            sync.Mutex
+	expectedCalls *callSet
+	finished      bool
+}
+
+// NewController returns a new Controller. It is the preferred way to create a
+// Controller.
+//
+// Since Go 1.14, if you pass a *testing.T into this function you no
+// longer need to call ctrl.Finish() in your test methods.
+func NewController(t TestReporter) *Controller {
+	h, ok := t.(TestHelper)
+	if !ok {
+		h = &nopTestHelper{t}
+	}
+	ctrl := &Controller{
+		T:             h,
+		expectedCalls: newCallSet(),
+	}
+	if c, ok := isCleanuper(ctrl.T); ok {
+		c.Cleanup(func() {
+			ctrl.T.Helper()
+			ctrl.finish(true, nil)
+		})
+	}
+
+	return ctrl
+}
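+
+// Example (illustrative sketch; NewMockFoo is a hypothetical mockgen-generated mock):
+//
+//   func TestFoo(t *testing.T) {
+//       ctrl := gomock.NewController(t)
+//       // *testing.T implements Cleanup in Go 1.14+, so Finish is called
+//       // automatically and no explicit defer ctrl.Finish() is needed.
+//       m := NewMockFoo(ctrl)
+//       m.EXPECT().Close()
+//       m.Close()
+//   }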
+
+type cancelReporter struct {
+	t      TestHelper
+	cancel func()
+}
+
+func (r *cancelReporter) Errorf(format string, args ...interface{}) {
+	r.t.Errorf(format, args...)
+}
+func (r *cancelReporter) Fatalf(format string, args ...interface{}) {
+	defer r.cancel()
+	r.t.Fatalf(format, args...)
+}
+
+func (r *cancelReporter) Helper() {
+	r.t.Helper()
+}
+
+// WithContext returns a new Controller and a Context, which is cancelled on any
+// fatal failure.
+func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) {
+	h, ok := t.(TestHelper)
+	if !ok {
+		h = &nopTestHelper{t: t}
+	}
+
+	ctx, cancel := context.WithCancel(ctx)
+	return NewController(&cancelReporter{t: h, cancel: cancel}), ctx
+}
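+
+// Example (illustrative sketch): obtain a context that is cancelled when the
+// controller reports a fatal failure, such as an unexpected call.
+//
+//   ctrl, ctx := gomock.WithContext(context.Background(), t)
+//   defer ctrl.Finish()
+//   // Pass ctx to the code under test; a fatal mock failure cancels it.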
+
+type nopTestHelper struct {
+	t TestReporter
+}
+
+func (h *nopTestHelper) Errorf(format string, args ...interface{}) {
+	h.t.Errorf(format, args...)
+}
+func (h *nopTestHelper) Fatalf(format string, args ...interface{}) {
+	h.t.Fatalf(format, args...)
+}
+
+func (h nopTestHelper) Helper() {}
+
+// RecordCall is called by a mock. It should not be called by user code.
+func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...interface{}) *Call {
+	ctrl.T.Helper()
+
+	recv := reflect.ValueOf(receiver)
+	for i := 0; i < recv.Type().NumMethod(); i++ {
+		if recv.Type().Method(i).Name == method {
+			return ctrl.RecordCallWithMethodType(receiver, method, recv.Method(i).Type(), args...)
+		}
+	}
+	ctrl.T.Fatalf("gomock: failed finding method %s on %T", method, receiver)
+	panic("unreachable")
+}
+
+// RecordCallWithMethodType is called by a mock. It should not be called by user code.
+func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call {
+	ctrl.T.Helper()
+
+	call := newCall(ctrl.T, receiver, method, methodType, args...)
+
+	ctrl.mu.Lock()
+	defer ctrl.mu.Unlock()
+	ctrl.expectedCalls.Add(call)
+
+	return call
+}
+
+// Call is called by a mock. It should not be called by user code.
+func (ctrl *Controller) Call(receiver interface{}, method string, args ...interface{}) []interface{} {
+	ctrl.T.Helper()
+
+	// Nest this code so we can use defer to make sure the lock is released.
+	actions := func() []func([]interface{}) []interface{} {
+		ctrl.T.Helper()
+		ctrl.mu.Lock()
+		defer ctrl.mu.Unlock()
+
+		expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args)
+		if err != nil {
+			// callerInfo's skip should be updated if the number of calls between the user's test
+			// and this line changes, i.e. this code is wrapped in another anonymous function.
+			// 0 is us, 1 is controller.Call(), 2 is the generated mock, and 3 is the user's test.
+			origin := callerInfo(3)
+			ctrl.T.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, args, origin, err)
+		}
+
+		// Two things happen here:
+		// * the matching call no longer needs to check prerequisite calls,
+		// * and the prerequisite calls are no longer expected, so remove them.
+		preReqCalls := expected.dropPrereqs()
+		for _, preReqCall := range preReqCalls {
+			ctrl.expectedCalls.Remove(preReqCall)
+		}
+
+		actions := expected.call()
+		if expected.exhausted() {
+			ctrl.expectedCalls.Remove(expected)
+		}
+		return actions
+	}()
+
+	var rets []interface{}
+	for _, action := range actions {
+		if r := action(args); r != nil {
+			rets = r
+		}
+	}
+
+	return rets
+}
+
+// Finish checks to see if all the methods that were expected to be called
+// were called. It should be invoked for each Controller. It is not idempotent
+// and therefore can only be invoked once.
+//
+// Since Go 1.14, if you pass a *testing.T into the NewController function you no
+// longer need to call ctrl.Finish() in your test methods.
+func (ctrl *Controller) Finish() {
+	// If we're currently panicking, it is probably because this is a deferred call;
+	// recover must run inside the deferred function itself to capture the panic value.
+	err := recover()
+	ctrl.finish(false, err)
+}
+
+func (ctrl *Controller) finish(cleanup bool, panicErr interface{}) {
+	ctrl.T.Helper()
+
+	ctrl.mu.Lock()
+	defer ctrl.mu.Unlock()
+
+	if ctrl.finished {
+		if _, ok := isCleanuper(ctrl.T); !ok {
+			ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.")
+		}
+		return
+	}
+	ctrl.finished = true
+
+	// Short-circuit, pass through the panic.
+	if panicErr != nil {
+		panic(panicErr)
+	}
+
+	// Check that all remaining expected calls are satisfied.
+	failures := ctrl.expectedCalls.Failures()
+	for _, call := range failures {
+		ctrl.T.Errorf("missing call(s) to %v", call)
+	}
+	if len(failures) != 0 {
+		if !cleanup {
+			ctrl.T.Fatalf("aborting test due to missing call(s)")
+			return
+		}
+		ctrl.T.Errorf("aborting test due to missing call(s)")
+	}
+}
+
+// callerInfo returns the file:line of the call site. skip is the number
+// of stack frames to skip when reporting. 0 is callerInfo's call site.
+func callerInfo(skip int) string {
+	if _, file, line, ok := runtime.Caller(skip + 1); ok {
+		return fmt.Sprintf("%s:%d", file, line)
+	}
+	return "unknown file"
+}
+
+// isCleanuper checks if t's base TestReporter has a Cleanup method.
+func isCleanuper(t TestReporter) (cleanuper, bool) {
+	tr := unwrapTestReporter(t)
+	c, ok := tr.(cleanuper)
+	return c, ok
+}
+
+// unwrapTestReporter unwraps TestReporter to the base implementation.
+func unwrapTestReporter(t TestReporter) TestReporter {
+	tr := t
+	switch nt := t.(type) {
+	case *cancelReporter:
+		tr = nt.t
+		if h, check := tr.(*nopTestHelper); check {
+			tr = h.t
+		}
+	case *nopTestHelper:
+		tr = nt.t
+	default:
+		// not wrapped
+	}
+	return tr
+}
diff --git a/vendor/github.com/golang/mock/gomock/matchers.go b/vendor/github.com/golang/mock/gomock/matchers.go
new file mode 100644
index 0000000..2822fb2
--- /dev/null
+++ b/vendor/github.com/golang/mock/gomock/matchers.go
@@ -0,0 +1,341 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gomock
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// A Matcher is a representation of a class of values.
+// It is used to represent the valid or expected arguments to a mocked method.
+type Matcher interface {
+	// Matches returns whether x is a match.
+	Matches(x interface{}) bool
+
+	// String describes what the matcher matches.
+	String() string
+}
+
+// WantFormatter replaces the given Matcher's String() method with the given
+// Stringer. This allows control over how the "Want" value is formatted when
+// it is printed.
+func WantFormatter(s fmt.Stringer, m Matcher) Matcher {
+	type matcher interface {
+		Matches(x interface{}) bool
+	}
+
+	return struct {
+		matcher
+		fmt.Stringer
+	}{
+		matcher:  m,
+		Stringer: s,
+	}
+}
+
+// StringerFunc type is an adapter to allow the use of ordinary functions as
+// a Stringer. If f is a function with the appropriate signature,
+// StringerFunc(f) is a Stringer that calls f.
+type StringerFunc func() string
+
+// String implements fmt.Stringer.
+func (f StringerFunc) String() string {
+	return f()
+}
+
+// GotFormatter is used to better print failure messages. If a matcher
+// implements GotFormatter, it will use the result from Got when printing
+// the failure message.
+type GotFormatter interface {
+	// Got is invoked with the received value. The result is used when
+	// printing the failure message.
+	Got(got interface{}) string
+}
+
+// GotFormatterFunc type is an adapter to allow the use of ordinary
+// functions as a GotFormatter. If f is a function with the appropriate
+// signature, GotFormatterFunc(f) is a GotFormatter that calls f.
+type GotFormatterFunc func(got interface{}) string
+
+// Got implements GotFormatter.
+func (f GotFormatterFunc) Got(got interface{}) string {
+	return f(got)
+}
+
+// GotFormatterAdapter attaches a GotFormatter to a Matcher.
+func GotFormatterAdapter(s GotFormatter, m Matcher) Matcher {
+	return struct {
+		GotFormatter
+		Matcher
+	}{
+		GotFormatter: s,
+		Matcher:      m,
+	}
+}
+
+type anyMatcher struct{}
+
+func (anyMatcher) Matches(interface{}) bool {
+	return true
+}
+
+func (anyMatcher) String() string {
+	return "is anything"
+}
+
+type eqMatcher struct {
+	x interface{}
+}
+
+func (e eqMatcher) Matches(x interface{}) bool {
+	// Handle the case where either value is nil.
+	if e.x == nil || x == nil {
+		return reflect.DeepEqual(e.x, x)
+	}
+
+	// Check if the types are assignable and convert them to a common type.
+	x1Val := reflect.ValueOf(e.x)
+	x2Val := reflect.ValueOf(x)
+
+	if x1Val.Type().AssignableTo(x2Val.Type()) {
+		x1ValConverted := x1Val.Convert(x2Val.Type())
+		return reflect.DeepEqual(x1ValConverted.Interface(), x2Val.Interface())
+	}
+
+	return false
+}
+
+func (e eqMatcher) String() string {
+	return fmt.Sprintf("is equal to %v (%T)", e.x, e.x)
+}
+
+type nilMatcher struct{}
+
+func (nilMatcher) Matches(x interface{}) bool {
+	if x == nil {
+		return true
+	}
+
+	v := reflect.ValueOf(x)
+	switch v.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map,
+		reflect.Ptr, reflect.Slice:
+		return v.IsNil()
+	}
+
+	return false
+}
+
+func (nilMatcher) String() string {
+	return "is nil"
+}
+
+type notMatcher struct {
+	m Matcher
+}
+
+func (n notMatcher) Matches(x interface{}) bool {
+	return !n.m.Matches(x)
+}
+
+func (n notMatcher) String() string {
+	return "not(" + n.m.String() + ")"
+}
+
+type assignableToTypeOfMatcher struct {
+	targetType reflect.Type
+}
+
+func (m assignableToTypeOfMatcher) Matches(x interface{}) bool {
+	return reflect.TypeOf(x).AssignableTo(m.targetType)
+}
+
+func (m assignableToTypeOfMatcher) String() string {
+	return "is assignable to " + m.targetType.Name()
+}
+
+type allMatcher struct {
+	matchers []Matcher
+}
+
+func (am allMatcher) Matches(x interface{}) bool {
+	for _, m := range am.matchers {
+		if !m.Matches(x) {
+			return false
+		}
+	}
+	return true
+}
+
+func (am allMatcher) String() string {
+	ss := make([]string, 0, len(am.matchers))
+	for _, matcher := range am.matchers {
+		ss = append(ss, matcher.String())
+	}
+	return strings.Join(ss, "; ")
+}
+
+type lenMatcher struct {
+	i int
+}
+
+func (m lenMatcher) Matches(x interface{}) bool {
+	v := reflect.ValueOf(x)
+	switch v.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == m.i
+	default:
+		return false
+	}
+}
+
+func (m lenMatcher) String() string {
+	return fmt.Sprintf("has length %d", m.i)
+}
+
+type inAnyOrderMatcher struct {
+	x interface{}
+}
+
+func (m inAnyOrderMatcher) Matches(x interface{}) bool {
+	given, ok := m.prepareValue(x)
+	if !ok {
+		return false
+	}
+	wanted, ok := m.prepareValue(m.x)
+	if !ok {
+		return false
+	}
+
+	if given.Len() != wanted.Len() {
+		return false
+	}
+
+	usedFromGiven := make([]bool, given.Len())
+	foundFromWanted := make([]bool, wanted.Len())
+	for i := 0; i < wanted.Len(); i++ {
+		wantedMatcher := Eq(wanted.Index(i).Interface())
+		for j := 0; j < given.Len(); j++ {
+			if usedFromGiven[j] {
+				continue
+			}
+			if wantedMatcher.Matches(given.Index(j).Interface()) {
+				foundFromWanted[i] = true
+				usedFromGiven[j] = true
+				break
+			}
+		}
+	}
+
+	missingFromWanted := 0
+	for _, found := range foundFromWanted {
+		if !found {
+			missingFromWanted++
+		}
+	}
+	extraInGiven := 0
+	for _, used := range usedFromGiven {
+		if !used {
+			extraInGiven++
+		}
+	}
+
+	return extraInGiven == 0 && missingFromWanted == 0
+}
+
+func (m inAnyOrderMatcher) prepareValue(x interface{}) (reflect.Value, bool) {
+	xValue := reflect.ValueOf(x)
+	switch xValue.Kind() {
+	case reflect.Slice, reflect.Array:
+		return xValue, true
+	default:
+		return reflect.Value{}, false
+	}
+}
+
+func (m inAnyOrderMatcher) String() string {
+	return fmt.Sprintf("has the same elements as %v", m.x)
+}
+
+// Constructors
+
+// All returns a composite Matcher that returns true if and only if all of the
+// matchers return true.
+func All(ms ...Matcher) Matcher { return allMatcher{ms} }
+
+// Any returns a matcher that always matches.
+func Any() Matcher { return anyMatcher{} }
+
+// Eq returns a matcher that matches on equality.
+//
+// Example usage:
+//   Eq(5).Matches(5) // returns true
+//   Eq(5).Matches(4) // returns false
+func Eq(x interface{}) Matcher { return eqMatcher{x} }
+
+// Len returns a matcher that matches on length. This matcher returns false if
+// it is compared to a type that is not an array, chan, map, slice, or string.
+func Len(i int) Matcher {
+	return lenMatcher{i}
+}
+
+// Nil returns a matcher that matches if the received value is nil.
+//
+// Example usage:
+//   var x *bytes.Buffer
+//   Nil().Matches(x) // returns true
+//   x = &bytes.Buffer{}
+//   Nil().Matches(x) // returns false
+func Nil() Matcher { return nilMatcher{} }
+
+// Not reverses the results of its given child matcher.
+//
+// Example usage:
+//   Not(Eq(5)).Matches(4) // returns true
+//   Not(Eq(5)).Matches(5) // returns false
+func Not(x interface{}) Matcher {
+	if m, ok := x.(Matcher); ok {
+		return notMatcher{m}
+	}
+	return notMatcher{Eq(x)}
+}
+
+// AssignableToTypeOf is a Matcher that matches if the parameter to the mock
+// function is assignable to the type of the parameter to this function.
+//
+// Example usage:
+//   var s fmt.Stringer = &bytes.Buffer{}
+//   AssignableToTypeOf(s).Matches(time.Second) // returns true
+//   AssignableToTypeOf(s).Matches(99) // returns false
+//
+//   var ctx = reflect.TypeOf((*context.Context)(nil)).Elem()
+//   AssignableToTypeOf(ctx).Matches(context.Background()) // returns true
+func AssignableToTypeOf(x interface{}) Matcher {
+	if xt, ok := x.(reflect.Type); ok {
+		return assignableToTypeOfMatcher{xt}
+	}
+	return assignableToTypeOfMatcher{reflect.TypeOf(x)}
+}
+
+// InAnyOrder is a Matcher that returns true for collections of the same elements ignoring the order.
+//
+// Example usage:
+//   InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 3, 2}) // returns true
+//   InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 2}) // returns false
+func InAnyOrder(x interface{}) Matcher {
+	return inAnyOrderMatcher{x}
+}
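+
+// Example (illustrative sketch; NewMockRepo is a hypothetical mockgen-generated
+// mock) combining several matchers in one expectation:
+//
+//   m := NewMockRepo(ctrl)
+//   m.EXPECT().Tags(
+//       gomock.Not(gomock.Nil()),              // any non-nil argument
+//       gomock.Len(2),                         // a collection of length 2
+//       gomock.InAnyOrder([]string{"a", "b"}), // "a" and "b" in any order
+//   )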
diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go
new file mode 100644
index 0000000..60e82ca
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go
@@ -0,0 +1,524 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jsonpb
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/encoding/protojson"
+	protoV2 "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapJSONUnmarshalV2 = false
+
+// UnmarshalNext unmarshals the next JSON object from d into m.
+func UnmarshalNext(d *json.Decoder, m proto.Message) error {
+	return new(Unmarshaler).UnmarshalNext(d, m)
+}
+
+// Unmarshal unmarshals a JSON object from r into m.
+func Unmarshal(r io.Reader, m proto.Message) error {
+	return new(Unmarshaler).Unmarshal(r, m)
+}
+
+// UnmarshalString unmarshals a JSON object from s into m.
+func UnmarshalString(s string, m proto.Message) error {
+	return new(Unmarshaler).Unmarshal(strings.NewReader(s), m)
+}
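+
+// Example (illustrative sketch; pb.Device is a hypothetical generated message type):
+//
+//   var dev pb.Device
+//   if err := jsonpb.UnmarshalString(`{"serialNumber": "ABC123"}`, &dev); err != nil {
+//       log.Fatal(err)
+//   }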
+
+// Unmarshaler is a configurable object for converting from a JSON
+// representation to a protocol buffer object.
+type Unmarshaler struct {
+	// AllowUnknownFields specifies whether to allow messages to contain
+	// unknown JSON fields, as opposed to failing to unmarshal.
+	AllowUnknownFields bool
+
+	// AnyResolver is used to resolve the google.protobuf.Any well-known type.
+	// If unset, the global registry is used by default.
+	AnyResolver AnyResolver
+}
+
+// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
+// they are unmarshaled from JSON. Messages that implement this should also
+// implement JSONPBMarshaler so that the custom format can be produced.
+//
+// The JSON unmarshaling must follow the JSON to proto specification:
+//	https://developers.google.com/protocol-buffers/docs/proto3#json
+//
+// Deprecated: Custom types should implement protobuf reflection instead.
+type JSONPBUnmarshaler interface {
+	UnmarshalJSONPB(*Unmarshaler, []byte) error
+}
+
+// Unmarshal unmarshals a JSON object from r into m.
+func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
+	return u.UnmarshalNext(json.NewDecoder(r), m)
+}
+
+// UnmarshalNext unmarshals the next JSON object from d into m.
+func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
+	if m == nil {
+		return errors.New("invalid nil message")
+	}
+
+	// Parse the next JSON object from the stream.
+	raw := json.RawMessage{}
+	if err := d.Decode(&raw); err != nil {
+		return err
+	}
+
+	// Check for custom unmarshalers first since they may not properly
+	// implement protobuf reflection that the logic below relies on.
+	if jsu, ok := m.(JSONPBUnmarshaler); ok {
+		return jsu.UnmarshalJSONPB(u, raw)
+	}
+
+	mr := proto.MessageReflect(m)
+
+	// NOTE: For historical reasons, a top-level null is treated as a noop.
+	// This is incorrect, but kept for compatibility.
+	if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
+		return nil
+	}
+
+	if wrapJSONUnmarshalV2 {
+		// NOTE: If input message is non-empty, we need to preserve merge semantics
+		// of the old jsonpb implementation. These semantics are not supported by
+		// the protobuf JSON specification.
+		isEmpty := true
+		mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
+			isEmpty = false // at least one iteration implies non-empty
+			return false
+		})
+		if !isEmpty {
+			// Perform unmarshaling into a newly allocated, empty message.
+			mr = mr.New()
+
+			// Use a defer to copy all unmarshaled fields into the original message.
+			dst := proto.MessageReflect(m)
+			defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+				dst.Set(fd, v)
+				return true
+			})
+		}
+
+		// Unmarshal using the v2 JSON unmarshaler.
+		opts := protojson.UnmarshalOptions{
+			DiscardUnknown: u.AllowUnknownFields,
+		}
+		if u.AnyResolver != nil {
+			opts.Resolver = anyResolver{u.AnyResolver}
+		}
+		return opts.Unmarshal(raw, mr.Interface())
+	} else {
+		if err := u.unmarshalMessage(mr, raw); err != nil {
+			return err
+		}
+		return protoV2.CheckInitialized(mr.Interface())
+	}
+}
+
+func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
+	md := m.Descriptor()
+	fds := md.Fields()
+
+	if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
+		return jsu.UnmarshalJSONPB(u, in)
+	}
+
+	if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
+		return nil
+	}
+
+	switch wellKnownType(md.FullName()) {
+	case "Any":
+		var jsonObject map[string]json.RawMessage
+		if err := json.Unmarshal(in, &jsonObject); err != nil {
+			return err
+		}
+
+		rawTypeURL, ok := jsonObject["@type"]
+		if !ok {
+			return errors.New("Any JSON doesn't have '@type'")
+		}
+		typeURL, err := unquoteString(string(rawTypeURL))
+		if err != nil {
+			return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
+		}
+		m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))
+
+		var m2 protoreflect.Message
+		if u.AnyResolver != nil {
+			mi, err := u.AnyResolver.Resolve(typeURL)
+			if err != nil {
+				return err
+			}
+			m2 = proto.MessageReflect(mi)
+		} else {
+			mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
+			if err != nil {
+				if err == protoregistry.NotFound {
+					return fmt.Errorf("could not resolve Any message type: %v", typeURL)
+				}
+				return err
+			}
+			m2 = mt.New()
+		}
+
+		if wellKnownType(m2.Descriptor().FullName()) != "" {
+			rawValue, ok := jsonObject["value"]
+			if !ok {
+				return errors.New("Any JSON doesn't have 'value'")
+			}
+			if err := u.unmarshalMessage(m2, rawValue); err != nil {
+				return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
+			}
+		} else {
+			delete(jsonObject, "@type")
+			rawJSON, err := json.Marshal(jsonObject)
+			if err != nil {
+				return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
+			}
+			if err = u.unmarshalMessage(m2, rawJSON); err != nil {
+				return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
+			}
+		}
+
+		rawWire, err := protoV2.Marshal(m2.Interface())
+		if err != nil {
+			return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
+		}
+		m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
+		return nil
+	case "BoolValue", "BytesValue", "StringValue",
+		"Int32Value", "UInt32Value", "FloatValue",
+		"Int64Value", "UInt64Value", "DoubleValue":
+		fd := fds.ByNumber(1)
+		v, err := u.unmarshalValue(m.NewField(fd), in, fd)
+		if err != nil {
+			return err
+		}
+		m.Set(fd, v)
+		return nil
+	case "Duration":
+		v, err := unquoteString(string(in))
+		if err != nil {
+			return err
+		}
+		d, err := time.ParseDuration(v)
+		if err != nil {
+			return fmt.Errorf("bad Duration: %v", err)
+		}
+
+		sec := d.Nanoseconds() / 1e9
+		nsec := d.Nanoseconds() % 1e9
+		m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
+		m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
+		return nil
+	case "Timestamp":
+		v, err := unquoteString(string(in))
+		if err != nil {
+			return err
+		}
+		t, err := time.Parse(time.RFC3339Nano, v)
+		if err != nil {
+			return fmt.Errorf("bad Timestamp: %v", err)
+		}
+
+		sec := t.Unix()
+		nsec := t.Nanosecond()
+		m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
+		m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
+		return nil
+	case "Value":
+		switch {
+		case string(in) == "null":
+			m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
+		case string(in) == "true":
+			m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
+		case string(in) == "false":
+			m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
+		case hasPrefixAndSuffix('"', in, '"'):
+			s, err := unquoteString(string(in))
+			if err != nil {
+				return fmt.Errorf("unrecognized type for Value %q", in)
+			}
+			m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
+		case hasPrefixAndSuffix('[', in, ']'):
+			v := m.Mutable(fds.ByNumber(6))
+			return u.unmarshalMessage(v.Message(), in)
+		case hasPrefixAndSuffix('{', in, '}'):
+			v := m.Mutable(fds.ByNumber(5))
+			return u.unmarshalMessage(v.Message(), in)
+		default:
+			f, err := strconv.ParseFloat(string(in), 0)
+			if err != nil {
+				return fmt.Errorf("unrecognized type for Value %q", in)
+			}
+			m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
+		}
+		return nil
+	case "ListValue":
+		var jsonArray []json.RawMessage
+		if err := json.Unmarshal(in, &jsonArray); err != nil {
+			return fmt.Errorf("bad ListValue: %v", err)
+		}
+
+		lv := m.Mutable(fds.ByNumber(1)).List()
+		for _, raw := range jsonArray {
+			ve := lv.NewElement()
+			if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
+				return err
+			}
+			lv.Append(ve)
+		}
+		return nil
+	case "Struct":
+		var jsonObject map[string]json.RawMessage
+		if err := json.Unmarshal(in, &jsonObject); err != nil {
+			return fmt.Errorf("bad StructValue: %v", err)
+		}
+
+		mv := m.Mutable(fds.ByNumber(1)).Map()
+		for key, raw := range jsonObject {
+			kv := protoreflect.ValueOf(key).MapKey()
+			vv := mv.NewValue()
+			if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
+				return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
+			}
+			mv.Set(kv, vv)
+		}
+		return nil
+	}
+
+	var jsonObject map[string]json.RawMessage
+	if err := json.Unmarshal(in, &jsonObject); err != nil {
+		return err
+	}
+
+	// Handle known fields.
+	for i := 0; i < fds.Len(); i++ {
+		fd := fds.Get(i)
+		if fd.IsWeak() && fd.Message().IsPlaceholder() {
+			continue //  weak reference is not linked in
+		}
+
+		// Search for any raw JSON value associated with this field.
+		var raw json.RawMessage
+		name := string(fd.Name())
+		if fd.Kind() == protoreflect.GroupKind {
+			name = string(fd.Message().Name())
+		}
+		if v, ok := jsonObject[name]; ok {
+			delete(jsonObject, name)
+			raw = v
+		}
+		name = string(fd.JSONName())
+		if v, ok := jsonObject[name]; ok {
+			delete(jsonObject, name)
+			raw = v
+		}
+
+		field := m.NewField(fd)
+		// Unmarshal the field value.
+		if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
+			continue
+		}
+		v, err := u.unmarshalValue(field, raw, fd)
+		if err != nil {
+			return err
+		}
+		m.Set(fd, v)
+	}
+
+	// Handle extension fields.
+	for name, raw := range jsonObject {
+		if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
+			continue
+		}
+
+		// Resolve the extension field by name.
+		xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
+		xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
+		if xt == nil && isMessageSet(md) {
+			xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
+		}
+		if xt == nil {
+			continue
+		}
+		delete(jsonObject, name)
+		fd := xt.TypeDescriptor()
+		if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
+			return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
+		}
+
+		field := m.NewField(fd)
+		// Unmarshal the field value.
+		if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
+			continue
+		}
+		v, err := u.unmarshalValue(field, raw, fd)
+		if err != nil {
+			return err
+		}
+		m.Set(fd, v)
+	}
+
+	if !u.AllowUnknownFields && len(jsonObject) > 0 {
+		for name := range jsonObject {
+			return fmt.Errorf("unknown field %q in %v", name, md.FullName())
+		}
+	}
+	return nil
+}
+
+func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
+	if md := fd.Message(); md != nil {
+		return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated
+	}
+	return false
+}
+
+func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
+	if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
+		_, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
+		return ok
+	}
+	return false
+}
+
+func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	switch {
+	case fd.IsList():
+		var jsonArray []json.RawMessage
+		if err := json.Unmarshal(in, &jsonArray); err != nil {
+			return v, err
+		}
+		lv := v.List()
+		for _, raw := range jsonArray {
+			ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
+			if err != nil {
+				return v, err
+			}
+			lv.Append(ve)
+		}
+		return v, nil
+	case fd.IsMap():
+		var jsonObject map[string]json.RawMessage
+		if err := json.Unmarshal(in, &jsonObject); err != nil {
+			return v, err
+		}
+		kfd := fd.MapKey()
+		vfd := fd.MapValue()
+		mv := v.Map()
+		for key, raw := range jsonObject {
+			var kv protoreflect.MapKey
+			if kfd.Kind() == protoreflect.StringKind {
+				kv = protoreflect.ValueOf(key).MapKey()
+			} else {
+				v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
+				if err != nil {
+					return v, err
+				}
+				kv = v.MapKey()
+			}
+
+			vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
+			if err != nil {
+				return v, err
+			}
+			mv.Set(kv, vv)
+		}
+		return v, nil
+	default:
+		return u.unmarshalSingularValue(v, in, fd)
+	}
+}
+
+var nonFinite = map[string]float64{
+	`"NaN"`:       math.NaN(),
+	`"Infinity"`:  math.Inf(+1),
+	`"-Infinity"`: math.Inf(-1),
+}
+
+func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	switch fd.Kind() {
+	case protoreflect.BoolKind:
+		return unmarshalValue(in, new(bool))
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		return unmarshalValue(trimQuote(in), new(int32))
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+		return unmarshalValue(trimQuote(in), new(int64))
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		return unmarshalValue(trimQuote(in), new(uint32))
+	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+		return unmarshalValue(trimQuote(in), new(uint64))
+	case protoreflect.FloatKind:
+		if f, ok := nonFinite[string(in)]; ok {
+			return protoreflect.ValueOfFloat32(float32(f)), nil
+		}
+		return unmarshalValue(trimQuote(in), new(float32))
+	case protoreflect.DoubleKind:
+		if f, ok := nonFinite[string(in)]; ok {
+			return protoreflect.ValueOfFloat64(float64(f)), nil
+		}
+		return unmarshalValue(trimQuote(in), new(float64))
+	case protoreflect.StringKind:
+		return unmarshalValue(in, new(string))
+	case protoreflect.BytesKind:
+		return unmarshalValue(in, new([]byte))
+	case protoreflect.EnumKind:
+		if hasPrefixAndSuffix('"', in, '"') {
+			vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
+			if vd == nil {
+				return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
+			}
+			return protoreflect.ValueOfEnum(vd.Number()), nil
+		}
+		return unmarshalValue(in, new(protoreflect.EnumNumber))
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		err := u.unmarshalMessage(v.Message(), in)
+		return v, err
+	default:
+		panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
+	}
+}
+
+func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) {
+	err := json.Unmarshal(in, v)
+	return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err
+}
+
+func unquoteString(in string) (out string, err error) {
+	err = json.Unmarshal([]byte(in), &out)
+	return out, err
+}
+
+func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
+	if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix {
+		return true
+	}
+	return false
+}
+
+// trimQuote is like unquoteString but simply strips surrounding quotes.
+// This is incorrect, but matches the behavior of the legacy implementation.
+func trimQuote(in []byte) []byte {
+	if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
+		in = in[1 : len(in)-1]
+	}
+	return in
+}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go
new file mode 100644
index 0000000..685c80a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go
@@ -0,0 +1,559 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jsonpb
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/encoding/protojson"
+	protoV2 "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapJSONMarshalV2 = false
+
+// Marshaler is a configurable object for marshaling protocol buffer messages
+// to the specified JSON representation.
+type Marshaler struct {
+	// OrigName specifies whether to use the original protobuf name for fields.
+	OrigName bool
+
+	// EnumsAsInts specifies whether to render enum values as integers,
+	// as opposed to string values.
+	EnumsAsInts bool
+
+	// EmitDefaults specifies whether to render fields with zero values.
+	EmitDefaults bool
+
+	// Indent controls whether the output is compact or not.
+	// If empty, the output is compact JSON. Otherwise, every JSON object
+	// entry and JSON array value will be on its own line.
+	// Each line will be preceded by repeated copies of Indent, where the
+	// number of copies is the current indentation depth.
+	Indent string
+
+	// AnyResolver is used to resolve the google.protobuf.Any well-known type.
+	// If unset, the global registry is used by default.
+	AnyResolver AnyResolver
+}
+
+// JSONPBMarshaler is implemented by protobuf messages that customize the
+// way they are marshaled to JSON. Messages that implement this should also
+// implement JSONPBUnmarshaler so that the custom format can be parsed.
+//
+// The JSON marshaling must follow the proto to JSON specification:
+//	https://developers.google.com/protocol-buffers/docs/proto3#json
+//
+// Deprecated: Custom types should implement protobuf reflection instead.
+type JSONPBMarshaler interface {
+	MarshalJSONPB(*Marshaler) ([]byte, error)
+}
+
+// Marshal serializes a protobuf message as JSON into w.
+func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error {
+	b, err := jm.marshal(m)
+	if len(b) > 0 {
+		if _, err := w.Write(b); err != nil {
+			return err
+		}
+	}
+	return err
+}
+
+// MarshalToString serializes a protobuf message as JSON in string form.
+func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) {
+	b, err := jm.marshal(m)
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
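+
+// Example (illustrative sketch; pb.Device is a hypothetical generated message type):
+//
+//   m := jsonpb.Marshaler{OrigName: true, EmitDefaults: true, Indent: "  "}
+//   s, err := m.MarshalToString(&pb.Device{SerialNumber: "ABC123"})
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   fmt.Println(s)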
+
+func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
+	v := reflect.ValueOf(m)
+	if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
+		return nil, errors.New("Marshal called with nil")
+	}
+
+	// Check for custom marshalers first since they may not properly
+	// implement protobuf reflection that the logic below relies on.
+	if jsm, ok := m.(JSONPBMarshaler); ok {
+		return jsm.MarshalJSONPB(jm)
+	}
+
+	if wrapJSONMarshalV2 {
+		opts := protojson.MarshalOptions{
+			UseProtoNames:   jm.OrigName,
+			UseEnumNumbers:  jm.EnumsAsInts,
+			EmitUnpopulated: jm.EmitDefaults,
+			Indent:          jm.Indent,
+		}
+		if jm.AnyResolver != nil {
+			opts.Resolver = anyResolver{jm.AnyResolver}
+		}
+		return opts.Marshal(proto.MessageReflect(m).Interface())
+	} else {
+		// Check for unpopulated required fields first.
+		m2 := proto.MessageReflect(m)
+		if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
+			return nil, err
+		}
+
+		w := jsonWriter{Marshaler: jm}
+		err := w.marshalMessage(m2, "", "")
+		return w.buf, err
+	}
+}
+
+type jsonWriter struct {
+	*Marshaler
+	buf []byte
+}
+
+func (w *jsonWriter) write(s string) {
+	w.buf = append(w.buf, s...)
+}
+
+func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
+	if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
+		b, err := jsm.MarshalJSONPB(w.Marshaler)
+		if err != nil {
+			return err
+		}
+		if typeURL != "" {
+			// we are marshaling this object to an Any type
+			var js map[string]*json.RawMessage
+			if err = json.Unmarshal(b, &js); err != nil {
+				return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
+			}
+			turl, err := json.Marshal(typeURL)
+			if err != nil {
+				return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
+			}
+			js["@type"] = (*json.RawMessage)(&turl)
+			if b, err = json.Marshal(js); err != nil {
+				return err
+			}
+		}
+		w.write(string(b))
+		return nil
+	}
+
+	md := m.Descriptor()
+	fds := md.Fields()
+
+	// Handle well-known types.
+	const secondInNanos = int64(time.Second / time.Nanosecond)
+	switch wellKnownType(md.FullName()) {
+	case "Any":
+		return w.marshalAny(m, indent)
+	case "BoolValue", "BytesValue", "StringValue",
+		"Int32Value", "UInt32Value", "FloatValue",
+		"Int64Value", "UInt64Value", "DoubleValue":
+		fd := fds.ByNumber(1)
+		return w.marshalValue(fd, m.Get(fd), indent)
+	case "Duration":
+		const maxSecondsInDuration = 315576000000
+		// "Generated output always contains 0, 3, 6, or 9 fractional digits,
+		//  depending on required precision."
+		s := m.Get(fds.ByNumber(1)).Int()
+		ns := m.Get(fds.ByNumber(2)).Int()
+		if s < -maxSecondsInDuration || s > maxSecondsInDuration {
+			return fmt.Errorf("seconds out of range %v", s)
+		}
+		if ns <= -secondInNanos || ns >= secondInNanos {
+			return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
+		}
+		if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
+			return errors.New("signs of seconds and nanos do not match")
+		}
+		var sign string
+		if s < 0 || ns < 0 {
+			sign, s, ns = "-", -1*s, -1*ns
+		}
+		x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
+		x = strings.TrimSuffix(x, "000")
+		x = strings.TrimSuffix(x, "000")
+		x = strings.TrimSuffix(x, ".000")
+		w.write(fmt.Sprintf(`"%vs"`, x))
+		return nil
+	case "Timestamp":
+		// "RFC 3339, where generated output will always be Z-normalized
+		//  and uses 0, 3, 6 or 9 fractional digits."
+		s := m.Get(fds.ByNumber(1)).Int()
+		ns := m.Get(fds.ByNumber(2)).Int()
+		if ns < 0 || ns >= secondInNanos {
+			return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
+		}
+		t := time.Unix(s, ns).UTC()
+		// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
+		x := t.Format("2006-01-02T15:04:05.000000000")
+		x = strings.TrimSuffix(x, "000")
+		x = strings.TrimSuffix(x, "000")
+		x = strings.TrimSuffix(x, ".000")
+		w.write(fmt.Sprintf(`"%vZ"`, x))
+		return nil
+	case "Value":
+		// JSON value, which is a null, number, string, bool, object, or array.
+		od := md.Oneofs().Get(0)
+		fd := m.WhichOneof(od)
+		if fd == nil {
+			return errors.New("nil Value")
+		}
+		return w.marshalValue(fd, m.Get(fd), indent)
+	case "Struct", "ListValue":
+		// JSON object or array.
+		fd := fds.ByNumber(1)
+		return w.marshalValue(fd, m.Get(fd), indent)
+	}
+
+	w.write("{")
+	if w.Indent != "" {
+		w.write("\n")
+	}
+
+	firstField := true
+	if typeURL != "" {
+		if err := w.marshalTypeURL(indent, typeURL); err != nil {
+			return err
+		}
+		firstField = false
+	}
+
+	for i := 0; i < fds.Len(); {
+		fd := fds.Get(i)
+		if od := fd.ContainingOneof(); od != nil {
+			fd = m.WhichOneof(od)
+			i += od.Fields().Len()
+			if fd == nil {
+				continue
+			}
+		} else {
+			i++
+		}
+
+		v := m.Get(fd)
+
+		if !m.Has(fd) {
+			if !w.EmitDefaults || fd.ContainingOneof() != nil {
+				continue
+			}
+			if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
+				v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
+			}
+		}
+
+		if !firstField {
+			w.writeComma()
+		}
+		if err := w.marshalField(fd, v, indent); err != nil {
+			return err
+		}
+		firstField = false
+	}
+
+	// Handle proto2 extensions.
+	if md.ExtensionRanges().Len() > 0 {
+		// Collect a sorted list of all extension descriptor and values.
+		type ext struct {
+			desc protoreflect.FieldDescriptor
+			val  protoreflect.Value
+		}
+		var exts []ext
+		m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+			if fd.IsExtension() {
+				exts = append(exts, ext{fd, v})
+			}
+			return true
+		})
+		sort.Slice(exts, func(i, j int) bool {
+			return exts[i].desc.Number() < exts[j].desc.Number()
+		})
+
+		for _, ext := range exts {
+			if !firstField {
+				w.writeComma()
+			}
+			if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
+				return err
+			}
+			firstField = false
+		}
+	}
+
+	if w.Indent != "" {
+		w.write("\n")
+		w.write(indent)
+	}
+	w.write("}")
+	return nil
+}
+
+func (w *jsonWriter) writeComma() {
+	if w.Indent != "" {
+		w.write(",\n")
+	} else {
+		w.write(",")
+	}
+}
+
+func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
+	// "If the Any contains a value that has a special JSON mapping,
+	//  it will be converted as follows: {"@type": xxx, "value": yyy}.
+	//  Otherwise, the value will be converted into a JSON object,
+	//  and the "@type" field will be inserted to indicate the actual data type."
+	md := m.Descriptor()
+	typeURL := m.Get(md.Fields().ByNumber(1)).String()
+	rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()
+
+	var m2 protoreflect.Message
+	if w.AnyResolver != nil {
+		mi, err := w.AnyResolver.Resolve(typeURL)
+		if err != nil {
+			return err
+		}
+		m2 = proto.MessageReflect(mi)
+	} else {
+		mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
+		if err != nil {
+			return err
+		}
+		m2 = mt.New()
+	}
+
+	if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
+		return err
+	}
+
+	if wellKnownType(m2.Descriptor().FullName()) == "" {
+		return w.marshalMessage(m2, indent, typeURL)
+	}
+
+	w.write("{")
+	if w.Indent != "" {
+		w.write("\n")
+	}
+	if err := w.marshalTypeURL(indent, typeURL); err != nil {
+		return err
+	}
+	w.writeComma()
+	if w.Indent != "" {
+		w.write(indent)
+		w.write(w.Indent)
+		w.write(`"value": `)
+	} else {
+		w.write(`"value":`)
+	}
+	if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
+		return err
+	}
+	if w.Indent != "" {
+		w.write("\n")
+		w.write(indent)
+	}
+	w.write("}")
+	return nil
+}
+
+func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
+	if w.Indent != "" {
+		w.write(indent)
+		w.write(w.Indent)
+	}
+	w.write(`"@type":`)
+	if w.Indent != "" {
+		w.write(" ")
+	}
+	b, err := json.Marshal(typeURL)
+	if err != nil {
+		return err
+	}
+	w.write(string(b))
+	return nil
+}
+
+// marshalField writes field description and value to the Writer.
+func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
+	if w.Indent != "" {
+		w.write(indent)
+		w.write(w.Indent)
+	}
+	w.write(`"`)
+	switch {
+	case fd.IsExtension():
+		// For a message set, use the name of the message as the extension name.
+		name := string(fd.FullName())
+		if isMessageSet(fd.ContainingMessage()) {
+			name = strings.TrimSuffix(name, ".message_set_extension")
+		}
+
+		w.write("[" + name + "]")
+	case w.OrigName:
+		name := string(fd.Name())
+		if fd.Kind() == protoreflect.GroupKind {
+			name = string(fd.Message().Name())
+		}
+		w.write(name)
+	default:
+		w.write(string(fd.JSONName()))
+	}
+	w.write(`":`)
+	if w.Indent != "" {
+		w.write(" ")
+	}
+	return w.marshalValue(fd, v, indent)
+}
+
+func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
+	switch {
+	case fd.IsList():
+		w.write("[")
+		comma := ""
+		lv := v.List()
+		for i := 0; i < lv.Len(); i++ {
+			w.write(comma)
+			if w.Indent != "" {
+				w.write("\n")
+				w.write(indent)
+				w.write(w.Indent)
+				w.write(w.Indent)
+			}
+			if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
+				return err
+			}
+			comma = ","
+		}
+		if w.Indent != "" {
+			w.write("\n")
+			w.write(indent)
+			w.write(w.Indent)
+		}
+		w.write("]")
+		return nil
+	case fd.IsMap():
+		kfd := fd.MapKey()
+		vfd := fd.MapValue()
+		mv := v.Map()
+
+		// Collect a sorted list of all map keys and values.
+		type entry struct{ key, val protoreflect.Value }
+		var entries []entry
+		mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+			entries = append(entries, entry{k.Value(), v})
+			return true
+		})
+		sort.Slice(entries, func(i, j int) bool {
+			switch kfd.Kind() {
+			case protoreflect.BoolKind:
+				return !entries[i].key.Bool() && entries[j].key.Bool()
+			case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+				return entries[i].key.Int() < entries[j].key.Int()
+			case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+				return entries[i].key.Uint() < entries[j].key.Uint()
+			case protoreflect.StringKind:
+				return entries[i].key.String() < entries[j].key.String()
+			default:
+				panic("invalid kind")
+			}
+		})
+
+		w.write(`{`)
+		comma := ""
+		for _, entry := range entries {
+			w.write(comma)
+			if w.Indent != "" {
+				w.write("\n")
+				w.write(indent)
+				w.write(w.Indent)
+				w.write(w.Indent)
+			}
+
+			s := fmt.Sprint(entry.key.Interface())
+			b, err := json.Marshal(s)
+			if err != nil {
+				return err
+			}
+			w.write(string(b))
+
+			w.write(`:`)
+			if w.Indent != "" {
+				w.write(` `)
+			}
+
+			if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
+				return err
+			}
+			comma = ","
+		}
+		if w.Indent != "" {
+			w.write("\n")
+			w.write(indent)
+			w.write(w.Indent)
+		}
+		w.write(`}`)
+		return nil
+	default:
+		return w.marshalSingularValue(fd, v, indent)
+	}
+}
+
+func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
+	switch {
+	case !v.IsValid():
+		w.write("null")
+		return nil
+	case fd.Message() != nil:
+		return w.marshalMessage(v.Message(), indent+w.Indent, "")
+	case fd.Enum() != nil:
+		if fd.Enum().FullName() == "google.protobuf.NullValue" {
+			w.write("null")
+			return nil
+		}
+
+		vd := fd.Enum().Values().ByNumber(v.Enum())
+		if vd == nil || w.EnumsAsInts {
+			w.write(strconv.Itoa(int(v.Enum())))
+		} else {
+			w.write(`"` + string(vd.Name()) + `"`)
+		}
+		return nil
+	default:
+		switch v.Interface().(type) {
+		case float32, float64:
+			switch {
+			case math.IsInf(v.Float(), +1):
+				w.write(`"Infinity"`)
+				return nil
+			case math.IsInf(v.Float(), -1):
+				w.write(`"-Infinity"`)
+				return nil
+			case math.IsNaN(v.Float()):
+				w.write(`"NaN"`)
+				return nil
+			}
+		case int64, uint64:
+			w.write(fmt.Sprintf(`"%d"`, v.Interface()))
+			return nil
+		}
+
+		b, err := json.Marshal(v.Interface())
+		if err != nil {
+			return err
+		}
+		w.write(string(b))
+		return nil
+	}
+}
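
The jsonWriter internals above are driven by the options on the public jsonpb.Marshaler (Indent, OrigName, EnumsAsInts, EmitDefaults, AnyResolver). For orientation only, here is a minimal caller-side sketch of that API; `mypb.Device` and its `Id` field are hypothetical stand-ins for any generated message type, not part of this change.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"

	mypb "example.com/gen/mypb" // hypothetical generated package, for illustration only
)

func main() {
	m := &jsonpb.Marshaler{
		Indent:       "  ", // pretty-print; also makes the writer emit newlines between fields
		OrigName:     true, // keep original .proto field names instead of lowerCamelCase
		EmitDefaults: false,
		EnumsAsInts:  false, // render known enum values as strings
	}
	// mypb.Device is a placeholder for any proto.Message.
	out, err := m.MarshalToString(&mypb.Device{Id: "olt-1"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```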
diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go
new file mode 100644
index 0000000..480e244
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/json.go
@@ -0,0 +1,69 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jsonpb provides functionality to marshal and unmarshal between a
+// protocol buffer message and JSON. It follows the specification at
+// https://developers.google.com/protocol-buffers/docs/proto3#json.
+//
+// Do not rely on the default behavior of the standard encoding/json package
+// when called on generated message types as it does not operate correctly.
+//
+// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
+// package instead.
+package jsonpb
+
+import (
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// AnyResolver takes a type URL, present in an Any message,
+// and resolves it into an instance of the associated message.
+type AnyResolver interface {
+	Resolve(typeURL string) (proto.Message, error)
+}
+
+type anyResolver struct{ AnyResolver }
+
+func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
+	return r.FindMessageByURL(string(message))
+}
+
+func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+	m, err := r.Resolve(url)
+	if err != nil {
+		return nil, err
+	}
+	return protoimpl.X.MessageTypeOf(m), nil
+}
+
+func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+	return protoregistry.GlobalTypes.FindExtensionByName(field)
+}
+
+func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+	return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+}
+
+func wellKnownType(s protoreflect.FullName) string {
+	if s.Parent() == "google.protobuf" {
+		switch s.Name() {
+		case "Empty", "Any",
+			"BoolValue", "BytesValue", "StringValue",
+			"Int32Value", "UInt32Value", "FloatValue",
+			"Int64Value", "UInt64Value", "DoubleValue",
+			"Duration", "Timestamp",
+			"NullValue", "Struct", "Value", "ListValue":
+			return string(s.Name())
+		}
+	}
+	return ""
+}
+
+func isMessageSet(md protoreflect.MessageDescriptor) bool {
+	ms, ok := md.(interface{ IsMessageSet() bool })
+	return ok && ms.IsMessageSet()
+}
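
As a concrete illustration of the AnyResolver contract declared above, the following sketch resolves exactly one type URL using only the well-known Empty message; a real resolver would normally consult an application-specific registry, which is not shown here.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/empty"
)

// staticResolver recognizes a single type URL; anything else is an error.
type staticResolver struct{}

func (staticResolver) Resolve(typeURL string) (proto.Message, error) {
	// Only the part of the URL after the last '/' names the message type.
	name := typeURL
	if i := strings.LastIndex(name, "/"); i >= 0 {
		name = name[i+1:]
	}
	if name == "google.protobuf.Empty" {
		return &empty.Empty{}, nil
	}
	return nil, fmt.Errorf("unresolvable type URL %q", typeURL)
}

func main() {
	m, err := staticResolver{}.Resolve("type.googleapis.com/google.protobuf.Empty")
	fmt.Println(m, err)
}
```

A value of this type can be assigned to the AnyResolver field of a Marshaler or Unmarshaler so that Any payloads are resolved through it instead of the global registry.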
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
deleted file mode 100644
index e9cc202..0000000
--- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
+++ /dev/null
@@ -1,1284 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2015 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
-It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
-
-This package produces a different output than the standard "encoding/json" package,
-which does not operate correctly on protocol buffers.
-*/
-package jsonpb
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"math"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/golang/protobuf/proto"
-
-	stpb "github.com/golang/protobuf/ptypes/struct"
-)
-
-const secondInNanos = int64(time.Second / time.Nanosecond)
-const maxSecondsInDuration = 315576000000
-
-// Marshaler is a configurable object for converting between
-// protocol buffer objects and a JSON representation for them.
-type Marshaler struct {
-	// Whether to render enum values as integers, as opposed to string values.
-	EnumsAsInts bool
-
-	// Whether to render fields with zero values.
-	EmitDefaults bool
-
-	// A string to indent each level by. The presence of this field will
-	// also cause a space to appear between the field separator and
-	// value, and for newlines to be appear between fields and array
-	// elements.
-	Indent string
-
-	// Whether to use the original (.proto) name for fields.
-	OrigName bool
-
-	// A custom URL resolver to use when marshaling Any messages to JSON.
-	// If unset, the default resolution strategy is to extract the
-	// fully-qualified type name from the type URL and pass that to
-	// proto.MessageType(string).
-	AnyResolver AnyResolver
-}
-
-// AnyResolver takes a type URL, present in an Any message, and resolves it into
-// an instance of the associated message.
-type AnyResolver interface {
-	Resolve(typeUrl string) (proto.Message, error)
-}
-
-func defaultResolveAny(typeUrl string) (proto.Message, error) {
-	// Only the part of typeUrl after the last slash is relevant.
-	mname := typeUrl
-	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
-		mname = mname[slash+1:]
-	}
-	mt := proto.MessageType(mname)
-	if mt == nil {
-		return nil, fmt.Errorf("unknown message type %q", mname)
-	}
-	return reflect.New(mt.Elem()).Interface().(proto.Message), nil
-}
-
-// JSONPBMarshaler is implemented by protobuf messages that customize the
-// way they are marshaled to JSON. Messages that implement this should
-// also implement JSONPBUnmarshaler so that the custom format can be
-// parsed.
-//
-// The JSON marshaling must follow the proto to JSON specification:
-//	https://developers.google.com/protocol-buffers/docs/proto3#json
-type JSONPBMarshaler interface {
-	MarshalJSONPB(*Marshaler) ([]byte, error)
-}
-
-// JSONPBUnmarshaler is implemented by protobuf messages that customize
-// the way they are unmarshaled from JSON. Messages that implement this
-// should also implement JSONPBMarshaler so that the custom format can be
-// produced.
-//
-// The JSON unmarshaling must follow the JSON to proto specification:
-//	https://developers.google.com/protocol-buffers/docs/proto3#json
-type JSONPBUnmarshaler interface {
-	UnmarshalJSONPB(*Unmarshaler, []byte) error
-}
-
-// Marshal marshals a protocol buffer into JSON.
-func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
-	v := reflect.ValueOf(pb)
-	if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
-		return errors.New("Marshal called with nil")
-	}
-	// Check for unset required fields first.
-	if err := checkRequiredFields(pb); err != nil {
-		return err
-	}
-	writer := &errWriter{writer: out}
-	return m.marshalObject(writer, pb, "", "")
-}
-
-// MarshalToString converts a protocol buffer object to JSON string.
-func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
-	var buf bytes.Buffer
-	if err := m.Marshal(&buf, pb); err != nil {
-		return "", err
-	}
-	return buf.String(), nil
-}
-
-type int32Slice []int32
-
-var nonFinite = map[string]float64{
-	`"NaN"`:       math.NaN(),
-	`"Infinity"`:  math.Inf(1),
-	`"-Infinity"`: math.Inf(-1),
-}
-
-// For sorting extensions ids to ensure stable output.
-func (s int32Slice) Len() int           { return len(s) }
-func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-
-type wkt interface {
-	XXX_WellKnownType() string
-}
-
-// marshalObject writes a struct to the Writer.
-func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error {
-	if jsm, ok := v.(JSONPBMarshaler); ok {
-		b, err := jsm.MarshalJSONPB(m)
-		if err != nil {
-			return err
-		}
-		if typeURL != "" {
-			// we are marshaling this object to an Any type
-			var js map[string]*json.RawMessage
-			if err = json.Unmarshal(b, &js); err != nil {
-				return fmt.Errorf("type %T produced invalid JSON: %v", v, err)
-			}
-			turl, err := json.Marshal(typeURL)
-			if err != nil {
-				return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
-			}
-			js["@type"] = (*json.RawMessage)(&turl)
-			if m.Indent != "" {
-				b, err = json.MarshalIndent(js, indent, m.Indent)
-			} else {
-				b, err = json.Marshal(js)
-			}
-			if err != nil {
-				return err
-			}
-		}
-
-		out.write(string(b))
-		return out.err
-	}
-
-	s := reflect.ValueOf(v).Elem()
-
-	// Handle well-known types.
-	if wkt, ok := v.(wkt); ok {
-		switch wkt.XXX_WellKnownType() {
-		case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
-			"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
-			// "Wrappers use the same representation in JSON
-			//  as the wrapped primitive type, ..."
-			sprop := proto.GetProperties(s.Type())
-			return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
-		case "Any":
-			// Any is a bit more involved.
-			return m.marshalAny(out, v, indent)
-		case "Duration":
-			s, ns := s.Field(0).Int(), s.Field(1).Int()
-			if s < -maxSecondsInDuration || s > maxSecondsInDuration {
-				return fmt.Errorf("seconds out of range %v", s)
-			}
-			if ns <= -secondInNanos || ns >= secondInNanos {
-				return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
-			}
-			if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
-				return errors.New("signs of seconds and nanos do not match")
-			}
-			// Generated output always contains 0, 3, 6, or 9 fractional digits,
-			// depending on required precision, followed by the suffix "s".
-			f := "%d.%09d"
-			if ns < 0 {
-				ns = -ns
-				if s == 0 {
-					f = "-%d.%09d"
-				}
-			}
-			x := fmt.Sprintf(f, s, ns)
-			x = strings.TrimSuffix(x, "000")
-			x = strings.TrimSuffix(x, "000")
-			x = strings.TrimSuffix(x, ".000")
-			out.write(`"`)
-			out.write(x)
-			out.write(`s"`)
-			return out.err
-		case "Struct", "ListValue":
-			// Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice.
-			// TODO: pass the correct Properties if needed.
-			return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
-		case "Timestamp":
-			// "RFC 3339, where generated output will always be Z-normalized
-			//  and uses 0, 3, 6 or 9 fractional digits."
-			s, ns := s.Field(0).Int(), s.Field(1).Int()
-			if ns < 0 || ns >= secondInNanos {
-				return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
-			}
-			t := time.Unix(s, ns).UTC()
-			// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
-			x := t.Format("2006-01-02T15:04:05.000000000")
-			x = strings.TrimSuffix(x, "000")
-			x = strings.TrimSuffix(x, "000")
-			x = strings.TrimSuffix(x, ".000")
-			out.write(`"`)
-			out.write(x)
-			out.write(`Z"`)
-			return out.err
-		case "Value":
-			// Value has a single oneof.
-			kind := s.Field(0)
-			if kind.IsNil() {
-				// "absence of any variant indicates an error"
-				return errors.New("nil Value")
-			}
-			// oneof -> *T -> T -> T.F
-			x := kind.Elem().Elem().Field(0)
-			// TODO: pass the correct Properties if needed.
-			return m.marshalValue(out, &proto.Properties{}, x, indent)
-		}
-	}
-
-	out.write("{")
-	if m.Indent != "" {
-		out.write("\n")
-	}
-
-	firstField := true
-
-	if typeURL != "" {
-		if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
-			return err
-		}
-		firstField = false
-	}
-
-	for i := 0; i < s.NumField(); i++ {
-		value := s.Field(i)
-		valueField := s.Type().Field(i)
-		if strings.HasPrefix(valueField.Name, "XXX_") {
-			continue
-		}
-
-		// IsNil will panic on most value kinds.
-		switch value.Kind() {
-		case reflect.Chan, reflect.Func, reflect.Interface:
-			if value.IsNil() {
-				continue
-			}
-		}
-
-		if !m.EmitDefaults {
-			switch value.Kind() {
-			case reflect.Bool:
-				if !value.Bool() {
-					continue
-				}
-			case reflect.Int32, reflect.Int64:
-				if value.Int() == 0 {
-					continue
-				}
-			case reflect.Uint32, reflect.Uint64:
-				if value.Uint() == 0 {
-					continue
-				}
-			case reflect.Float32, reflect.Float64:
-				if value.Float() == 0 {
-					continue
-				}
-			case reflect.String:
-				if value.Len() == 0 {
-					continue
-				}
-			case reflect.Map, reflect.Ptr, reflect.Slice:
-				if value.IsNil() {
-					continue
-				}
-			}
-		}
-
-		// Oneof fields need special handling.
-		if valueField.Tag.Get("protobuf_oneof") != "" {
-			// value is an interface containing &T{real_value}.
-			sv := value.Elem().Elem() // interface -> *T -> T
-			value = sv.Field(0)
-			valueField = sv.Type().Field(0)
-		}
-		prop := jsonProperties(valueField, m.OrigName)
-		if !firstField {
-			m.writeSep(out)
-		}
-		if err := m.marshalField(out, prop, value, indent); err != nil {
-			return err
-		}
-		firstField = false
-	}
-
-	// Handle proto2 extensions.
-	if ep, ok := v.(proto.Message); ok {
-		extensions := proto.RegisteredExtensions(v)
-		// Sort extensions for stable output.
-		ids := make([]int32, 0, len(extensions))
-		for id, desc := range extensions {
-			if !proto.HasExtension(ep, desc) {
-				continue
-			}
-			ids = append(ids, id)
-		}
-		sort.Sort(int32Slice(ids))
-		for _, id := range ids {
-			desc := extensions[id]
-			if desc == nil {
-				// unknown extension
-				continue
-			}
-			ext, extErr := proto.GetExtension(ep, desc)
-			if extErr != nil {
-				return extErr
-			}
-			value := reflect.ValueOf(ext)
-			var prop proto.Properties
-			prop.Parse(desc.Tag)
-			prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
-			if !firstField {
-				m.writeSep(out)
-			}
-			if err := m.marshalField(out, &prop, value, indent); err != nil {
-				return err
-			}
-			firstField = false
-		}
-
-	}
-
-	if m.Indent != "" {
-		out.write("\n")
-		out.write(indent)
-	}
-	out.write("}")
-	return out.err
-}
-
-func (m *Marshaler) writeSep(out *errWriter) {
-	if m.Indent != "" {
-		out.write(",\n")
-	} else {
-		out.write(",")
-	}
-}
-
-func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error {
-	// "If the Any contains a value that has a special JSON mapping,
-	//  it will be converted as follows: {"@type": xxx, "value": yyy}.
-	//  Otherwise, the value will be converted into a JSON object,
-	//  and the "@type" field will be inserted to indicate the actual data type."
-	v := reflect.ValueOf(any).Elem()
-	turl := v.Field(0).String()
-	val := v.Field(1).Bytes()
-
-	var msg proto.Message
-	var err error
-	if m.AnyResolver != nil {
-		msg, err = m.AnyResolver.Resolve(turl)
-	} else {
-		msg, err = defaultResolveAny(turl)
-	}
-	if err != nil {
-		return err
-	}
-
-	if err := proto.Unmarshal(val, msg); err != nil {
-		return err
-	}
-
-	if _, ok := msg.(wkt); ok {
-		out.write("{")
-		if m.Indent != "" {
-			out.write("\n")
-		}
-		if err := m.marshalTypeURL(out, indent, turl); err != nil {
-			return err
-		}
-		m.writeSep(out)
-		if m.Indent != "" {
-			out.write(indent)
-			out.write(m.Indent)
-			out.write(`"value": `)
-		} else {
-			out.write(`"value":`)
-		}
-		if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
-			return err
-		}
-		if m.Indent != "" {
-			out.write("\n")
-			out.write(indent)
-		}
-		out.write("}")
-		return out.err
-	}
-
-	return m.marshalObject(out, msg, indent, turl)
-}
-
-func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
-	if m.Indent != "" {
-		out.write(indent)
-		out.write(m.Indent)
-	}
-	out.write(`"@type":`)
-	if m.Indent != "" {
-		out.write(" ")
-	}
-	b, err := json.Marshal(typeURL)
-	if err != nil {
-		return err
-	}
-	out.write(string(b))
-	return out.err
-}
-
-// marshalField writes field description and value to the Writer.
-func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
-	if m.Indent != "" {
-		out.write(indent)
-		out.write(m.Indent)
-	}
-	out.write(`"`)
-	out.write(prop.JSONName)
-	out.write(`":`)
-	if m.Indent != "" {
-		out.write(" ")
-	}
-	if err := m.marshalValue(out, prop, v, indent); err != nil {
-		return err
-	}
-	return nil
-}
-
-// marshalValue writes the value to the Writer.
-func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
-	var err error
-	v = reflect.Indirect(v)
-
-	// Handle nil pointer
-	if v.Kind() == reflect.Invalid {
-		out.write("null")
-		return out.err
-	}
-
-	// Handle repeated elements.
-	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
-		out.write("[")
-		comma := ""
-		for i := 0; i < v.Len(); i++ {
-			sliceVal := v.Index(i)
-			out.write(comma)
-			if m.Indent != "" {
-				out.write("\n")
-				out.write(indent)
-				out.write(m.Indent)
-				out.write(m.Indent)
-			}
-			if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
-				return err
-			}
-			comma = ","
-		}
-		if m.Indent != "" {
-			out.write("\n")
-			out.write(indent)
-			out.write(m.Indent)
-		}
-		out.write("]")
-		return out.err
-	}
-
-	// Handle well-known types.
-	// Most are handled up in marshalObject (because 99% are messages).
-	if wkt, ok := v.Interface().(wkt); ok {
-		switch wkt.XXX_WellKnownType() {
-		case "NullValue":
-			out.write("null")
-			return out.err
-		}
-	}
-
-	// Handle enumerations.
-	if !m.EnumsAsInts && prop.Enum != "" {
-		// Unknown enum values will are stringified by the proto library as their
-		// value. Such values should _not_ be quoted or they will be interpreted
-		// as an enum string instead of their value.
-		enumStr := v.Interface().(fmt.Stringer).String()
-		var valStr string
-		if v.Kind() == reflect.Ptr {
-			valStr = strconv.Itoa(int(v.Elem().Int()))
-		} else {
-			valStr = strconv.Itoa(int(v.Int()))
-		}
-		isKnownEnum := enumStr != valStr
-		if isKnownEnum {
-			out.write(`"`)
-		}
-		out.write(enumStr)
-		if isKnownEnum {
-			out.write(`"`)
-		}
-		return out.err
-	}
-
-	// Handle nested messages.
-	if v.Kind() == reflect.Struct {
-		return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "")
-	}
-
-	// Handle maps.
-	// Since Go randomizes map iteration, we sort keys for stable output.
-	if v.Kind() == reflect.Map {
-		out.write(`{`)
-		keys := v.MapKeys()
-		sort.Sort(mapKeys(keys))
-		for i, k := range keys {
-			if i > 0 {
-				out.write(`,`)
-			}
-			if m.Indent != "" {
-				out.write("\n")
-				out.write(indent)
-				out.write(m.Indent)
-				out.write(m.Indent)
-			}
-
-			// TODO handle map key prop properly
-			b, err := json.Marshal(k.Interface())
-			if err != nil {
-				return err
-			}
-			s := string(b)
-
-			// If the JSON is not a string value, encode it again to make it one.
-			if !strings.HasPrefix(s, `"`) {
-				b, err := json.Marshal(s)
-				if err != nil {
-					return err
-				}
-				s = string(b)
-			}
-
-			out.write(s)
-			out.write(`:`)
-			if m.Indent != "" {
-				out.write(` `)
-			}
-
-			vprop := prop
-			if prop != nil && prop.MapValProp != nil {
-				vprop = prop.MapValProp
-			}
-			if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil {
-				return err
-			}
-		}
-		if m.Indent != "" {
-			out.write("\n")
-			out.write(indent)
-			out.write(m.Indent)
-		}
-		out.write(`}`)
-		return out.err
-	}
-
-	// Handle non-finite floats, e.g. NaN, Infinity and -Infinity.
-	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
-		f := v.Float()
-		var sval string
-		switch {
-		case math.IsInf(f, 1):
-			sval = `"Infinity"`
-		case math.IsInf(f, -1):
-			sval = `"-Infinity"`
-		case math.IsNaN(f):
-			sval = `"NaN"`
-		}
-		if sval != "" {
-			out.write(sval)
-			return out.err
-		}
-	}
-
-	// Default handling defers to the encoding/json library.
-	b, err := json.Marshal(v.Interface())
-	if err != nil {
-		return err
-	}
-	needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
-	if needToQuote {
-		out.write(`"`)
-	}
-	out.write(string(b))
-	if needToQuote {
-		out.write(`"`)
-	}
-	return out.err
-}
-
-// Unmarshaler is a configurable object for converting from a JSON
-// representation to a protocol buffer object.
-type Unmarshaler struct {
-	// Whether to allow messages to contain unknown fields, as opposed to
-	// failing to unmarshal.
-	AllowUnknownFields bool
-
-	// A custom URL resolver to use when unmarshaling Any messages from JSON.
-	// If unset, the default resolution strategy is to extract the
-	// fully-qualified type name from the type URL and pass that to
-	// proto.MessageType(string).
-	AnyResolver AnyResolver
-}
-
-// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
-// This function is lenient and will decode any options permutations of the
-// related Marshaler.
-func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
-	inputValue := json.RawMessage{}
-	if err := dec.Decode(&inputValue); err != nil {
-		return err
-	}
-	if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil {
-		return err
-	}
-	return checkRequiredFields(pb)
-}
-
-// Unmarshal unmarshals a JSON object stream into a protocol
-// buffer. This function is lenient and will decode any options
-// permutations of the related Marshaler.
-func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error {
-	dec := json.NewDecoder(r)
-	return u.UnmarshalNext(dec, pb)
-}
-
-// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
-// This function is lenient and will decode any options permutations of the
-// related Marshaler.
-func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
-	return new(Unmarshaler).UnmarshalNext(dec, pb)
-}
-
-// Unmarshal unmarshals a JSON object stream into a protocol
-// buffer. This function is lenient and will decode any options
-// permutations of the related Marshaler.
-func Unmarshal(r io.Reader, pb proto.Message) error {
-	return new(Unmarshaler).Unmarshal(r, pb)
-}
-
-// UnmarshalString will populate the fields of a protocol buffer based
-// on a JSON string. This function is lenient and will decode any options
-// permutations of the related Marshaler.
-func UnmarshalString(str string, pb proto.Message) error {
-	return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb)
-}
-
-// unmarshalValue converts/copies a value into the target.
-// prop may be nil.
-func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
-	targetType := target.Type()
-
-	// Allocate memory for pointer fields.
-	if targetType.Kind() == reflect.Ptr {
-		// If input value is "null" and target is a pointer type, then the field should be treated as not set
-		// UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue.
-		_, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler)
-		if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler {
-			return nil
-		}
-		target.Set(reflect.New(targetType.Elem()))
-
-		return u.unmarshalValue(target.Elem(), inputValue, prop)
-	}
-
-	if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok {
-		return jsu.UnmarshalJSONPB(u, []byte(inputValue))
-	}
-
-	// Handle well-known types that are not pointers.
-	if w, ok := target.Addr().Interface().(wkt); ok {
-		switch w.XXX_WellKnownType() {
-		case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
-			"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
-			return u.unmarshalValue(target.Field(0), inputValue, prop)
-		case "Any":
-			// Use json.RawMessage pointer type instead of value to support pre-1.8 version.
-			// 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see
-			// https://github.com/golang/go/issues/14493
-			var jsonFields map[string]*json.RawMessage
-			if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
-				return err
-			}
-
-			val, ok := jsonFields["@type"]
-			if !ok || val == nil {
-				return errors.New("Any JSON doesn't have '@type'")
-			}
-
-			var turl string
-			if err := json.Unmarshal([]byte(*val), &turl); err != nil {
-				return fmt.Errorf("can't unmarshal Any's '@type': %q", *val)
-			}
-			target.Field(0).SetString(turl)
-
-			var m proto.Message
-			var err error
-			if u.AnyResolver != nil {
-				m, err = u.AnyResolver.Resolve(turl)
-			} else {
-				m, err = defaultResolveAny(turl)
-			}
-			if err != nil {
-				return err
-			}
-
-			if _, ok := m.(wkt); ok {
-				val, ok := jsonFields["value"]
-				if !ok {
-					return errors.New("Any JSON doesn't have 'value'")
-				}
-
-				if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil {
-					return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
-				}
-			} else {
-				delete(jsonFields, "@type")
-				nestedProto, err := json.Marshal(jsonFields)
-				if err != nil {
-					return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
-				}
-
-				if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil {
-					return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
-				}
-			}
-
-			b, err := proto.Marshal(m)
-			if err != nil {
-				return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err)
-			}
-			target.Field(1).SetBytes(b)
-
-			return nil
-		case "Duration":
-			unq, err := unquote(string(inputValue))
-			if err != nil {
-				return err
-			}
-
-			d, err := time.ParseDuration(unq)
-			if err != nil {
-				return fmt.Errorf("bad Duration: %v", err)
-			}
-
-			ns := d.Nanoseconds()
-			s := ns / 1e9
-			ns %= 1e9
-			target.Field(0).SetInt(s)
-			target.Field(1).SetInt(ns)
-			return nil
-		case "Timestamp":
-			unq, err := unquote(string(inputValue))
-			if err != nil {
-				return err
-			}
-
-			t, err := time.Parse(time.RFC3339Nano, unq)
-			if err != nil {
-				return fmt.Errorf("bad Timestamp: %v", err)
-			}
-
-			target.Field(0).SetInt(t.Unix())
-			target.Field(1).SetInt(int64(t.Nanosecond()))
-			return nil
-		case "Struct":
-			var m map[string]json.RawMessage
-			if err := json.Unmarshal(inputValue, &m); err != nil {
-				return fmt.Errorf("bad StructValue: %v", err)
-			}
-
-			target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{}))
-			for k, jv := range m {
-				pv := &stpb.Value{}
-				if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil {
-					return fmt.Errorf("bad value in StructValue for key %q: %v", k, err)
-				}
-				target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv))
-			}
-			return nil
-		case "ListValue":
-			var s []json.RawMessage
-			if err := json.Unmarshal(inputValue, &s); err != nil {
-				return fmt.Errorf("bad ListValue: %v", err)
-			}
-
-			target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s))))
-			for i, sv := range s {
-				if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil {
-					return err
-				}
-			}
-			return nil
-		case "Value":
-			ivStr := string(inputValue)
-			if ivStr == "null" {
-				target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{}))
-			} else if v, err := strconv.ParseFloat(ivStr, 0); err == nil {
-				target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v}))
-			} else if v, err := unquote(ivStr); err == nil {
-				target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v}))
-			} else if v, err := strconv.ParseBool(ivStr); err == nil {
-				target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v}))
-			} else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil {
-				lv := &stpb.ListValue{}
-				target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv}))
-				return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop)
-			} else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil {
-				sv := &stpb.Struct{}
-				target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv}))
-				return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop)
-			} else {
-				return fmt.Errorf("unrecognized type for Value %q", ivStr)
-			}
-			return nil
-		}
-	}
-
-	// Handle enums, which have an underlying type of int32,
-	// and may appear as strings.
-	// The case of an enum appearing as a number is handled
-	// at the bottom of this function.
-	if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
-		vmap := proto.EnumValueMap(prop.Enum)
-		// Don't need to do unquoting; valid enum names
-		// are from a limited character set.
-		s := inputValue[1 : len(inputValue)-1]
-		n, ok := vmap[string(s)]
-		if !ok {
-			return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
-		}
-		if target.Kind() == reflect.Ptr { // proto2
-			target.Set(reflect.New(targetType.Elem()))
-			target = target.Elem()
-		}
-		if targetType.Kind() != reflect.Int32 {
-			return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum)
-		}
-		target.SetInt(int64(n))
-		return nil
-	}
-
-	// Handle nested messages.
-	if targetType.Kind() == reflect.Struct {
-		var jsonFields map[string]json.RawMessage
-		if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
-			return err
-		}
-
-		consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
-			// Be liberal in what names we accept; both orig_name and camelName are okay.
-			fieldNames := acceptedJSONFieldNames(prop)
-
-			vOrig, okOrig := jsonFields[fieldNames.orig]
-			vCamel, okCamel := jsonFields[fieldNames.camel]
-			if !okOrig && !okCamel {
-				return nil, false
-			}
-			// If, for some reason, both are present in the data, favour the camelName.
-			var raw json.RawMessage
-			if okOrig {
-				raw = vOrig
-				delete(jsonFields, fieldNames.orig)
-			}
-			if okCamel {
-				raw = vCamel
-				delete(jsonFields, fieldNames.camel)
-			}
-			return raw, true
-		}
-
-		sprops := proto.GetProperties(targetType)
-		for i := 0; i < target.NumField(); i++ {
-			ft := target.Type().Field(i)
-			if strings.HasPrefix(ft.Name, "XXX_") {
-				continue
-			}
-
-			valueForField, ok := consumeField(sprops.Prop[i])
-			if !ok {
-				continue
-			}
-
-			if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
-				return err
-			}
-		}
-		// Check for any oneof fields.
-		if len(jsonFields) > 0 {
-			for _, oop := range sprops.OneofTypes {
-				raw, ok := consumeField(oop.Prop)
-				if !ok {
-					continue
-				}
-				nv := reflect.New(oop.Type.Elem())
-				target.Field(oop.Field).Set(nv)
-				if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
-					return err
-				}
-			}
-		}
-		// Handle proto2 extensions.
-		if len(jsonFields) > 0 {
-			if ep, ok := target.Addr().Interface().(proto.Message); ok {
-				for _, ext := range proto.RegisteredExtensions(ep) {
-					name := fmt.Sprintf("[%s]", ext.Name)
-					raw, ok := jsonFields[name]
-					if !ok {
-						continue
-					}
-					delete(jsonFields, name)
-					nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem())
-					if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil {
-						return err
-					}
-					if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil {
-						return err
-					}
-				}
-			}
-		}
-		if !u.AllowUnknownFields && len(jsonFields) > 0 {
-			// Pick any field to be the scapegoat.
-			var f string
-			for fname := range jsonFields {
-				f = fname
-				break
-			}
-			return fmt.Errorf("unknown field %q in %v", f, targetType)
-		}
-		return nil
-	}
-
-	// Handle arrays (which aren't encoded bytes)
-	if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 {
-		var slc []json.RawMessage
-		if err := json.Unmarshal(inputValue, &slc); err != nil {
-			return err
-		}
-		if slc != nil {
-			l := len(slc)
-			target.Set(reflect.MakeSlice(targetType, l, l))
-			for i := 0; i < l; i++ {
-				if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil {
-					return err
-				}
-			}
-		}
-		return nil
-	}
-
-	// Handle maps (whose keys are always strings)
-	if targetType.Kind() == reflect.Map {
-		var mp map[string]json.RawMessage
-		if err := json.Unmarshal(inputValue, &mp); err != nil {
-			return err
-		}
-		if mp != nil {
-			target.Set(reflect.MakeMap(targetType))
-			for ks, raw := range mp {
-				// Unmarshal map key. The core json library already decoded the key into a
-				// string, so we handle that specially. Other types were quoted post-serialization.
-				var k reflect.Value
-				if targetType.Key().Kind() == reflect.String {
-					k = reflect.ValueOf(ks)
-				} else {
-					k = reflect.New(targetType.Key()).Elem()
-					var kprop *proto.Properties
-					if prop != nil && prop.MapKeyProp != nil {
-						kprop = prop.MapKeyProp
-					}
-					if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil {
-						return err
-					}
-				}
-
-				// Unmarshal map value.
-				v := reflect.New(targetType.Elem()).Elem()
-				var vprop *proto.Properties
-				if prop != nil && prop.MapValProp != nil {
-					vprop = prop.MapValProp
-				}
-				if err := u.unmarshalValue(v, raw, vprop); err != nil {
-					return err
-				}
-				target.SetMapIndex(k, v)
-			}
-		}
-		return nil
-	}
-
-	// Non-finite numbers can be encoded as strings.
-	isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
-	if isFloat {
-		if num, ok := nonFinite[string(inputValue)]; ok {
-			target.SetFloat(num)
-			return nil
-		}
-	}
-
-	// integers & floats can be encoded as strings. In this case we drop
-	// the quotes and proceed as normal.
-	isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 ||
-		targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 ||
-		targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
-	if isNum && strings.HasPrefix(string(inputValue), `"`) {
-		inputValue = inputValue[1 : len(inputValue)-1]
-	}
-
-	// Use the encoding/json for parsing other value types.
-	return json.Unmarshal(inputValue, target.Addr().Interface())
-}
-
-func unquote(s string) (string, error) {
-	var ret string
-	err := json.Unmarshal([]byte(s), &ret)
-	return ret, err
-}
-
-// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute.
-func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
-	var prop proto.Properties
-	prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
-	if origName || prop.JSONName == "" {
-		prop.JSONName = prop.OrigName
-	}
-	return &prop
-}
-
-type fieldNames struct {
-	orig, camel string
-}
-
-func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
-	opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
-	if prop.JSONName != "" {
-		opts.camel = prop.JSONName
-	}
-	return opts
-}
-
-// Writer wrapper inspired by https://blog.golang.org/errors-are-values
-type errWriter struct {
-	writer io.Writer
-	err    error
-}
-
-func (w *errWriter) write(str string) {
-	if w.err != nil {
-		return
-	}
-	_, w.err = w.writer.Write([]byte(str))
-}
-
-// Map fields may have key types of non-float scalars, strings and enums.
-// The easiest way to sort them in some deterministic order is to use fmt.
-// If this turns out to be inefficient we can always consider other options,
-// such as doing a Schwartzian transform.
-//
-// Numeric keys are sorted in numeric order per
-// https://developers.google.com/protocol-buffers/docs/proto#maps.
-type mapKeys []reflect.Value
-
-func (s mapKeys) Len() int      { return len(s) }
-func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s mapKeys) Less(i, j int) bool {
-	if k := s[i].Kind(); k == s[j].Kind() {
-		switch k {
-		case reflect.String:
-			return s[i].String() < s[j].String()
-		case reflect.Int32, reflect.Int64:
-			return s[i].Int() < s[j].Int()
-		case reflect.Uint32, reflect.Uint64:
-			return s[i].Uint() < s[j].Uint()
-		}
-	}
-	return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
-}
-
-// checkRequiredFields returns an error if any required field in the given proto message is not set.
-// This function is used by both Marshal and Unmarshal.  While required fields only exist in a
-// proto2 message, a proto3 message can contain proto2 message(s).
-func checkRequiredFields(pb proto.Message) error {
-	// Most well-known type messages do not contain required fields.  The "Any" type may contain
-	// a message that has required fields.
-	//
-	// When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value
-	// field in order to transform that into JSON, and that should have returned an error if a
-	// required field is not set in the embedded message.
-	//
-	// When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the
-	// embedded message to store the serialized message in Any.Value field, and that should have
-	// returned an error if a required field is not set.
-	if _, ok := pb.(wkt); ok {
-		return nil
-	}
-
-	v := reflect.ValueOf(pb)
-	// Skip message if it is not a struct pointer.
-	if v.Kind() != reflect.Ptr {
-		return nil
-	}
-	v = v.Elem()
-	if v.Kind() != reflect.Struct {
-		return nil
-	}
-
-	for i := 0; i < v.NumField(); i++ {
-		field := v.Field(i)
-		sfield := v.Type().Field(i)
-
-		if sfield.PkgPath != "" {
-			// blank PkgPath means the field is exported; skip if not exported
-			continue
-		}
-
-		if strings.HasPrefix(sfield.Name, "XXX_") {
-			continue
-		}
-
-		// Oneof field is an interface implemented by wrapper structs containing the actual oneof
-		// field, i.e. an interface containing &T{real_value}.
-		if sfield.Tag.Get("protobuf_oneof") != "" {
-			if field.Kind() != reflect.Interface {
-				continue
-			}
-			v := field.Elem()
-			if v.Kind() != reflect.Ptr || v.IsNil() {
-				continue
-			}
-			v = v.Elem()
-			if v.Kind() != reflect.Struct || v.NumField() < 1 {
-				continue
-			}
-			field = v.Field(0)
-			sfield = v.Type().Field(0)
-		}
-
-		protoTag := sfield.Tag.Get("protobuf")
-		if protoTag == "" {
-			continue
-		}
-		var prop proto.Properties
-		prop.Init(sfield.Type, sfield.Name, protoTag, &sfield)
-
-		switch field.Kind() {
-		case reflect.Map:
-			if field.IsNil() {
-				continue
-			}
-			// Check each map value.
-			keys := field.MapKeys()
-			for _, k := range keys {
-				v := field.MapIndex(k)
-				if err := checkRequiredFieldsInValue(v); err != nil {
-					return err
-				}
-			}
-		case reflect.Slice:
-			// Handle non-repeated type, e.g. bytes.
-			if !prop.Repeated {
-				if prop.Required && field.IsNil() {
-					return fmt.Errorf("required field %q is not set", prop.Name)
-				}
-				continue
-			}
-
-			// Handle repeated type.
-			if field.IsNil() {
-				continue
-			}
-			// Check each slice item.
-			for i := 0; i < field.Len(); i++ {
-				v := field.Index(i)
-				if err := checkRequiredFieldsInValue(v); err != nil {
-					return err
-				}
-			}
-		case reflect.Ptr:
-			if field.IsNil() {
-				if prop.Required {
-					return fmt.Errorf("required field %q is not set", prop.Name)
-				}
-				continue
-			}
-			if err := checkRequiredFieldsInValue(field); err != nil {
-				return err
-			}
-		}
-	}
-
-	// Handle proto2 extensions.
-	for _, ext := range proto.RegisteredExtensions(pb) {
-		if !proto.HasExtension(pb, ext) {
-			continue
-		}
-		ep, err := proto.GetExtension(pb, ext)
-		if err != nil {
-			return err
-		}
-		err = checkRequiredFieldsInValue(reflect.ValueOf(ep))
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func checkRequiredFieldsInValue(v reflect.Value) error {
-	if pm, ok := v.Interface().(proto.Message); ok {
-		return checkRequiredFields(pm)
-	}
-	return nil
-}
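
The reflect-based implementation deleted above is replaced in this change by a protoreflect-backed rewrite, and the exported unmarshal helpers (Unmarshaler, Unmarshal, UnmarshalString, UnmarshalNext) keep the same shape upstream. A minimal caller-side sketch of that unchanged path follows; `mypb.Device` is again a hypothetical generated message type used purely for illustration.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/jsonpb"

	mypb "example.com/gen/mypb" // hypothetical generated package
)

func main() {
	// AllowUnknownFields keeps decoding permissive when the JSON carries
	// fields the target message does not define.
	u := &jsonpb.Unmarshaler{AllowUnknownFields: true}

	var dev mypb.Device // hypothetical message type
	err := u.Unmarshal(strings.NewReader(`{"id": "olt-1", "extra": 1}`), &dev)
	fmt.Println(&dev, err)
}
```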
diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go
new file mode 100644
index 0000000..e810e6f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/buffer.go
@@ -0,0 +1,324 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+
+	"google.golang.org/protobuf/encoding/prototext"
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+	WireVarint     = 0
+	WireFixed32    = 5
+	WireFixed64    = 1
+	WireBytes      = 2
+	WireStartGroup = 3
+	WireEndGroup   = 4
+)
+
+// EncodeVarint returns the varint encoded bytes of v.
+func EncodeVarint(v uint64) []byte {
+	return protowire.AppendVarint(nil, v)
+}
+
+// SizeVarint returns the length of the varint encoded bytes of v.
+// This is equal to len(EncodeVarint(v)).
+func SizeVarint(v uint64) int {
+	return protowire.SizeVarint(v)
+}
+
+// DecodeVarint parses a varint encoded integer from b,
+// returning the integer value and the length of the varint.
+// It returns (0, 0) if there is a parse error.
+func DecodeVarint(b []byte) (uint64, int) {
+	v, n := protowire.ConsumeVarint(b)
+	if n < 0 {
+		return 0, 0
+	}
+	return v, n
+}
+
+// Buffer is a buffer for encoding and decoding the protobuf wire format.
+// It may be reused between invocations to reduce memory usage.
+type Buffer struct {
+	buf           []byte
+	idx           int
+	deterministic bool
+}
+
+// NewBuffer allocates a new Buffer initialized with buf,
+// where the contents of buf are considered the unread portion of the buffer.
+func NewBuffer(buf []byte) *Buffer {
+	return &Buffer{buf: buf}
+}
+
+// SetDeterministic specifies whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (b *Buffer) SetDeterministic(deterministic bool) {
+	b.deterministic = deterministic
+}
+
+// SetBuf sets buf as the internal buffer,
+// where the contents of buf are considered the unread portion of the buffer.
+func (b *Buffer) SetBuf(buf []byte) {
+	b.buf = buf
+	b.idx = 0
+}
+
+// Reset clears the internal buffer of all written and unread data.
+func (b *Buffer) Reset() {
+	b.buf = b.buf[:0]
+	b.idx = 0
+}
+
+// Bytes returns the internal buffer.
+func (b *Buffer) Bytes() []byte {
+	return b.buf
+}
+
+// Unread returns the unread portion of the buffer.
+func (b *Buffer) Unread() []byte {
+	return b.buf[b.idx:]
+}
+
+// Marshal appends the wire-format encoding of m to the buffer.
+func (b *Buffer) Marshal(m Message) error {
+	var err error
+	b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+	return err
+}
+
+// Unmarshal parses the wire-format message in the buffer and
+// places the decoded results in m.
+// It does not reset m before unmarshaling.
+func (b *Buffer) Unmarshal(m Message) error {
+	err := UnmarshalMerge(b.Unread(), m)
+	b.idx = len(b.buf)
+	return err
+}
+
+type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
+
+func (m *unknownFields) String() string { panic("not implemented") }
+func (m *unknownFields) Reset()         { panic("not implemented") }
+func (m *unknownFields) ProtoMessage()  { panic("not implemented") }
+
+// DebugPrint dumps the encoded bytes of b with a header and footer including s
+// to stdout. This is only intended for debugging.
+func (*Buffer) DebugPrint(s string, b []byte) {
+	m := MessageReflect(new(unknownFields))
+	m.SetUnknown(b)
+	b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
+	fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
+}
+
+// EncodeVarint appends an unsigned varint encoding to the buffer.
+func (b *Buffer) EncodeVarint(v uint64) error {
+	b.buf = protowire.AppendVarint(b.buf, v)
+	return nil
+}
+
+// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag32(v uint64) error {
+	return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+}
+
+// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag64(v uint64) error {
+	return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
+}
+
+// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed32(v uint64) error {
+	b.buf = protowire.AppendFixed32(b.buf, uint32(v))
+	return nil
+}
+
+// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed64(v uint64) error {
+	b.buf = protowire.AppendFixed64(b.buf, uint64(v))
+	return nil
+}
+
+// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
+func (b *Buffer) EncodeRawBytes(v []byte) error {
+	b.buf = protowire.AppendBytes(b.buf, v)
+	return nil
+}
+
+// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
+// It does not validate whether v contains valid UTF-8.
+func (b *Buffer) EncodeStringBytes(v string) error {
+	b.buf = protowire.AppendString(b.buf, v)
+	return nil
+}
+
+// EncodeMessage appends a length-prefixed encoded message to the buffer.
+func (b *Buffer) EncodeMessage(m Message) error {
+	var err error
+	b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
+	b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+	return err
+}
+
+// DecodeVarint consumes an encoded unsigned varint from the buffer.
+func (b *Buffer) DecodeVarint() (uint64, error) {
+	v, n := protowire.ConsumeVarint(b.buf[b.idx:])
+	if n < 0 {
+		return 0, protowire.ParseError(n)
+	}
+	b.idx += n
+	return uint64(v), nil
+}
+
+// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag32() (uint64, error) {
+	v, err := b.DecodeVarint()
+	if err != nil {
+		return 0, err
+	}
+	return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
+}
+
+// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag64() (uint64, error) {
+	v, err := b.DecodeVarint()
+	if err != nil {
+		return 0, err
+	}
+	return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
+}
+
+// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed32() (uint64, error) {
+	v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
+	if n < 0 {
+		return 0, protowire.ParseError(n)
+	}
+	b.idx += n
+	return uint64(v), nil
+}
+
+// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed64() (uint64, error) {
+	v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
+	if n < 0 {
+		return 0, protowire.ParseError(n)
+	}
+	b.idx += n
+	return uint64(v), nil
+}
+
+// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
+// If alloc is specified, it returns a copy of the raw bytes
+// rather than a sub-slice of the buffer.
+func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
+	v, n := protowire.ConsumeBytes(b.buf[b.idx:])
+	if n < 0 {
+		return nil, protowire.ParseError(n)
+	}
+	b.idx += n
+	if alloc {
+		v = append([]byte(nil), v...)
+	}
+	return v, nil
+}
+
+// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
+// It does not validate whether the raw bytes contain valid UTF-8.
+func (b *Buffer) DecodeStringBytes() (string, error) {
+	v, n := protowire.ConsumeString(b.buf[b.idx:])
+	if n < 0 {
+		return "", protowire.ParseError(n)
+	}
+	b.idx += n
+	return v, nil
+}
+
+// DecodeMessage consumes a length-prefixed message from the buffer.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeMessage(m Message) error {
+	v, err := b.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return UnmarshalMerge(v, m)
+}
+
+// DecodeGroup consumes a message group from the buffer.
+// It assumes that the start group marker has already been consumed and
+// consumes all bytes until (and including) the end group marker.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeGroup(m Message) error {
+	v, n, err := consumeGroup(b.buf[b.idx:])
+	if err != nil {
+		return err
+	}
+	b.idx += n
+	return UnmarshalMerge(v, m)
+}
+
+// consumeGroup parses b until it finds an end group marker, returning
+// the raw bytes of the message (excluding the end group marker) and
+// the total length of the message (including the end group marker).
+func consumeGroup(b []byte) ([]byte, int, error) {
+	b0 := b
+	depth := 1 // assume this follows a start group marker
+	for {
+		_, wtyp, tagLen := protowire.ConsumeTag(b)
+		if tagLen < 0 {
+			return nil, 0, protowire.ParseError(tagLen)
+		}
+		b = b[tagLen:]
+
+		var valLen int
+		switch wtyp {
+		case protowire.VarintType:
+			_, valLen = protowire.ConsumeVarint(b)
+		case protowire.Fixed32Type:
+			_, valLen = protowire.ConsumeFixed32(b)
+		case protowire.Fixed64Type:
+			_, valLen = protowire.ConsumeFixed64(b)
+		case protowire.BytesType:
+			_, valLen = protowire.ConsumeBytes(b)
+		case protowire.StartGroupType:
+			depth++
+		case protowire.EndGroupType:
+			depth--
+		default:
+			return nil, 0, errors.New("proto: cannot parse reserved wire type")
+		}
+		if valLen < 0 {
+			return nil, 0, protowire.ParseError(valLen)
+		}
+		b = b[valLen:]
+
+		if depth == 0 {
+			return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
+		}
+	}
+}
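
A quick round-trip sketch of the Buffer API added above, grounded only in methods defined in this file (NewBuffer, EncodeVarint, EncodeStringBytes and their Decode counterparts); the encoded values are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	_ = b.EncodeVarint(300)           // wire-format varint; always returns nil here
	_ = b.EncodeStringBytes("onu-42") // length-prefixed string

	v, err := b.DecodeVarint()
	if err != nil {
		panic(err)
	}
	s, err := b.DecodeStringBytes()
	if err != nil {
		panic(err)
	}
	fmt.Println(v, s) // 300 onu-42
}
```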
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
deleted file mode 100644
index 3cd3249..0000000
--- a/vendor/github.com/golang/protobuf/proto/clone.go
+++ /dev/null
@@ -1,253 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer deep copy and merge.
-// TODO: RawMessage.
-
-package proto
-
-import (
-	"fmt"
-	"log"
-	"reflect"
-	"strings"
-)
-
-// Clone returns a deep copy of a protocol buffer.
-func Clone(src Message) Message {
-	in := reflect.ValueOf(src)
-	if in.IsNil() {
-		return src
-	}
-	out := reflect.New(in.Type().Elem())
-	dst := out.Interface().(Message)
-	Merge(dst, src)
-	return dst
-}
-
-// Merger is the interface representing objects that can merge messages of the same type.
-type Merger interface {
-	// Merge merges src into this message.
-	// Required and optional fields that are set in src will be set to that value in dst.
-	// Elements of repeated fields will be appended.
-	//
-	// Merge may panic if called with a different argument type than the receiver.
-	Merge(src Message)
-}
-
-// generatedMerger is the custom merge method that generated protos will have.
-// We must add this method since a generate Merge method will conflict with
-// many existing protos that have a Merge data field already defined.
-type generatedMerger interface {
-	XXX_Merge(src Message)
-}
-
-// Merge merges src into dst.
-// Required and optional fields that are set in src will be set to that value in dst.
-// Elements of repeated fields will be appended.
-// Merge panics if src and dst are not the same type, or if dst is nil.
-func Merge(dst, src Message) {
-	if m, ok := dst.(Merger); ok {
-		m.Merge(src)
-		return
-	}
-
-	in := reflect.ValueOf(src)
-	out := reflect.ValueOf(dst)
-	if out.IsNil() {
-		panic("proto: nil destination")
-	}
-	if in.Type() != out.Type() {
-		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
-	}
-	if in.IsNil() {
-		return // Merge from nil src is a noop
-	}
-	if m, ok := dst.(generatedMerger); ok {
-		m.XXX_Merge(src)
-		return
-	}
-	mergeStruct(out.Elem(), in.Elem())
-}
-
-func mergeStruct(out, in reflect.Value) {
-	sprop := GetProperties(in.Type())
-	for i := 0; i < in.NumField(); i++ {
-		f := in.Type().Field(i)
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
-	}
-
-	if emIn, err := extendable(in.Addr().Interface()); err == nil {
-		emOut, _ := extendable(out.Addr().Interface())
-		mIn, muIn := emIn.extensionsRead()
-		if mIn != nil {
-			mOut := emOut.extensionsWrite()
-			muIn.Lock()
-			mergeExtension(mOut, mIn)
-			muIn.Unlock()
-		}
-	}
-
-	uf := in.FieldByName("XXX_unrecognized")
-	if !uf.IsValid() {
-		return
-	}
-	uin := uf.Bytes()
-	if len(uin) > 0 {
-		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
-	}
-}
-
-// mergeAny performs a merge between two values of the same type.
-// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
-// prop is set if this is a struct field (it may be nil).
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
-	if in.Type() == protoMessageType {
-		if !in.IsNil() {
-			if out.IsNil() {
-				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
-			} else {
-				Merge(out.Interface().(Message), in.Interface().(Message))
-			}
-		}
-		return
-	}
-	switch in.Kind() {
-	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
-		reflect.String, reflect.Uint32, reflect.Uint64:
-		if !viaPtr && isProto3Zero(in) {
-			return
-		}
-		out.Set(in)
-	case reflect.Interface:
-		// Probably a oneof field; copy non-nil values.
-		if in.IsNil() {
-			return
-		}
-		// Allocate destination if it is not set, or set to a different type.
-		// Otherwise we will merge as normal.
-		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
-			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
-		}
-		mergeAny(out.Elem(), in.Elem(), false, nil)
-	case reflect.Map:
-		if in.Len() == 0 {
-			return
-		}
-		if out.IsNil() {
-			out.Set(reflect.MakeMap(in.Type()))
-		}
-		// For maps with value types of *T or []byte we need to deep copy each value.
-		elemKind := in.Type().Elem().Kind()
-		for _, key := range in.MapKeys() {
-			var val reflect.Value
-			switch elemKind {
-			case reflect.Ptr:
-				val = reflect.New(in.Type().Elem().Elem())
-				mergeAny(val, in.MapIndex(key), false, nil)
-			case reflect.Slice:
-				val = in.MapIndex(key)
-				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
-			default:
-				val = in.MapIndex(key)
-			}
-			out.SetMapIndex(key, val)
-		}
-	case reflect.Ptr:
-		if in.IsNil() {
-			return
-		}
-		if out.IsNil() {
-			out.Set(reflect.New(in.Elem().Type()))
-		}
-		mergeAny(out.Elem(), in.Elem(), true, nil)
-	case reflect.Slice:
-		if in.IsNil() {
-			return
-		}
-		if in.Type().Elem().Kind() == reflect.Uint8 {
-			// []byte is a scalar bytes field, not a repeated field.
-
-			// Edge case: if this is in a proto3 message, a zero length
-			// bytes field is considered the zero value, and should not
-			// be merged.
-			if prop != nil && prop.proto3 && in.Len() == 0 {
-				return
-			}
-
-			// Make a deep copy.
-			// Append to []byte{} instead of []byte(nil) so that we never end up
-			// with a nil result.
-			out.SetBytes(append([]byte{}, in.Bytes()...))
-			return
-		}
-		n := in.Len()
-		if out.IsNil() {
-			out.Set(reflect.MakeSlice(in.Type(), 0, n))
-		}
-		switch in.Type().Elem().Kind() {
-		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
-			reflect.String, reflect.Uint32, reflect.Uint64:
-			out.Set(reflect.AppendSlice(out, in))
-		default:
-			for i := 0; i < n; i++ {
-				x := reflect.Indirect(reflect.New(in.Type().Elem()))
-				mergeAny(x, in.Index(i), false, nil)
-				out.Set(reflect.Append(out, x))
-			}
-		}
-	case reflect.Struct:
-		mergeStruct(out, in)
-	default:
-		// unknown type, so not a protocol buffer
-		log.Printf("proto: don't know how to copy %v", in)
-	}
-}
-
-func mergeExtension(out, in map[int32]Extension) {
-	for extNum, eIn := range in {
-		eOut := Extension{desc: eIn.desc}
-		if eIn.value != nil {
-			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
-			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
-			eOut.value = v.Interface()
-		}
-		if eIn.enc != nil {
-			eOut.enc = make([]byte, len(eIn.enc))
-			copy(eOut.enc, eIn.enc)
-		}
-
-		out[extNum] = eOut
-	}
-}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
deleted file mode 100644
index 63b0f08..0000000
--- a/vendor/github.com/golang/protobuf/proto/decode.go
+++ /dev/null
@@ -1,427 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for decoding protocol buffer data to construct in-memory representations.
- */
-
-import (
-	"errors"
-	"fmt"
-	"io"
-)
-
-// errOverflow is returned when an integer is too large to be represented.
-var errOverflow = errors.New("proto: integer overflow")
-
-// ErrInternalBadWireType is returned by generated code when an incorrect
-// wire type is encountered. It does not get returned to user code.
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-
-// DecodeVarint reads a varint-encoded integer from the slice.
-// It returns the integer and the number of bytes consumed, or
-// zero if there is not enough.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func DecodeVarint(buf []byte) (x uint64, n int) {
-	for shift := uint(0); shift < 64; shift += 7 {
-		if n >= len(buf) {
-			return 0, 0
-		}
-		b := uint64(buf[n])
-		n++
-		x |= (b & 0x7F) << shift
-		if (b & 0x80) == 0 {
-			return x, n
-		}
-	}
-
-	// The number is too large to represent in a 64-bit value.
-	return 0, 0
-}
-
-func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
-	i := p.index
-	l := len(p.buf)
-
-	for shift := uint(0); shift < 64; shift += 7 {
-		if i >= l {
-			err = io.ErrUnexpectedEOF
-			return
-		}
-		b := p.buf[i]
-		i++
-		x |= (uint64(b) & 0x7F) << shift
-		if b < 0x80 {
-			p.index = i
-			return
-		}
-	}
-
-	// The number is too large to represent in a 64-bit value.
-	err = errOverflow
-	return
-}
-
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
-	i := p.index
-	buf := p.buf
-
-	if i >= len(buf) {
-		return 0, io.ErrUnexpectedEOF
-	} else if buf[i] < 0x80 {
-		p.index++
-		return uint64(buf[i]), nil
-	} else if len(buf)-i < 10 {
-		return p.decodeVarintSlow()
-	}
-
-	var b uint64
-	// we already checked the first byte
-	x = uint64(buf[i]) - 0x80
-	i++
-
-	b = uint64(buf[i])
-	i++
-	x += b << 7
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 7
-
-	b = uint64(buf[i])
-	i++
-	x += b << 14
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 14
-
-	b = uint64(buf[i])
-	i++
-	x += b << 21
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 21
-
-	b = uint64(buf[i])
-	i++
-	x += b << 28
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 28
-
-	b = uint64(buf[i])
-	i++
-	x += b << 35
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 35
-
-	b = uint64(buf[i])
-	i++
-	x += b << 42
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 42
-
-	b = uint64(buf[i])
-	i++
-	x += b << 49
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 49
-
-	b = uint64(buf[i])
-	i++
-	x += b << 56
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 56
-
-	b = uint64(buf[i])
-	i++
-	x += b << 63
-	if b&0x80 == 0 {
-		goto done
-	}
-
-	return 0, errOverflow
-
-done:
-	p.index = i
-	return x, nil
-}
-
-// DecodeFixed64 reads a 64-bit integer from the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) DecodeFixed64() (x uint64, err error) {
-	// x, err already 0
-	i := p.index + 8
-	if i < 0 || i > len(p.buf) {
-		err = io.ErrUnexpectedEOF
-		return
-	}
-	p.index = i
-
-	x = uint64(p.buf[i-8])
-	x |= uint64(p.buf[i-7]) << 8
-	x |= uint64(p.buf[i-6]) << 16
-	x |= uint64(p.buf[i-5]) << 24
-	x |= uint64(p.buf[i-4]) << 32
-	x |= uint64(p.buf[i-3]) << 40
-	x |= uint64(p.buf[i-2]) << 48
-	x |= uint64(p.buf[i-1]) << 56
-	return
-}
-
-// DecodeFixed32 reads a 32-bit integer from the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) DecodeFixed32() (x uint64, err error) {
-	// x, err already 0
-	i := p.index + 4
-	if i < 0 || i > len(p.buf) {
-		err = io.ErrUnexpectedEOF
-		return
-	}
-	p.index = i
-
-	x = uint64(p.buf[i-4])
-	x |= uint64(p.buf[i-3]) << 8
-	x |= uint64(p.buf[i-2]) << 16
-	x |= uint64(p.buf[i-1]) << 24
-	return
-}
-
-// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
-// from the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
-	x, err = p.DecodeVarint()
-	if err != nil {
-		return
-	}
-	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
-	return
-}
-
-// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
-// from  the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
-	x, err = p.DecodeVarint()
-	if err != nil {
-		return
-	}
-	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
-	return
-}
-
-// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
-	n, err := p.DecodeVarint()
-	if err != nil {
-		return nil, err
-	}
-
-	nb := int(n)
-	if nb < 0 {
-		return nil, fmt.Errorf("proto: bad byte length %d", nb)
-	}
-	end := p.index + nb
-	if end < p.index || end > len(p.buf) {
-		return nil, io.ErrUnexpectedEOF
-	}
-
-	if !alloc {
-		// todo: check if can get more uses of alloc=false
-		buf = p.buf[p.index:end]
-		p.index += nb
-		return
-	}
-
-	buf = make([]byte, nb)
-	copy(buf, p.buf[p.index:])
-	p.index += nb
-	return
-}
-
-// DecodeStringBytes reads an encoded string from the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) DecodeStringBytes() (s string, err error) {
-	buf, err := p.DecodeRawBytes(false)
-	if err != nil {
-		return
-	}
-	return string(buf), nil
-}
-
-// Unmarshaler is the interface representing objects that can
-// unmarshal themselves.  The argument points to data that may be
-// overwritten, so implementations should not keep references to the
-// buffer.
-// Unmarshal implementations should not clear the receiver.
-// Any unmarshaled data should be merged into the receiver.
-// Callers of Unmarshal that do not want to retain existing data
-// should Reset the receiver before calling Unmarshal.
-type Unmarshaler interface {
-	Unmarshal([]byte) error
-}
-
-// newUnmarshaler is the interface representing objects that can
-// unmarshal themselves. The semantics are identical to Unmarshaler.
-//
-// This exists to support protoc-gen-go generated messages.
-// The proto package will stop type-asserting to this interface in the future.
-//
-// DO NOT DEPEND ON THIS.
-type newUnmarshaler interface {
-	XXX_Unmarshal([]byte) error
-}
-
-// Unmarshal parses the protocol buffer representation in buf and places the
-// decoded result in pb.  If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// Unmarshal resets pb before starting to unmarshal, so any
-// existing data in pb is always removed. Use UnmarshalMerge
-// to preserve and append to existing data.
-func Unmarshal(buf []byte, pb Message) error {
-	pb.Reset()
-	if u, ok := pb.(newUnmarshaler); ok {
-		return u.XXX_Unmarshal(buf)
-	}
-	if u, ok := pb.(Unmarshaler); ok {
-		return u.Unmarshal(buf)
-	}
-	return NewBuffer(buf).Unmarshal(pb)
-}
-
-// UnmarshalMerge parses the protocol buffer representation in buf and
-// writes the decoded result to pb.  If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// UnmarshalMerge merges into existing data in pb.
-// Most code should use Unmarshal instead.
-func UnmarshalMerge(buf []byte, pb Message) error {
-	if u, ok := pb.(newUnmarshaler); ok {
-		return u.XXX_Unmarshal(buf)
-	}
-	if u, ok := pb.(Unmarshaler); ok {
-		// NOTE: The history of proto have unfortunately been inconsistent
-		// whether Unmarshaler should or should not implicitly clear itself.
-		// Some implementations do, most do not.
-		// Thus, calling this here may or may not do what people want.
-		//
-		// See https://github.com/golang/protobuf/issues/424
-		return u.Unmarshal(buf)
-	}
-	return NewBuffer(buf).Unmarshal(pb)
-}
-
-// DecodeMessage reads a count-delimited message from the Buffer.
-func (p *Buffer) DecodeMessage(pb Message) error {
-	enc, err := p.DecodeRawBytes(false)
-	if err != nil {
-		return err
-	}
-	return NewBuffer(enc).Unmarshal(pb)
-}
-
-// DecodeGroup reads a tag-delimited group from the Buffer.
-// StartGroup tag is already consumed. This function consumes
-// EndGroup tag.
-func (p *Buffer) DecodeGroup(pb Message) error {
-	b := p.buf[p.index:]
-	x, y := findEndGroup(b)
-	if x < 0 {
-		return io.ErrUnexpectedEOF
-	}
-	err := Unmarshal(b[:x], pb)
-	p.index += y
-	return err
-}
-
-// Unmarshal parses the protocol buffer representation in the
-// Buffer and places the decoded result in pb.  If the struct
-// underlying pb does not match the data in the buffer, the results can be
-// unpredictable.
-//
-// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
-func (p *Buffer) Unmarshal(pb Message) error {
-	// If the object can unmarshal itself, let it.
-	if u, ok := pb.(newUnmarshaler); ok {
-		err := u.XXX_Unmarshal(p.buf[p.index:])
-		p.index = len(p.buf)
-		return err
-	}
-	if u, ok := pb.(Unmarshaler); ok {
-		// NOTE: The history of proto have unfortunately been inconsistent
-		// whether Unmarshaler should or should not implicitly clear itself.
-		// Some implementations do, most do not.
-		// Thus, calling this here may or may not do what people want.
-		//
-		// See https://github.com/golang/protobuf/issues/424
-		err := u.Unmarshal(p.buf[p.index:])
-		p.index = len(p.buf)
-		return err
-	}
-
-	// Slow workaround for messages that aren't Unmarshalers.
-	// This includes some hand-coded .pb.go files and
-	// bootstrap protos.
-	// TODO: fix all of those and then add Unmarshal to
-	// the Message interface. Then:
-	// The cast above and code below can be deleted.
-	// The old unmarshaler can be deleted.
-	// Clients can call Unmarshal directly (can already do that, actually).
-	var info InternalMessageInfo
-	err := info.Unmarshal(pb, p.buf[p.index:])
-	p.index = len(p.buf)
-	return err
-}
diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go
new file mode 100644
index 0000000..d399bf0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/defaults.go
@@ -0,0 +1,63 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// SetDefaults sets unpopulated scalar fields to their default values.
+// Fields within a oneof are not set even if they have a default value.
+// SetDefaults is recursively called upon any populated message fields.
+func SetDefaults(m Message) {
+	if m != nil {
+		setDefaults(MessageReflect(m))
+	}
+}
+
+func setDefaults(m protoreflect.Message) {
+	fds := m.Descriptor().Fields()
+	for i := 0; i < fds.Len(); i++ {
+		fd := fds.Get(i)
+		if !m.Has(fd) {
+			if fd.HasDefault() && fd.ContainingOneof() == nil {
+				v := fd.Default()
+				if fd.Kind() == protoreflect.BytesKind {
+					v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
+				}
+				m.Set(fd, v)
+			}
+			continue
+		}
+	}
+
+	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+		switch {
+		// Handle singular message.
+		case fd.Cardinality() != protoreflect.Repeated:
+			if fd.Message() != nil {
+				setDefaults(m.Get(fd).Message())
+			}
+		// Handle list of messages.
+		case fd.IsList():
+			if fd.Message() != nil {
+				ls := m.Get(fd).List()
+				for i := 0; i < ls.Len(); i++ {
+					setDefaults(ls.Get(i).Message())
+				}
+			}
+		// Handle map of messages.
+		case fd.IsMap():
+			if fd.MapValue().Message() != nil {
+				ms := m.Get(fd).Map()
+				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+					setDefaults(v.Message())
+					return true
+				})
+			}
+		}
+		return true
+	})
+}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
index 35b882c..e8db57e 100644
--- a/vendor/github.com/golang/protobuf/proto/deprecated.go
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -1,63 +1,113 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2018 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 package proto
 
-import "errors"
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
 
-// Deprecated: do not use.
+	protoV2 "google.golang.org/protobuf/proto"
+)
+
+var (
+	// Deprecated: No longer returned.
+	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// Deprecated: No longer returned.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+
+	// Deprecated: No longer returned.
+	ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+)
+
+// Deprecated: Do not use.
 type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
 
-// Deprecated: do not use.
+// Deprecated: Do not use.
 func GetStats() Stats { return Stats{} }
 
-// Deprecated: do not use.
+// Deprecated: Do not use.
 func MarshalMessageSet(interface{}) ([]byte, error) {
 	return nil, errors.New("proto: not implemented")
 }
 
-// Deprecated: do not use.
+// Deprecated: Do not use.
 func UnmarshalMessageSet([]byte, interface{}) error {
 	return errors.New("proto: not implemented")
 }
 
-// Deprecated: do not use.
+// Deprecated: Do not use.
 func MarshalMessageSetJSON(interface{}) ([]byte, error) {
 	return nil, errors.New("proto: not implemented")
 }
 
-// Deprecated: do not use.
+// Deprecated: Do not use.
 func UnmarshalMessageSetJSON([]byte, interface{}) error {
 	return errors.New("proto: not implemented")
 }
 
-// Deprecated: do not use.
+// Deprecated: Do not use.
 func RegisterMessageSetType(Message, int32, string) {}
+
+// Deprecated: Do not use.
+func EnumName(m map[int32]string, v int32) string {
+	s, ok := m[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
+
+// Deprecated: Do not use.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+	if data[0] == '"' {
+		// New style: enums are strings.
+		var repr string
+		if err := json.Unmarshal(data, &repr); err != nil {
+			return -1, err
+		}
+		val, ok := m[repr]
+		if !ok {
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+		}
+		return val, nil
+	}
+	// Old style: enums are ints.
+	var val int32
+	if err := json.Unmarshal(data, &val); err != nil {
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+	}
+	return val, nil
+}
+
+// Deprecated: Do not use; this type existed for internal-use only.
+type InternalMessageInfo struct{}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) DiscardUnknown(m Message) {
+	DiscardUnknown(m)
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
+	return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Merge(dst, src Message) {
+	protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Size(m Message) int {
+	return protoV2.Size(MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
+	return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
index dea2617..2187e87 100644
--- a/vendor/github.com/golang/protobuf/proto/discard.go
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -1,48 +1,13 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2017 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 package proto
 
 import (
-	"fmt"
-	"reflect"
-	"strings"
-	"sync"
-	"sync/atomic"
+	"google.golang.org/protobuf/reflect/protoreflect"
 )
 
-type generatedDiscarder interface {
-	XXX_DiscardUnknown()
-}
-
 // DiscardUnknown recursively discards all unknown fields from this message
 // and all embedded messages.
 //
@@ -51,300 +16,43 @@
 // marshal to be able to produce a message that continues to have those
 // unrecognized fields. To avoid this, DiscardUnknown is used to
 // explicitly clear the unknown fields after unmarshaling.
-//
-// For proto2 messages, the unknown fields of message extensions are only
-// discarded from messages that have been accessed via GetExtension.
 func DiscardUnknown(m Message) {
-	if m, ok := m.(generatedDiscarder); ok {
-		m.XXX_DiscardUnknown()
-		return
-	}
-	// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
-	// but the master branch has no implementation for InternalMessageInfo,
-	// so it would be more work to replicate that approach.
-	discardLegacy(m)
-}
-
-// DiscardUnknown recursively discards all unknown fields.
-func (a *InternalMessageInfo) DiscardUnknown(m Message) {
-	di := atomicLoadDiscardInfo(&a.discard)
-	if di == nil {
-		di = getDiscardInfo(reflect.TypeOf(m).Elem())
-		atomicStoreDiscardInfo(&a.discard, di)
-	}
-	di.discard(toPointer(&m))
-}
-
-type discardInfo struct {
-	typ reflect.Type
-
-	initialized int32 // 0: only typ is valid, 1: everything is valid
-	lock        sync.Mutex
-
-	fields       []discardFieldInfo
-	unrecognized field
-}
-
-type discardFieldInfo struct {
-	field   field // Offset of field, guaranteed to be valid
-	discard func(src pointer)
-}
-
-var (
-	discardInfoMap  = map[reflect.Type]*discardInfo{}
-	discardInfoLock sync.Mutex
-)
-
-func getDiscardInfo(t reflect.Type) *discardInfo {
-	discardInfoLock.Lock()
-	defer discardInfoLock.Unlock()
-	di := discardInfoMap[t]
-	if di == nil {
-		di = &discardInfo{typ: t}
-		discardInfoMap[t] = di
-	}
-	return di
-}
-
-func (di *discardInfo) discard(src pointer) {
-	if src.isNil() {
-		return // Nothing to do.
-	}
-
-	if atomic.LoadInt32(&di.initialized) == 0 {
-		di.computeDiscardInfo()
-	}
-
-	for _, fi := range di.fields {
-		sfp := src.offset(fi.field)
-		fi.discard(sfp)
-	}
-
-	// For proto2 messages, only discard unknown fields in message extensions
-	// that have been accessed via GetExtension.
-	if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
-		// Ignore lock since DiscardUnknown is not concurrency safe.
-		emm, _ := em.extensionsRead()
-		for _, mx := range emm {
-			if m, ok := mx.value.(Message); ok {
-				DiscardUnknown(m)
-			}
-		}
-	}
-
-	if di.unrecognized.IsValid() {
-		*src.offset(di.unrecognized).toBytes() = nil
+	if m != nil {
+		discardUnknown(MessageReflect(m))
 	}
 }
 
-func (di *discardInfo) computeDiscardInfo() {
-	di.lock.Lock()
-	defer di.lock.Unlock()
-	if di.initialized != 0 {
-		return
-	}
-	t := di.typ
-	n := t.NumField()
-
-	for i := 0; i < n; i++ {
-		f := t.Field(i)
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-
-		dfi := discardFieldInfo{field: toField(&f)}
-		tf := f.Type
-
-		// Unwrap tf to get its most basic type.
-		var isPointer, isSlice bool
-		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
-			isSlice = true
-			tf = tf.Elem()
-		}
-		if tf.Kind() == reflect.Ptr {
-			isPointer = true
-			tf = tf.Elem()
-		}
-		if isPointer && isSlice && tf.Kind() != reflect.Struct {
-			panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
-		}
-
-		switch tf.Kind() {
-		case reflect.Struct:
-			switch {
-			case !isPointer:
-				panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
-			case isSlice: // E.g., []*pb.T
-				di := getDiscardInfo(tf)
-				dfi.discard = func(src pointer) {
-					sps := src.getPointerSlice()
-					for _, sp := range sps {
-						if !sp.isNil() {
-							di.discard(sp)
-						}
-					}
-				}
-			default: // E.g., *pb.T
-				di := getDiscardInfo(tf)
-				dfi.discard = func(src pointer) {
-					sp := src.getPointer()
-					if !sp.isNil() {
-						di.discard(sp)
-					}
+func discardUnknown(m protoreflect.Message) {
+	m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+		switch {
+		// Handle singular message.
+		case fd.Cardinality() != protoreflect.Repeated:
+			if fd.Message() != nil {
+				discardUnknown(m.Get(fd).Message())
+			}
+		// Handle list of messages.
+		case fd.IsList():
+			if fd.Message() != nil {
+				ls := m.Get(fd).List()
+				for i := 0; i < ls.Len(); i++ {
+					discardUnknown(ls.Get(i).Message())
 				}
 			}
-		case reflect.Map:
-			switch {
-			case isPointer || isSlice:
-				panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
-			default: // E.g., map[K]V
-				if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
-					dfi.discard = func(src pointer) {
-						sm := src.asPointerTo(tf).Elem()
-						if sm.Len() == 0 {
-							return
-						}
-						for _, key := range sm.MapKeys() {
-							val := sm.MapIndex(key)
-							DiscardUnknown(val.Interface().(Message))
-						}
-					}
-				} else {
-					dfi.discard = func(pointer) {} // Noop
-				}
-			}
-		case reflect.Interface:
-			// Must be oneof field.
-			switch {
-			case isPointer || isSlice:
-				panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
-			default: // E.g., interface{}
-				// TODO: Make this faster?
-				dfi.discard = func(src pointer) {
-					su := src.asPointerTo(tf).Elem()
-					if !su.IsNil() {
-						sv := su.Elem().Elem().Field(0)
-						if sv.Kind() == reflect.Ptr && sv.IsNil() {
-							return
-						}
-						switch sv.Type().Kind() {
-						case reflect.Ptr: // Proto struct (e.g., *T)
-							DiscardUnknown(sv.Interface().(Message))
-						}
-					}
-				}
-			}
-		default:
-			continue
-		}
-		di.fields = append(di.fields, dfi)
-	}
-
-	di.unrecognized = invalidField
-	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
-		if f.Type != reflect.TypeOf([]byte{}) {
-			panic("expected XXX_unrecognized to be of type []byte")
-		}
-		di.unrecognized = toField(&f)
-	}
-
-	atomic.StoreInt32(&di.initialized, 1)
-}
-
-func discardLegacy(m Message) {
-	v := reflect.ValueOf(m)
-	if v.Kind() != reflect.Ptr || v.IsNil() {
-		return
-	}
-	v = v.Elem()
-	if v.Kind() != reflect.Struct {
-		return
-	}
-	t := v.Type()
-
-	for i := 0; i < v.NumField(); i++ {
-		f := t.Field(i)
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-		vf := v.Field(i)
-		tf := f.Type
-
-		// Unwrap tf to get its most basic type.
-		var isPointer, isSlice bool
-		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
-			isSlice = true
-			tf = tf.Elem()
-		}
-		if tf.Kind() == reflect.Ptr {
-			isPointer = true
-			tf = tf.Elem()
-		}
-		if isPointer && isSlice && tf.Kind() != reflect.Struct {
-			panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
-		}
-
-		switch tf.Kind() {
-		case reflect.Struct:
-			switch {
-			case !isPointer:
-				panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
-			case isSlice: // E.g., []*pb.T
-				for j := 0; j < vf.Len(); j++ {
-					discardLegacy(vf.Index(j).Interface().(Message))
-				}
-			default: // E.g., *pb.T
-				discardLegacy(vf.Interface().(Message))
-			}
-		case reflect.Map:
-			switch {
-			case isPointer || isSlice:
-				panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
-			default: // E.g., map[K]V
-				tv := vf.Type().Elem()
-				if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
-					for _, key := range vf.MapKeys() {
-						val := vf.MapIndex(key)
-						discardLegacy(val.Interface().(Message))
-					}
-				}
-			}
-		case reflect.Interface:
-			// Must be oneof field.
-			switch {
-			case isPointer || isSlice:
-				panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
-			default: // E.g., test_proto.isCommunique_Union interface
-				if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
-					vf = vf.Elem() // E.g., *test_proto.Communique_Msg
-					if !vf.IsNil() {
-						vf = vf.Elem()   // E.g., test_proto.Communique_Msg
-						vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
-						if vf.Kind() == reflect.Ptr {
-							discardLegacy(vf.Interface().(Message))
-						}
-					}
-				}
+		// Handle map of messages.
+		case fd.IsMap():
+			if fd.MapValue().Message() != nil {
+				ms := m.Get(fd).Map()
+				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+					discardUnknown(v.Message())
+					return true
+				})
 			}
 		}
-	}
+		return true
+	})
 
-	if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
-		if vf.Type() != reflect.TypeOf([]byte{}) {
-			panic("expected XXX_unrecognized to be of type []byte")
-		}
-		vf.Set(reflect.ValueOf([]byte(nil)))
-	}
-
-	// For proto2 messages, only discard unknown fields in message extensions
-	// that have been accessed via GetExtension.
-	if em, err := extendable(m); err == nil {
-		// Ignore lock since discardLegacy is not concurrency safe.
-		emm, _ := em.extensionsRead()
-		for _, mx := range emm {
-			if m, ok := mx.value.(Message); ok {
-				discardLegacy(m)
-			}
-		}
+	// Discard unknown fields.
+	if len(m.GetUnknown()) > 0 {
+		m.SetUnknown(nil)
 	}
 }
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
deleted file mode 100644
index 3abfed2..0000000
--- a/vendor/github.com/golang/protobuf/proto/encode.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
-	"errors"
-	"reflect"
-)
-
-var (
-	// errRepeatedHasNil is the error returned if Marshal is called with
-	// a struct with a repeated field containing a nil element.
-	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
-
-	// errOneofHasNil is the error returned if Marshal is called with
-	// a struct with a oneof field containing a nil element.
-	errOneofHasNil = errors.New("proto: oneof field has nil value")
-
-	// ErrNil is the error returned if Marshal is called with nil.
-	ErrNil = errors.New("proto: Marshal called with nil")
-
-	// ErrTooLarge is the error returned if Marshal is called with a
-	// message that encodes to >2GB.
-	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-)
-
-// The fundamental encoders that put bytes on the wire.
-// Those that take integer types all accept uint64 and are
-// therefore of type valueEncoder.
-
-const maxVarintBytes = 10 // maximum length of a varint
-
-// EncodeVarint returns the varint encoding of x.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-// Not used by the package itself, but helpful to clients
-// wishing to use the same encoding.
-func EncodeVarint(x uint64) []byte {
-	var buf [maxVarintBytes]byte
-	var n int
-	for n = 0; x > 127; n++ {
-		buf[n] = 0x80 | uint8(x&0x7F)
-		x >>= 7
-	}
-	buf[n] = uint8(x)
-	n++
-	return buf[0:n]
-}
-
-// EncodeVarint writes a varint-encoded integer to the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) EncodeVarint(x uint64) error {
-	for x >= 1<<7 {
-		p.buf = append(p.buf, uint8(x&0x7f|0x80))
-		x >>= 7
-	}
-	p.buf = append(p.buf, uint8(x))
-	return nil
-}
-
-// SizeVarint returns the varint encoding size of an integer.
-func SizeVarint(x uint64) int {
-	switch {
-	case x < 1<<7:
-		return 1
-	case x < 1<<14:
-		return 2
-	case x < 1<<21:
-		return 3
-	case x < 1<<28:
-		return 4
-	case x < 1<<35:
-		return 5
-	case x < 1<<42:
-		return 6
-	case x < 1<<49:
-		return 7
-	case x < 1<<56:
-		return 8
-	case x < 1<<63:
-		return 9
-	}
-	return 10
-}
-
-// EncodeFixed64 writes a 64-bit integer to the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) EncodeFixed64(x uint64) error {
-	p.buf = append(p.buf,
-		uint8(x),
-		uint8(x>>8),
-		uint8(x>>16),
-		uint8(x>>24),
-		uint8(x>>32),
-		uint8(x>>40),
-		uint8(x>>48),
-		uint8(x>>56))
-	return nil
-}
-
-// EncodeFixed32 writes a 32-bit integer to the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) EncodeFixed32(x uint64) error {
-	p.buf = append(p.buf,
-		uint8(x),
-		uint8(x>>8),
-		uint8(x>>16),
-		uint8(x>>24))
-	return nil
-}
-
-// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
-// to the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) EncodeZigzag64(x uint64) error {
-	// use signed number to get arithmetic right shift.
-	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
-// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
-// to the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) EncodeZigzag32(x uint64) error {
-	// use signed number to get arithmetic right shift.
-	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
-// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) EncodeRawBytes(b []byte) error {
-	p.EncodeVarint(uint64(len(b)))
-	p.buf = append(p.buf, b...)
-	return nil
-}
-
-// EncodeStringBytes writes an encoded string to the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) EncodeStringBytes(s string) error {
-	p.EncodeVarint(uint64(len(s)))
-	p.buf = append(p.buf, s...)
-	return nil
-}
-
-// Marshaler is the interface representing objects that can marshal themselves.
-type Marshaler interface {
-	Marshal() ([]byte, error)
-}
-
-// EncodeMessage writes the protocol buffer to the Buffer,
-// prefixed by a varint-encoded length.
-func (p *Buffer) EncodeMessage(pb Message) error {
-	siz := Size(pb)
-	p.EncodeVarint(uint64(siz))
-	return p.Marshal(pb)
-}
-
-// All protocol buffer fields are nillable, but be careful.
-func isNil(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
-		return v.IsNil()
-	}
-	return false
-}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
deleted file mode 100644
index f9b6e41..0000000
--- a/vendor/github.com/golang/protobuf/proto/equal.go
+++ /dev/null
@@ -1,301 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer comparison.
-
-package proto
-
-import (
-	"bytes"
-	"log"
-	"reflect"
-	"strings"
-)
-
-/*
-Equal returns true iff protocol buffers a and b are equal.
-The arguments must both be pointers to protocol buffer structs.
-
-Equality is defined in this way:
-  - Two messages are equal iff they are the same type,
-    corresponding fields are equal, unknown field sets
-    are equal, and extensions sets are equal.
-  - Two set scalar fields are equal iff their values are equal.
-    If the fields are of a floating-point type, remember that
-    NaN != x for all x, including NaN. If the message is defined
-    in a proto3 .proto file, fields are not "set"; specifically,
-    zero length proto3 "bytes" fields are equal (nil == {}).
-  - Two repeated fields are equal iff their lengths are the same,
-    and their corresponding elements are equal. Note a "bytes" field,
-    although represented by []byte, is not a repeated field and the
-    rule for the scalar fields described above applies.
-  - Two unset fields are equal.
-  - Two unknown field sets are equal if their current
-    encoded state is equal.
-  - Two extension sets are equal iff they have corresponding
-    elements that are pairwise equal.
-  - Two map fields are equal iff their lengths are the same,
-    and they contain the same set of elements. Zero-length map
-    fields are equal.
-  - Every other combination of things are not equal.
-
-The return value is undefined if a and b are not protocol buffers.
-*/
-func Equal(a, b Message) bool {
-	if a == nil || b == nil {
-		return a == b
-	}
-	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
-	if v1.Type() != v2.Type() {
-		return false
-	}
-	if v1.Kind() == reflect.Ptr {
-		if v1.IsNil() {
-			return v2.IsNil()
-		}
-		if v2.IsNil() {
-			return false
-		}
-		v1, v2 = v1.Elem(), v2.Elem()
-	}
-	if v1.Kind() != reflect.Struct {
-		return false
-	}
-	return equalStruct(v1, v2)
-}
-
-// v1 and v2 are known to have the same type.
-func equalStruct(v1, v2 reflect.Value) bool {
-	sprop := GetProperties(v1.Type())
-	for i := 0; i < v1.NumField(); i++ {
-		f := v1.Type().Field(i)
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-		f1, f2 := v1.Field(i), v2.Field(i)
-		if f.Type.Kind() == reflect.Ptr {
-			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
-				// both unset
-				continue
-			} else if n1 != n2 {
-				// set/unset mismatch
-				return false
-			}
-			f1, f2 = f1.Elem(), f2.Elem()
-		}
-		if !equalAny(f1, f2, sprop.Prop[i]) {
-			return false
-		}
-	}
-
-	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
-		em2 := v2.FieldByName("XXX_InternalExtensions")
-		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
-			return false
-		}
-	}
-
-	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
-		em2 := v2.FieldByName("XXX_extensions")
-		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
-			return false
-		}
-	}
-
-	uf := v1.FieldByName("XXX_unrecognized")
-	if !uf.IsValid() {
-		return true
-	}
-
-	u1 := uf.Bytes()
-	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
-	return bytes.Equal(u1, u2)
-}
-
-// v1 and v2 are known to have the same type.
-// prop may be nil.
-func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
-	if v1.Type() == protoMessageType {
-		m1, _ := v1.Interface().(Message)
-		m2, _ := v2.Interface().(Message)
-		return Equal(m1, m2)
-	}
-	switch v1.Kind() {
-	case reflect.Bool:
-		return v1.Bool() == v2.Bool()
-	case reflect.Float32, reflect.Float64:
-		return v1.Float() == v2.Float()
-	case reflect.Int32, reflect.Int64:
-		return v1.Int() == v2.Int()
-	case reflect.Interface:
-		// Probably a oneof field; compare the inner values.
-		n1, n2 := v1.IsNil(), v2.IsNil()
-		if n1 || n2 {
-			return n1 == n2
-		}
-		e1, e2 := v1.Elem(), v2.Elem()
-		if e1.Type() != e2.Type() {
-			return false
-		}
-		return equalAny(e1, e2, nil)
-	case reflect.Map:
-		if v1.Len() != v2.Len() {
-			return false
-		}
-		for _, key := range v1.MapKeys() {
-			val2 := v2.MapIndex(key)
-			if !val2.IsValid() {
-				// This key was not found in the second map.
-				return false
-			}
-			if !equalAny(v1.MapIndex(key), val2, nil) {
-				return false
-			}
-		}
-		return true
-	case reflect.Ptr:
-		// Maps may have nil values in them, so check for nil.
-		if v1.IsNil() && v2.IsNil() {
-			return true
-		}
-		if v1.IsNil() != v2.IsNil() {
-			return false
-		}
-		return equalAny(v1.Elem(), v2.Elem(), prop)
-	case reflect.Slice:
-		if v1.Type().Elem().Kind() == reflect.Uint8 {
-			// short circuit: []byte
-
-			// Edge case: if this is in a proto3 message, a zero length
-			// bytes field is considered the zero value.
-			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
-				return true
-			}
-			if v1.IsNil() != v2.IsNil() {
-				return false
-			}
-			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
-		}
-
-		if v1.Len() != v2.Len() {
-			return false
-		}
-		for i := 0; i < v1.Len(); i++ {
-			if !equalAny(v1.Index(i), v2.Index(i), prop) {
-				return false
-			}
-		}
-		return true
-	case reflect.String:
-		return v1.Interface().(string) == v2.Interface().(string)
-	case reflect.Struct:
-		return equalStruct(v1, v2)
-	case reflect.Uint32, reflect.Uint64:
-		return v1.Uint() == v2.Uint()
-	}
-
-	// unknown type, so not a protocol buffer
-	log.Printf("proto: don't know how to compare %v", v1)
-	return false
-}
-
-// base is the struct type that the extensions are based on.
-// x1 and x2 are InternalExtensions.
-func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
-	em1, _ := x1.extensionsRead()
-	em2, _ := x2.extensionsRead()
-	return equalExtMap(base, em1, em2)
-}
-
-func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
-	if len(em1) != len(em2) {
-		return false
-	}
-
-	for extNum, e1 := range em1 {
-		e2, ok := em2[extNum]
-		if !ok {
-			return false
-		}
-
-		m1 := extensionAsLegacyType(e1.value)
-		m2 := extensionAsLegacyType(e2.value)
-
-		if m1 == nil && m2 == nil {
-			// Both have only encoded form.
-			if bytes.Equal(e1.enc, e2.enc) {
-				continue
-			}
-			// The bytes are different, but the extensions might still be
-			// equal. We need to decode them to compare.
-		}
-
-		if m1 != nil && m2 != nil {
-			// Both are unencoded.
-			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
-				return false
-			}
-			continue
-		}
-
-		// At least one is encoded. To do a semantically correct comparison
-		// we need to unmarshal them first.
-		var desc *ExtensionDesc
-		if m := extensionMaps[base]; m != nil {
-			desc = m[extNum]
-		}
-		if desc == nil {
-			// If both have only encoded form and the bytes are the same,
-			// it is handled above. We get here when the bytes are different.
-			// We don't know how to decode it, so just compare them as byte
-			// slices.
-			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
-			return false
-		}
-		var err error
-		if m1 == nil {
-			m1, err = decodeExtension(e1.enc, desc)
-		}
-		if m2 == nil && err == nil {
-			m2, err = decodeExtension(e2.enc, desc)
-		}
-		if err != nil {
-			// The encoded form is invalid.
-			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
-			return false
-		}
-		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
-			return false
-		}
-	}
-
-	return true
-}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
index fa88add..42fc120 100644
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -1,607 +1,356 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 package proto
 
-/*
- * Types and routines for supporting protocol buffer extensions.
- */
-
 import (
 	"errors"
 	"fmt"
-	"io"
 	"reflect"
-	"strconv"
-	"sync"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/runtime/protoiface"
+	"google.golang.org/protobuf/runtime/protoimpl"
 )
 
-// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+type (
+	// ExtensionDesc represents an extension descriptor and
+	// is used to interact with an extension field in a message.
+	//
+	// Variables of this type are generated in code by protoc-gen-go.
+	ExtensionDesc = protoimpl.ExtensionInfo
+
+	// ExtensionRange represents a range of message extensions.
+	// Used in code generated by protoc-gen-go.
+	ExtensionRange = protoiface.ExtensionRangeV1
+
+	// Deprecated: Do not use; this is an internal type.
+	Extension = protoimpl.ExtensionFieldV1
+
+	// Deprecated: Do not use; this is an internal type.
+	XXX_InternalExtensions = protoimpl.ExtensionFields
+)
+
+// ErrMissingExtension is the error returned when the requested extension is not present in the message.
 var ErrMissingExtension = errors.New("proto: missing extension")
 
-// ExtensionRange represents a range of message extensions for a protocol buffer.
-// Used in code generated by the protocol compiler.
-type ExtensionRange struct {
-	Start, End int32 // both inclusive
-}
-
-// extendableProto is an interface implemented by any protocol buffer generated by the current
-// proto compiler that may be extended.
-type extendableProto interface {
-	Message
-	ExtensionRangeArray() []ExtensionRange
-	extensionsWrite() map[int32]Extension
-	extensionsRead() (map[int32]Extension, sync.Locker)
-}
-
-// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
-// version of the proto compiler that may be extended.
-type extendableProtoV1 interface {
-	Message
-	ExtensionRangeArray() []ExtensionRange
-	ExtensionMap() map[int32]Extension
-}
-
-// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
-type extensionAdapter struct {
-	extendableProtoV1
-}
-
-func (e extensionAdapter) extensionsWrite() map[int32]Extension {
-	return e.ExtensionMap()
-}
-
-func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
-	return e.ExtensionMap(), notLocker{}
-}
-
-// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
-type notLocker struct{}
-
-func (n notLocker) Lock()   {}
-func (n notLocker) Unlock() {}
-
-// extendable returns the extendableProto interface for the given generated proto message.
-// If the proto message has the old extension format, it returns a wrapper that implements
-// the extendableProto interface.
-func extendable(p interface{}) (extendableProto, error) {
-	switch p := p.(type) {
-	case extendableProto:
-		if isNilPtr(p) {
-			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
-		}
-		return p, nil
-	case extendableProtoV1:
-		if isNilPtr(p) {
-			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
-		}
-		return extensionAdapter{p}, nil
-	}
-	// Don't allocate a specific error containing %T:
-	// this is the hot path for Clone and MarshalText.
-	return nil, errNotExtendable
-}
-
 var errNotExtendable = errors.New("proto: not an extendable proto.Message")
 
-func isNilPtr(x interface{}) bool {
-	v := reflect.ValueOf(x)
-	return v.Kind() == reflect.Ptr && v.IsNil()
-}
-
-// XXX_InternalExtensions is an internal representation of proto extensions.
-//
-// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
-// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
-//
-// The methods of XXX_InternalExtensions are not concurrency safe in general,
-// but calls to logically read-only methods such as has and get may be executed concurrently.
-type XXX_InternalExtensions struct {
-	// The struct must be indirect so that if a user inadvertently copies a
-	// generated message and its embedded XXX_InternalExtensions, they
-	// avoid the mayhem of a copied mutex.
-	//
-	// The mutex serializes all logically read-only operations to p.extensionMap.
-	// It is up to the client to ensure that write operations to p.extensionMap are
-	// mutually exclusive with other accesses.
-	p *struct {
-		mu           sync.Mutex
-		extensionMap map[int32]Extension
+// HasExtension reports whether the extension field is present in m
+// either as an explicitly populated field or as an unknown field.
+func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() {
+		return false
 	}
-}
 
-// extensionsWrite returns the extension map, creating it on first use.
-func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
-	if e.p == nil {
-		e.p = new(struct {
-			mu           sync.Mutex
-			extensionMap map[int32]Extension
+	// Check whether any populated known field matches the field number.
+	xtd := xt.TypeDescriptor()
+	if isValidExtension(mr.Descriptor(), xtd) {
+		has = mr.Has(xtd)
+	} else {
+		mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+			has = int32(fd.Number()) == xt.Field
+			return !has
 		})
-		e.p.extensionMap = make(map[int32]Extension)
 	}
-	return e.p.extensionMap
-}
 
-// extensionsRead returns the extensions map for read-only use.  It may be nil.
-// The caller must hold the returned mutex's lock when accessing Elements within the map.
-func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
-	if e.p == nil {
-		return nil, nil
+	// Check whether any unknown field matches the field number.
+	for b := mr.GetUnknown(); !has && len(b) > 0; {
+		num, _, n := protowire.ConsumeField(b)
+		has = int32(num) == xt.Field
+		b = b[n:]
 	}
-	return e.p.extensionMap, &e.p.mu
+	return has
 }
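The unknown-field check above is a plain linear scan over the message's raw unknown bytes. A minimal runnable sketch of the same walk, using protowire directly; hasFieldNum is an illustrative helper, not part of this package:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// hasFieldNum walks a raw wire payload and reports whether any top-level
// field carries the given field number, mirroring the loop in HasExtension.
func hasFieldNum(b []byte, want protowire.Number) bool {
	for len(b) > 0 {
		num, _, n := protowire.ConsumeField(b)
		if n < 0 {
			return false // malformed input
		}
		if num == want {
			return true
		}
		b = b[n:]
	}
	return false
}

func main() {
	// Encode field 100 as a varint with value 7.
	raw := protowire.AppendTag(nil, 100, protowire.VarintType)
	raw = protowire.AppendVarint(raw, 7)

	fmt.Println(hasFieldNum(raw, 100)) // true
	fmt.Println(hasFieldNum(raw, 101)) // false
}
```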
 
-// ExtensionDesc represents an extension specification.
-// Used in generated code from the protocol compiler.
-type ExtensionDesc struct {
-	ExtendedType  Message     // nil pointer to the type that is being extended
-	ExtensionType interface{} // nil pointer to the extension type
-	Field         int32       // field number
-	Name          string      // fully-qualified name of extension, for text formatting
-	Tag           string      // protobuf tag style
-	Filename      string      // name of the file in which the extension is defined
-}
-
-func (ed *ExtensionDesc) repeated() bool {
-	t := reflect.TypeOf(ed.ExtensionType)
-	return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
-}
-
-// Extension represents an extension in a message.
-type Extension struct {
-	// When an extension is stored in a message using SetExtension
-	// only desc and value are set. When the message is marshaled
-	// enc will be set to the encoded form of the message.
-	//
-	// When a message is unmarshaled and contains extensions, each
-	// extension will have only enc set. When such an extension is
-	// accessed using GetExtension (or GetExtensions) desc and value
-	// will be set.
-	desc *ExtensionDesc
-
-	// value is a concrete value for the extension field. Let the type of
-	// desc.ExtensionType be the "API type" and the type of Extension.value
-	// be the "storage type". The API type and storage type are the same except:
-	//	* For scalars (except []byte), the API type uses *T,
-	//	while the storage type uses T.
-	//	* For repeated fields, the API type uses []T, while the storage type
-	//	uses *[]T.
-	//
-	// The reason for the divergence is so that the storage type more naturally
-	// matches what is expected of when retrieving the values through the
-	// protobuf reflection APIs.
-	//
-	// The value may only be populated if desc is also populated.
-	value interface{}
-
-	// enc is the raw bytes for the extension field.
-	enc []byte
-}
-
-// SetRawExtension is for testing only.
-func SetRawExtension(base Message, id int32, b []byte) {
-	epb, err := extendable(base)
-	if err != nil {
+// ClearExtension removes the extension field from m
+// either as an explicitly populated field or as an unknown field.
+func ClearExtension(m Message, xt *ExtensionDesc) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() {
 		return
 	}
-	extmap := epb.extensionsWrite()
-	extmap[id] = Extension{enc: b}
-}
 
-// isExtensionField returns true iff the given field number is in an extension range.
-func isExtensionField(pb extendableProto, field int32) bool {
-	for _, er := range pb.ExtensionRangeArray() {
-		if er.Start <= field && field <= er.End {
+	xtd := xt.TypeDescriptor()
+	if isValidExtension(mr.Descriptor(), xtd) {
+		mr.Clear(xtd)
+	} else {
+		mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+			if int32(fd.Number()) == xt.Field {
+				mr.Clear(fd)
+				return false
+			}
 			return true
-		}
+		})
 	}
-	return false
+	clearUnknown(mr, fieldNum(xt.Field))
 }
 
-// checkExtensionTypes checks that the given extension is valid for pb.
-func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
-	var pbi interface{} = pb
-	// Check the extended type.
-	if ea, ok := pbi.(extensionAdapter); ok {
-		pbi = ea.extendableProtoV1
-	}
-	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
-		return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
-	}
-	// Check the range.
-	if !isExtensionField(pb, extension.Field) {
-		return errors.New("proto: bad extension number; not in declared ranges")
-	}
-	return nil
-}
-
-// extPropKey is sufficient to uniquely identify an extension.
-type extPropKey struct {
-	base  reflect.Type
-	field int32
-}
-
-var extProp = struct {
-	sync.RWMutex
-	m map[extPropKey]*Properties
-}{
-	m: make(map[extPropKey]*Properties),
-}
-
-func extensionProperties(ed *ExtensionDesc) *Properties {
-	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
-
-	extProp.RLock()
-	if prop, ok := extProp.m[key]; ok {
-		extProp.RUnlock()
-		return prop
-	}
-	extProp.RUnlock()
-
-	extProp.Lock()
-	defer extProp.Unlock()
-	// Check again.
-	if prop, ok := extProp.m[key]; ok {
-		return prop
-	}
-
-	prop := new(Properties)
-	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
-	extProp.m[key] = prop
-	return prop
-}
-
-// HasExtension returns whether the given extension is present in pb.
-func HasExtension(pb Message, extension *ExtensionDesc) bool {
-	// TODO: Check types, field numbers, etc.?
-	epb, err := extendable(pb)
-	if err != nil {
-		return false
-	}
-	extmap, mu := epb.extensionsRead()
-	if extmap == nil {
-		return false
-	}
-	mu.Lock()
-	_, ok := extmap[extension.Field]
-	mu.Unlock()
-	return ok
-}
-
-// ClearExtension removes the given extension from pb.
-func ClearExtension(pb Message, extension *ExtensionDesc) {
-	epb, err := extendable(pb)
-	if err != nil {
+// ClearAllExtensions clears all extensions from m.
+// This includes populated fields and unknown fields in the extension range.
+func ClearAllExtensions(m Message) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() {
 		return
 	}
-	// TODO: Check types, field numbers, etc.?
-	extmap := epb.extensionsWrite()
-	delete(extmap, extension.Field)
+
+	mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+		if fd.IsExtension() {
+			mr.Clear(fd)
+		}
+		return true
+	})
+	clearUnknown(mr, mr.Descriptor().ExtensionRanges())
 }
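A short usage sketch of the presence and clear APIs above. It assumes a hypothetical protoc-gen-go output package pb that declares an extendable proto2 message MyMessage and a *string extension E_Note; all of those names are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	// Hypothetical generated package: MyMessage is extendable, E_Note is *string.
	pb "example.com/project/pb"
)

func main() {
	m := &pb.MyMessage{}
	fmt.Println(proto.HasExtension(m, pb.E_Note)) // false on an empty message

	// Populate the extension; the value type must match E_Note's Go type.
	if err := proto.SetExtension(m, pb.E_Note, proto.String("hello")); err != nil {
		panic(err)
	}
	fmt.Println(proto.HasExtension(m, pb.E_Note)) // true

	// ClearExtension removes the populated field (and any matching unknown
	// fields); ClearAllExtensions wipes everything in the extension ranges.
	proto.ClearExtension(m, pb.E_Note)
	proto.ClearAllExtensions(m)
	fmt.Println(proto.HasExtension(m, pb.E_Note)) // false
}
```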
 
-// GetExtension retrieves a proto2 extended field from pb.
+// GetExtension retrieves a proto2 extended field from m.
 //
 // If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
 // then GetExtension parses the encoded field and returns a Go value of the specified type.
 // If the field is not present, then the default value is returned (if one is specified),
 // otherwise ErrMissingExtension is reported.
 //
-// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
-// then GetExtension returns the raw encoded bytes of the field extension.
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
-	epb, err := extendable(pb)
-	if err != nil {
-		return nil, err
+// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes for the extension field.
+func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+		return nil, errNotExtendable
 	}
 
-	if extension.ExtendedType != nil {
-		// can only check type if this is a complete descriptor
-		if err := checkExtensionTypes(epb, extension); err != nil {
+	// Retrieve the unknown fields for this extension field.
+	var bo protoreflect.RawFields
+	for bi := mr.GetUnknown(); len(bi) > 0; {
+		num, _, n := protowire.ConsumeField(bi)
+		if int32(num) == xt.Field {
+			bo = append(bo, bi[:n]...)
+		}
+		bi = bi[n:]
+	}
+
+	// For type incomplete descriptors, only retrieve the unknown fields.
+	if xt.ExtensionType == nil {
+		return []byte(bo), nil
+	}
+
+	// If the extension field only exists as unknown fields, unmarshal it.
+	// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
+	xtd := xt.TypeDescriptor()
+	if !isValidExtension(mr.Descriptor(), xtd) {
+		return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
+	}
+	if !mr.Has(xtd) && len(bo) > 0 {
+		m2 := mr.New()
+		if err := (proto.UnmarshalOptions{
+			Resolver: extensionResolver{xt},
+		}.Unmarshal(bo, m2.Interface())); err != nil {
 			return nil, err
 		}
-	}
-
-	emap, mu := epb.extensionsRead()
-	if emap == nil {
-		return defaultExtensionValue(extension)
-	}
-	mu.Lock()
-	defer mu.Unlock()
-	e, ok := emap[extension.Field]
-	if !ok {
-		// defaultExtensionValue returns the default value or
-		// ErrMissingExtension if there is no default.
-		return defaultExtensionValue(extension)
-	}
-
-	if e.value != nil {
-		// Already decoded. Check the descriptor, though.
-		if e.desc != extension {
-			// This shouldn't happen. If it does, it means that
-			// GetExtension was called twice with two different
-			// descriptors with the same field number.
-			return nil, errors.New("proto: descriptor conflict")
+		if m2.Has(xtd) {
+			mr.Set(xtd, m2.Get(xtd))
+			clearUnknown(mr, fieldNum(xt.Field))
 		}
-		return extensionAsLegacyType(e.value), nil
 	}
 
-	if extension.ExtensionType == nil {
-		// incomplete descriptor
-		return e.enc, nil
-	}
-
-	v, err := decodeExtension(e.enc, extension)
-	if err != nil {
-		return nil, err
-	}
-
-	// Remember the decoded version and drop the encoded version.
-	// That way it is safe to mutate what we return.
-	e.value = extensionAsStorageType(v)
-	e.desc = extension
-	e.enc = nil
-	emap[extension.Field] = e
-	return extensionAsLegacyType(e.value), nil
-}
-
-// defaultExtensionValue returns the default value for extension.
-// If no default for an extension is defined ErrMissingExtension is returned.
-func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
-	if extension.ExtensionType == nil {
-		// incomplete descriptor, so no default
+	// Check whether the message has the extension field set or a default.
+	var pv protoreflect.Value
+	switch {
+	case mr.Has(xtd):
+		pv = mr.Get(xtd)
+	case xtd.HasDefault():
+		pv = xtd.Default()
+	default:
 		return nil, ErrMissingExtension
 	}
 
-	t := reflect.TypeOf(extension.ExtensionType)
-	props := extensionProperties(extension)
-
-	sf, _, err := fieldDefault(t, props)
-	if err != nil {
-		return nil, err
-	}
-
-	if sf == nil || sf.value == nil {
-		// There is no default value.
-		return nil, ErrMissingExtension
-	}
-
-	if t.Kind() != reflect.Ptr {
-		// We do not need to return a Ptr, we can directly return sf.value.
-		return sf.value, nil
-	}
-
-	// We need to return an interface{} that is a pointer to sf.value.
-	value := reflect.New(t).Elem()
-	value.Set(reflect.New(value.Type().Elem()))
-	if sf.kind == reflect.Int32 {
-		// We may have an int32 or an enum, but the underlying data is int32.
-		// Since we can't set an int32 into a non int32 reflect.value directly
-		// set it as a int32.
-		value.Elem().SetInt(int64(sf.value.(int32)))
-	} else {
-		value.Elem().Set(reflect.ValueOf(sf.value))
-	}
-	return value.Interface(), nil
-}
-
-// decodeExtension decodes an extension encoded in b.
-func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
-	t := reflect.TypeOf(extension.ExtensionType)
-	unmarshal := typeUnmarshaler(t, extension.Tag)
-
-	// t is a pointer to a struct, pointer to basic type or a slice.
-	// Allocate space to store the pointer/slice.
-	value := reflect.New(t).Elem()
-
-	var err error
-	for {
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		wire := int(x) & 7
-
-		b, err = unmarshal(b, valToPointer(value.Addr()), wire)
-		if err != nil {
-			return nil, err
-		}
-
-		if len(b) == 0 {
-			break
-		}
-	}
-	return value.Interface(), nil
-}
-
-// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
-// The returned slice has the same length as es; missing extensions will appear as nil elements.
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
-	epb, err := extendable(pb)
-	if err != nil {
-		return nil, err
-	}
-	extensions = make([]interface{}, len(es))
-	for i, e := range es {
-		extensions[i], err = GetExtension(epb, e)
-		if err == ErrMissingExtension {
-			err = nil
-		}
-		if err != nil {
-			return
-		}
-	}
-	return
-}
-
-// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
-// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
-// just the Field field, which defines the extension's field number.
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
-	epb, err := extendable(pb)
-	if err != nil {
-		return nil, err
-	}
-	registeredExtensions := RegisteredExtensions(pb)
-
-	emap, mu := epb.extensionsRead()
-	if emap == nil {
-		return nil, nil
-	}
-	mu.Lock()
-	defer mu.Unlock()
-	extensions := make([]*ExtensionDesc, 0, len(emap))
-	for extid, e := range emap {
-		desc := e.desc
-		if desc == nil {
-			desc = registeredExtensions[extid]
-			if desc == nil {
-				desc = &ExtensionDesc{Field: extid}
-			}
-		}
-
-		extensions = append(extensions, desc)
-	}
-	return extensions, nil
-}
-
-// SetExtension sets the specified extension of pb to the specified value.
-func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
-	epb, err := extendable(pb)
-	if err != nil {
-		return err
-	}
-	if err := checkExtensionTypes(epb, extension); err != nil {
-		return err
-	}
-	typ := reflect.TypeOf(extension.ExtensionType)
-	if typ != reflect.TypeOf(value) {
-		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
-	}
-	// nil extension values need to be caught early, because the
-	// encoder can't distinguish an ErrNil due to a nil extension
-	// from an ErrNil due to a missing field. Extensions are
-	// always optional, so the encoder would just swallow the error
-	// and drop all the extensions from the encoded message.
-	if reflect.ValueOf(value).IsNil() {
-		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
-	}
-
-	extmap := epb.extensionsWrite()
-	extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
-	return nil
-}
-
-// ClearAllExtensions clears all extensions from pb.
-func ClearAllExtensions(pb Message) {
-	epb, err := extendable(pb)
-	if err != nil {
-		return
-	}
-	m := epb.extensionsWrite()
-	for k := range m {
-		delete(m, k)
-	}
-}
-
-// A global registry of extensions.
-// The generated code will register the generated descriptors by calling RegisterExtension.
-
-var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
-
-// RegisterExtension is called from the generated code.
-func RegisterExtension(desc *ExtensionDesc) {
-	st := reflect.TypeOf(desc.ExtendedType).Elem()
-	m := extensionMaps[st]
-	if m == nil {
-		m = make(map[int32]*ExtensionDesc)
-		extensionMaps[st] = m
-	}
-	if _, ok := m[desc.Field]; ok {
-		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
-	}
-	m[desc.Field] = desc
-}
-
-// RegisteredExtensions returns a map of the registered extensions of a
-// protocol buffer struct, indexed by the extension number.
-// The argument pb should be a nil pointer to the struct type.
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
-	return extensionMaps[reflect.TypeOf(pb).Elem()]
-}
-
-// extensionAsLegacyType converts an value in the storage type as the API type.
-// See Extension.value.
-func extensionAsLegacyType(v interface{}) interface{} {
-	switch rv := reflect.ValueOf(v); rv.Kind() {
-	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
-		// Represent primitive types as a pointer to the value.
+	v := xt.InterfaceOf(pv)
+	rv := reflect.ValueOf(v)
+	if isScalarKind(rv.Kind()) {
 		rv2 := reflect.New(rv.Type())
 		rv2.Elem().Set(rv)
 		v = rv2.Interface()
-	case reflect.Ptr:
-		// Represent slice types as the value itself.
-		switch rv.Type().Elem().Kind() {
-		case reflect.Slice:
-			if rv.IsNil() {
-				v = reflect.Zero(rv.Type().Elem()).Interface()
-			} else {
-				v = rv.Elem().Interface()
-			}
-		}
 	}
-	return v
+	return v, nil
 }
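A hedged sketch of the three outcomes GetExtension can produce, again assuming the hypothetical generated identifiers pb.MyMessage and pb.E_Note (a *string extension):

```go
package example

import (
	"github.com/golang/protobuf/proto"
	// Hypothetical generated identifiers, used only for illustration.
	pb "example.com/project/pb"
)

// readNote shows the three outcomes of GetExtension: a value (set field or
// declared default), ErrMissingExtension, or a descriptor/type error.
func readNote(m *pb.MyMessage) (string, error) {
	v, err := proto.GetExtension(m, pb.E_Note)
	switch {
	case err == proto.ErrMissingExtension:
		return "", nil // not set and no default in the .proto file
	case err != nil:
		return "", err // m not extendable, or descriptor mismatch
	default:
		return *(v.(*string)), nil // scalar extensions are returned as *T
	}
}
```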
 
-// extensionAsStorageType converts an value in the API type as the storage type.
-// See Extension.value.
-func extensionAsStorageType(v interface{}) interface{} {
-	switch rv := reflect.ValueOf(v); rv.Kind() {
-	case reflect.Ptr:
-		// Represent slice types as the value itself.
-		switch rv.Type().Elem().Kind() {
-		case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
-			if rv.IsNil() {
-				v = reflect.Zero(rv.Type().Elem()).Interface()
-			} else {
-				v = rv.Elem().Interface()
+// extensionResolver is a custom extension resolver that stores a single
+// extension type that takes precedence over the global registry.
+type extensionResolver struct{ xt protoreflect.ExtensionType }
+
+func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+	if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
+		return r.xt, nil
+	}
+	return protoregistry.GlobalTypes.FindExtensionByName(field)
+}
+
+func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+	if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
+		return r.xt, nil
+	}
+	return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+}
+
+// GetExtensions returns a list of the extension values present in m,

+// corresponding with the provided list of extension descriptors, xts.
+// If an extension is missing in m, the corresponding value is nil.
+func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() {
+		return nil, errNotExtendable
+	}
+
+	vs := make([]interface{}, len(xts))
+	for i, xt := range xts {
+		v, err := GetExtension(m, xt)
+		if err != nil {
+			if err == ErrMissingExtension {
+				continue
 			}
+			return vs, err
 		}
-	case reflect.Slice:
-		// Represent slice types as a pointer to the value.
-		if rv.Type().Elem().Kind() != reflect.Uint8 {
-			rv2 := reflect.New(rv.Type())
-			rv2.Elem().Set(rv)
-			v = rv2.Interface()
+		vs[i] = v
+	}
+	return vs, nil
+}
+
+// SetExtension sets an extension field in m to the provided value.
+func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+		return errNotExtendable
+	}
+
+	rv := reflect.ValueOf(v)
+	if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
+		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
+	}
+	if rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
+		}
+		if isScalarKind(rv.Elem().Kind()) {
+			v = rv.Elem().Interface()
 		}
 	}
-	return v
+
+	xtd := xt.TypeDescriptor()
+	if !isValidExtension(mr.Descriptor(), xtd) {
+		return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
+	}
+	mr.Set(xtd, xt.ValueOf(v))
+	clearUnknown(mr, fieldNum(xt.Field))
+	return nil
+}
+
+// SetRawExtension inserts b into the unknown fields of m.
+//
+// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
+func SetRawExtension(m Message, fnum int32, b []byte) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() {
+		return
+	}
+
+	// Verify that the raw field is valid.
+	for b0 := b; len(b0) > 0; {
+		num, _, n := protowire.ConsumeField(b0)
+		if int32(num) != fnum {
+			panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
+		}
+		b0 = b0[n:]
+	}
+
+	ClearExtension(m, &ExtensionDesc{Field: fnum})
+	mr.SetUnknown(append(mr.GetUnknown(), b...))
+}
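A sketch of handing SetRawExtension well-formed bytes so that the validation loop above passes. pb.MyMessage and field number 100 are assumptions; 100 is taken to lie in the message's declared extension range:

```go
package example

import (
	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/encoding/protowire"
	// Hypothetical generated package with an extendable message MyMessage.
	pb "example.com/project/pb"
)

// setRawNote stores field 100 as a varint directly in m's unknown fields.
// Every field in the raw buffer must carry number 100, otherwise
// SetRawExtension panics.
func setRawNote(m *pb.MyMessage, v uint64) {
	raw := protowire.AppendTag(nil, 100, protowire.VarintType)
	raw = protowire.AppendVarint(raw, v)
	proto.SetRawExtension(m, 100, raw)
}
```

Under the same assumptions, the stored bytes can later be read back with GetExtension and a type-incomplete descriptor such as &proto.ExtensionDesc{Field: 100}, which returns the raw encoding.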
+
+// ExtensionDescs returns a list of extension descriptors found in m,
+// containing descriptors for both populated extension fields in m and
+// also unknown fields of m that are in the extension range.
+// For the latter case, a type-incomplete descriptor is provided where only
+// the ExtensionDesc.Field field is populated.
+// The order of the extension descriptors is undefined.
+func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+		return nil, errNotExtendable
+	}
+
+	// Collect a set of known extension descriptors.
+	extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
+	mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+		if fd.IsExtension() {
+			xt := fd.(protoreflect.ExtensionTypeDescriptor)
+			if xd, ok := xt.Type().(*ExtensionDesc); ok {
+				extDescs[fd.Number()] = xd
+			}
+		}
+		return true
+	})
+
+	// Collect a set of unknown extension descriptors.
+	extRanges := mr.Descriptor().ExtensionRanges()
+	for b := mr.GetUnknown(); len(b) > 0; {
+		num, _, n := protowire.ConsumeField(b)
+		if extRanges.Has(num) && extDescs[num] == nil {
+			extDescs[num] = nil
+		}
+		b = b[n:]
+	}
+
+	// Transpose the set of descriptors into a list.
+	var xts []*ExtensionDesc
+	for num, xt := range extDescs {
+		if xt == nil {
+			xt = &ExtensionDesc{Field: int32(num)}
+		}
+		xts = append(xts, xt)
+	}
+	return xts, nil
+}
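A small sketch of consuming the ExtensionDescs output and telling registered descriptors apart from the type-incomplete ones synthesized for unknown fields:

```go
package example

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// listExtensions prints the extension field numbers found in m, flagging the
// ones that only exist as unknown fields (type-incomplete descriptors).
func listExtensions(m proto.Message) error {
	xts, err := proto.ExtensionDescs(m)
	if err != nil {
		return err
	}
	for _, xt := range xts {
		if xt.ExtensionType == nil {
			fmt.Printf("field %d: unknown extension (raw bytes only)\n", xt.Field)
			continue
		}
		fmt.Printf("field %d: %s\n", xt.Field, xt.Name)
	}
	return nil
}
```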
+
+// isValidExtension reports whether xtd is a valid extension descriptor for md.
+func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
+	return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
+}
+
+// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
+// This function exists for historical reasons since the representation of
+// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
+func isScalarKind(k reflect.Kind) bool {
+	switch k {
+	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+		return true
+	default:
+		return false
+	}
+}
+
+// clearUnknown removes unknown fields from m where remover.Has reports true.
+func clearUnknown(m protoreflect.Message, remover interface {
+	Has(protoreflect.FieldNumber) bool
+}) {
+	var bo protoreflect.RawFields
+	for bi := m.GetUnknown(); len(bi) > 0; {
+		num, _, n := protowire.ConsumeField(bi)
+		if !remover.Has(num) {
+			bo = append(bo, bi[:n]...)
+		}
+		bi = bi[n:]
+	}
+	if bi := m.GetUnknown(); len(bi) != len(bo) {
+		m.SetUnknown(bo)
+	}
+}
+
+type fieldNum protoreflect.FieldNumber
+
+func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
+	return protoreflect.FieldNumber(n1) == n2
 }
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
deleted file mode 100644
index fdd328b..0000000
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ /dev/null
@@ -1,965 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package proto converts data structures to and from the wire format of
-protocol buffers.  It works in concert with the Go source code generated
-for .proto files by the protocol compiler.
-
-A summary of the properties of the protocol buffer interface
-for a protocol buffer variable v:
-
-  - Names are turned from camel_case to CamelCase for export.
-  - There are no methods on v to set fields; just treat
-	them as structure fields.
-  - There are getters that return a field's value if set,
-	and return the field's default value if unset.
-	The getters work even if the receiver is a nil message.
-  - The zero value for a struct is its correct initialization state.
-	All desired fields must be set before marshaling.
-  - A Reset() method will restore a protobuf struct to its zero state.
-  - Non-repeated fields are pointers to the values; nil means unset.
-	That is, optional or required field int32 f becomes F *int32.
-  - Repeated fields are slices.
-  - Helper functions are available to aid the setting of fields.
-	msg.Foo = proto.String("hello") // set field
-  - Constants are defined to hold the default values of all fields that
-	have them.  They have the form Default_StructName_FieldName.
-	Because the getter methods handle defaulted values,
-	direct use of these constants should be rare.
-  - Enums are given type names and maps from names to values.
-	Enum values are prefixed by the enclosing message's name, or by the
-	enum's type name if it is a top-level enum. Enum types have a String
-	method, and a Enum method to assist in message construction.
-  - Nested messages, groups and enums have type names prefixed with the name of
-	the surrounding message type.
-  - Extensions are given descriptor names that start with E_,
-	followed by an underscore-delimited list of the nested messages
-	that contain it (if any) followed by the CamelCased name of the
-	extension field itself.  HasExtension, ClearExtension, GetExtension
-	and SetExtension are functions for manipulating extensions.
-  - Oneof field sets are given a single field in their message,
-	with distinguished wrapper types for each possible field value.
-  - Marshal and Unmarshal are functions to encode and decode the wire format.
-
-When the .proto file specifies `syntax="proto3"`, there are some differences:
-
-  - Non-repeated fields of non-message type are values instead of pointers.
-  - Enum types do not get an Enum method.
-
-The simplest way to describe this is to see an example.
-Given file test.proto, containing
-
-	package example;
-
-	enum FOO { X = 17; }
-
-	message Test {
-	  required string label = 1;
-	  optional int32 type = 2 [default=77];
-	  repeated int64 reps = 3;
-	  optional group OptionalGroup = 4 {
-	    required string RequiredField = 5;
-	  }
-	  oneof union {
-	    int32 number = 6;
-	    string name = 7;
-	  }
-	}
-
-The resulting file, test.pb.go, is:
-
-	package example
-
-	import proto "github.com/golang/protobuf/proto"
-	import math "math"
-
-	type FOO int32
-	const (
-		FOO_X FOO = 17
-	)
-	var FOO_name = map[int32]string{
-		17: "X",
-	}
-	var FOO_value = map[string]int32{
-		"X": 17,
-	}
-
-	func (x FOO) Enum() *FOO {
-		p := new(FOO)
-		*p = x
-		return p
-	}
-	func (x FOO) String() string {
-		return proto.EnumName(FOO_name, int32(x))
-	}
-	func (x *FOO) UnmarshalJSON(data []byte) error {
-		value, err := proto.UnmarshalJSONEnum(FOO_value, data)
-		if err != nil {
-			return err
-		}
-		*x = FOO(value)
-		return nil
-	}
-
-	type Test struct {
-		Label         *string             `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
-		Type          *int32              `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
-		Reps          []int64             `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
-		Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
-		// Types that are valid to be assigned to Union:
-		//	*Test_Number
-		//	*Test_Name
-		Union            isTest_Union `protobuf_oneof:"union"`
-		XXX_unrecognized []byte       `json:"-"`
-	}
-	func (m *Test) Reset()         { *m = Test{} }
-	func (m *Test) String() string { return proto.CompactTextString(m) }
-	func (*Test) ProtoMessage() {}
-
-	type isTest_Union interface {
-		isTest_Union()
-	}
-
-	type Test_Number struct {
-		Number int32 `protobuf:"varint,6,opt,name=number"`
-	}
-	type Test_Name struct {
-		Name string `protobuf:"bytes,7,opt,name=name"`
-	}
-
-	func (*Test_Number) isTest_Union() {}
-	func (*Test_Name) isTest_Union()   {}
-
-	func (m *Test) GetUnion() isTest_Union {
-		if m != nil {
-			return m.Union
-		}
-		return nil
-	}
-	const Default_Test_Type int32 = 77
-
-	func (m *Test) GetLabel() string {
-		if m != nil && m.Label != nil {
-			return *m.Label
-		}
-		return ""
-	}
-
-	func (m *Test) GetType() int32 {
-		if m != nil && m.Type != nil {
-			return *m.Type
-		}
-		return Default_Test_Type
-	}
-
-	func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
-		if m != nil {
-			return m.Optionalgroup
-		}
-		return nil
-	}
-
-	type Test_OptionalGroup struct {
-		RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
-	}
-	func (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }
-	func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
-
-	func (m *Test_OptionalGroup) GetRequiredField() string {
-		if m != nil && m.RequiredField != nil {
-			return *m.RequiredField
-		}
-		return ""
-	}
-
-	func (m *Test) GetNumber() int32 {
-		if x, ok := m.GetUnion().(*Test_Number); ok {
-			return x.Number
-		}
-		return 0
-	}
-
-	func (m *Test) GetName() string {
-		if x, ok := m.GetUnion().(*Test_Name); ok {
-			return x.Name
-		}
-		return ""
-	}
-
-	func init() {
-		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
-	}
-
-To create and play with a Test object:
-
-	package main
-
-	import (
-		"log"
-
-		"github.com/golang/protobuf/proto"
-		pb "./example.pb"
-	)
-
-	func main() {
-		test := &pb.Test{
-			Label: proto.String("hello"),
-			Type:  proto.Int32(17),
-			Reps:  []int64{1, 2, 3},
-			Optionalgroup: &pb.Test_OptionalGroup{
-				RequiredField: proto.String("good bye"),
-			},
-			Union: &pb.Test_Name{"fred"},
-		}
-		data, err := proto.Marshal(test)
-		if err != nil {
-			log.Fatal("marshaling error: ", err)
-		}
-		newTest := &pb.Test{}
-		err = proto.Unmarshal(data, newTest)
-		if err != nil {
-			log.Fatal("unmarshaling error: ", err)
-		}
-		// Now test and newTest contain the same data.
-		if test.GetLabel() != newTest.GetLabel() {
-			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
-		}
-		// Use a type switch to determine which oneof was set.
-		switch u := test.Union.(type) {
-		case *pb.Test_Number: // u.Number contains the number.
-		case *pb.Test_Name: // u.Name contains the string.
-		}
-		// etc.
-	}
-*/
-package proto
-
-import (
-	"encoding/json"
-	"fmt"
-	"log"
-	"reflect"
-	"sort"
-	"strconv"
-	"sync"
-)
-
-// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
-// Marshal reports this when a required field is not initialized.
-// Unmarshal reports this when a required field is missing from the wire data.
-type RequiredNotSetError struct{ field string }
-
-func (e *RequiredNotSetError) Error() string {
-	if e.field == "" {
-		return fmt.Sprintf("proto: required field not set")
-	}
-	return fmt.Sprintf("proto: required field %q not set", e.field)
-}
-func (e *RequiredNotSetError) RequiredNotSet() bool {
-	return true
-}
-
-type invalidUTF8Error struct{ field string }
-
-func (e *invalidUTF8Error) Error() string {
-	if e.field == "" {
-		return "proto: invalid UTF-8 detected"
-	}
-	return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
-}
-func (e *invalidUTF8Error) InvalidUTF8() bool {
-	return true
-}
-
-// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
-// This error should not be exposed to the external API as such errors should
-// be recreated with the field information.
-var errInvalidUTF8 = &invalidUTF8Error{}
-
-// isNonFatal reports whether the error is either a RequiredNotSet error
-// or a InvalidUTF8 error.
-func isNonFatal(err error) bool {
-	if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
-		return true
-	}
-	if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
-		return true
-	}
-	return false
-}
-
-type nonFatal struct{ E error }
-
-// Merge merges err into nf and reports whether it was successful.
-// Otherwise it returns false for any fatal non-nil errors.
-func (nf *nonFatal) Merge(err error) (ok bool) {
-	if err == nil {
-		return true // not an error
-	}
-	if !isNonFatal(err) {
-		return false // fatal error
-	}
-	if nf.E == nil {
-		nf.E = err // store first instance of non-fatal error
-	}
-	return true
-}
-
-// Message is implemented by generated protocol buffer messages.
-type Message interface {
-	Reset()
-	String() string
-	ProtoMessage()
-}
-
-// A Buffer is a buffer manager for marshaling and unmarshaling
-// protocol buffers.  It may be reused between invocations to
-// reduce memory usage.  It is not necessary to use a Buffer;
-// the global functions Marshal and Unmarshal create a
-// temporary Buffer and are fine for most applications.
-type Buffer struct {
-	buf   []byte // encode/decode byte stream
-	index int    // read point
-
-	deterministic bool
-}
-
-// NewBuffer allocates a new Buffer and initializes its internal data to
-// the contents of the argument slice.
-func NewBuffer(e []byte) *Buffer {
-	return &Buffer{buf: e}
-}
-
-// Reset resets the Buffer, ready for marshaling a new protocol buffer.
-func (p *Buffer) Reset() {
-	p.buf = p.buf[0:0] // for reading/writing
-	p.index = 0        // for reading
-}
-
-// SetBuf replaces the internal buffer with the slice,
-// ready for unmarshaling the contents of the slice.
-func (p *Buffer) SetBuf(s []byte) {
-	p.buf = s
-	p.index = 0
-}
-
-// Bytes returns the contents of the Buffer.
-func (p *Buffer) Bytes() []byte { return p.buf }
-
-// SetDeterministic sets whether to use deterministic serialization.
-//
-// Deterministic serialization guarantees that for a given binary, equal
-// messages will always be serialized to the same bytes. This implies:
-//
-//   - Repeated serialization of a message will return the same bytes.
-//   - Different processes of the same binary (which may be executing on
-//     different machines) will serialize equal messages to the same bytes.
-//
-// Note that the deterministic serialization is NOT canonical across
-// languages. It is not guaranteed to remain stable over time. It is unstable
-// across different builds with schema changes due to unknown fields.
-// Users who need canonical serialization (e.g., persistent storage in a
-// canonical form, fingerprinting, etc.) should define their own
-// canonicalization specification and implement their own serializer rather
-// than relying on this API.
-//
-// If deterministic serialization is requested, map entries will be sorted
-// by keys in lexographical order. This is an implementation detail and
-// subject to change.
-func (p *Buffer) SetDeterministic(deterministic bool) {
-	p.deterministic = deterministic
-}
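A hedged sketch of how deterministic serialization is requested through a Buffer; the function is illustrative, not part of the package:

```go
package example

import (
	"github.com/golang/protobuf/proto"
)

// marshalDeterministic serializes m with map entries sorted by key, as
// described above. Equal messages then marshal to identical bytes within
// the same binary.
func marshalDeterministic(m proto.Message) ([]byte, error) {
	buf := proto.NewBuffer(nil)
	buf.SetDeterministic(true)
	if err := buf.Marshal(m); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```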
-
-/*
- * Helper routines for simplifying the creation of optional fields of basic type.
- */
-
-// Bool is a helper routine that allocates a new bool value
-// to store v and returns a pointer to it.
-func Bool(v bool) *bool {
-	return &v
-}
-
-// Int32 is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it.
-func Int32(v int32) *int32 {
-	return &v
-}
-
-// Int is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it, but unlike Int32
-// its argument value is an int.
-func Int(v int) *int32 {
-	p := new(int32)
-	*p = int32(v)
-	return p
-}
-
-// Int64 is a helper routine that allocates a new int64 value
-// to store v and returns a pointer to it.
-func Int64(v int64) *int64 {
-	return &v
-}
-
-// Float32 is a helper routine that allocates a new float32 value
-// to store v and returns a pointer to it.
-func Float32(v float32) *float32 {
-	return &v
-}
-
-// Float64 is a helper routine that allocates a new float64 value
-// to store v and returns a pointer to it.
-func Float64(v float64) *float64 {
-	return &v
-}
-
-// Uint32 is a helper routine that allocates a new uint32 value
-// to store v and returns a pointer to it.
-func Uint32(v uint32) *uint32 {
-	return &v
-}
-
-// Uint64 is a helper routine that allocates a new uint64 value
-// to store v and returns a pointer to it.
-func Uint64(v uint64) *uint64 {
-	return &v
-}
-
-// String is a helper routine that allocates a new string value
-// to store v and returns a pointer to it.
-func String(v string) *string {
-	return &v
-}
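These pointer helpers are typically used to populate optional proto2 fields inline. A minimal sketch with a hand-written stand-in struct; Test here is hypothetical, not generated code:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Test stands in for a generated proto2 message, whose optional scalar
// fields are pointers.
type Test struct {
	Label *string
	Type  *int32
}

func main() {
	t := &Test{
		Label: proto.String("hello"),
		Type:  proto.Int32(77),
	}
	fmt.Println(*t.Label, *t.Type) // hello 77
}
```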
-
-// EnumName is a helper function to simplify printing protocol buffer enums
-// by name.  Given an enum map and a value, it returns a useful string.
-func EnumName(m map[int32]string, v int32) string {
-	s, ok := m[v]
-	if ok {
-		return s
-	}
-	return strconv.Itoa(int(v))
-}
-
-// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
-// from their JSON-encoded representation. Given a map from the enum's symbolic
-// names to its int values, and a byte buffer containing the JSON-encoded
-// value, it returns an int32 that can be cast to the enum type by the caller.
-//
-// The function can deal with both JSON representations, numeric and symbolic.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
-	if data[0] == '"' {
-		// New style: enums are strings.
-		var repr string
-		if err := json.Unmarshal(data, &repr); err != nil {
-			return -1, err
-		}
-		val, ok := m[repr]
-		if !ok {
-			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
-		}
-		return val, nil
-	}
-	// Old style: enums are ints.
-	var val int32
-	if err := json.Unmarshal(data, &val); err != nil {
-		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
-	}
-	return val, nil
-}
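A quick sketch of the two JSON forms this helper accepts, using a hand-built enum value map; the FOO names are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Enum value map as generated code would declare it.
	fooValue := map[string]int32{"X": 17}

	// Symbolic JSON form.
	v1, err := proto.UnmarshalJSONEnum(fooValue, []byte(`"X"`), "FOO")
	fmt.Println(v1, err) // 17 <nil>

	// Numeric JSON form is accepted as well.
	v2, err := proto.UnmarshalJSONEnum(fooValue, []byte(`17`), "FOO")
	fmt.Println(v2, err) // 17 <nil>
}
```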
-
-// DebugPrint dumps the encoded data in b in a debugging format with a header
-// including the string s. Used in testing but made available for general debugging.
-func (p *Buffer) DebugPrint(s string, b []byte) {
-	var u uint64
-
-	obuf := p.buf
-	index := p.index
-	p.buf = b
-	p.index = 0
-	depth := 0
-
-	fmt.Printf("\n--- %s ---\n", s)
-
-out:
-	for {
-		for i := 0; i < depth; i++ {
-			fmt.Print("  ")
-		}
-
-		index := p.index
-		if index == len(p.buf) {
-			break
-		}
-
-		op, err := p.DecodeVarint()
-		if err != nil {
-			fmt.Printf("%3d: fetching op err %v\n", index, err)
-			break out
-		}
-		tag := op >> 3
-		wire := op & 7
-
-		switch wire {
-		default:
-			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
-				index, tag, wire)
-			break out
-
-		case WireBytes:
-			var r []byte
-
-			r, err = p.DecodeRawBytes(false)
-			if err != nil {
-				break out
-			}
-			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
-			if len(r) <= 6 {
-				for i := 0; i < len(r); i++ {
-					fmt.Printf(" %.2x", r[i])
-				}
-			} else {
-				for i := 0; i < 3; i++ {
-					fmt.Printf(" %.2x", r[i])
-				}
-				fmt.Printf(" ..")
-				for i := len(r) - 3; i < len(r); i++ {
-					fmt.Printf(" %.2x", r[i])
-				}
-			}
-			fmt.Printf("\n")
-
-		case WireFixed32:
-			u, err = p.DecodeFixed32()
-			if err != nil {
-				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
-				break out
-			}
-			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
-
-		case WireFixed64:
-			u, err = p.DecodeFixed64()
-			if err != nil {
-				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
-				break out
-			}
-			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
-
-		case WireVarint:
-			u, err = p.DecodeVarint()
-			if err != nil {
-				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
-				break out
-			}
-			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
-
-		case WireStartGroup:
-			fmt.Printf("%3d: t=%3d start\n", index, tag)
-			depth++
-
-		case WireEndGroup:
-			depth--
-			fmt.Printf("%3d: t=%3d end\n", index, tag)
-		}
-	}
-
-	if depth != 0 {
-		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
-	}
-	fmt.Printf("\n")
-
-	p.buf = obuf
-	p.index = index
-}
-
-// SetDefaults sets unset protocol buffer fields to their default values.
-// It only modifies fields that are both unset and have defined defaults.
-// It recursively sets default values in any non-nil sub-messages.
-func SetDefaults(pb Message) {
-	setDefaults(reflect.ValueOf(pb), true, false)
-}
-
-// v is a pointer to a struct.
-func setDefaults(v reflect.Value, recur, zeros bool) {
-	v = v.Elem()
-
-	defaultMu.RLock()
-	dm, ok := defaults[v.Type()]
-	defaultMu.RUnlock()
-	if !ok {
-		dm = buildDefaultMessage(v.Type())
-		defaultMu.Lock()
-		defaults[v.Type()] = dm
-		defaultMu.Unlock()
-	}
-
-	for _, sf := range dm.scalars {
-		f := v.Field(sf.index)
-		if !f.IsNil() {
-			// field already set
-			continue
-		}
-		dv := sf.value
-		if dv == nil && !zeros {
-			// no explicit default, and don't want to set zeros
-			continue
-		}
-		fptr := f.Addr().Interface() // **T
-		// TODO: Consider batching the allocations we do here.
-		switch sf.kind {
-		case reflect.Bool:
-			b := new(bool)
-			if dv != nil {
-				*b = dv.(bool)
-			}
-			*(fptr.(**bool)) = b
-		case reflect.Float32:
-			f := new(float32)
-			if dv != nil {
-				*f = dv.(float32)
-			}
-			*(fptr.(**float32)) = f
-		case reflect.Float64:
-			f := new(float64)
-			if dv != nil {
-				*f = dv.(float64)
-			}
-			*(fptr.(**float64)) = f
-		case reflect.Int32:
-			// might be an enum
-			if ft := f.Type(); ft != int32PtrType {
-				// enum
-				f.Set(reflect.New(ft.Elem()))
-				if dv != nil {
-					f.Elem().SetInt(int64(dv.(int32)))
-				}
-			} else {
-				// int32 field
-				i := new(int32)
-				if dv != nil {
-					*i = dv.(int32)
-				}
-				*(fptr.(**int32)) = i
-			}
-		case reflect.Int64:
-			i := new(int64)
-			if dv != nil {
-				*i = dv.(int64)
-			}
-			*(fptr.(**int64)) = i
-		case reflect.String:
-			s := new(string)
-			if dv != nil {
-				*s = dv.(string)
-			}
-			*(fptr.(**string)) = s
-		case reflect.Uint8:
-			// exceptional case: []byte
-			var b []byte
-			if dv != nil {
-				db := dv.([]byte)
-				b = make([]byte, len(db))
-				copy(b, db)
-			} else {
-				b = []byte{}
-			}
-			*(fptr.(*[]byte)) = b
-		case reflect.Uint32:
-			u := new(uint32)
-			if dv != nil {
-				*u = dv.(uint32)
-			}
-			*(fptr.(**uint32)) = u
-		case reflect.Uint64:
-			u := new(uint64)
-			if dv != nil {
-				*u = dv.(uint64)
-			}
-			*(fptr.(**uint64)) = u
-		default:
-			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
-		}
-	}
-
-	for _, ni := range dm.nested {
-		f := v.Field(ni)
-		// f is *T or []*T or map[T]*T
-		switch f.Kind() {
-		case reflect.Ptr:
-			if f.IsNil() {
-				continue
-			}
-			setDefaults(f, recur, zeros)
-
-		case reflect.Slice:
-			for i := 0; i < f.Len(); i++ {
-				e := f.Index(i)
-				if e.IsNil() {
-					continue
-				}
-				setDefaults(e, recur, zeros)
-			}
-
-		case reflect.Map:
-			for _, k := range f.MapKeys() {
-				e := f.MapIndex(k)
-				if e.IsNil() {
-					continue
-				}
-				setDefaults(e, recur, zeros)
-			}
-		}
-	}
-}
-
-var (
-	// defaults maps a protocol buffer struct type to a slice of the fields,
-	// with its scalar fields set to their proto-declared non-zero default values.
-	defaultMu sync.RWMutex
-	defaults  = make(map[reflect.Type]defaultMessage)
-
-	int32PtrType = reflect.TypeOf((*int32)(nil))
-)
-
-// defaultMessage represents information about the default values of a message.
-type defaultMessage struct {
-	scalars []scalarField
-	nested  []int // struct field index of nested messages
-}
-
-type scalarField struct {
-	index int          // struct field index
-	kind  reflect.Kind // element type (the T in *T or []T)
-	value interface{}  // the proto-declared default value, or nil
-}
-
-// t is a struct type.
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
-	sprop := GetProperties(t)
-	for _, prop := range sprop.Prop {
-		fi, ok := sprop.decoderTags.get(prop.Tag)
-		if !ok {
-			// XXX_unrecognized
-			continue
-		}
-		ft := t.Field(fi).Type
-
-		sf, nested, err := fieldDefault(ft, prop)
-		switch {
-		case err != nil:
-			log.Print(err)
-		case nested:
-			dm.nested = append(dm.nested, fi)
-		case sf != nil:
-			sf.index = fi
-			dm.scalars = append(dm.scalars, *sf)
-		}
-	}
-
-	return dm
-}
-
-// fieldDefault returns the scalarField for field type ft.
-// sf will be nil if the field can not have a default.
-// nestedMessage will be true if this is a nested message.
-// Note that sf.index is not set on return.
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
-	var canHaveDefault bool
-	switch ft.Kind() {
-	case reflect.Ptr:
-		if ft.Elem().Kind() == reflect.Struct {
-			nestedMessage = true
-		} else {
-			canHaveDefault = true // proto2 scalar field
-		}
-
-	case reflect.Slice:
-		switch ft.Elem().Kind() {
-		case reflect.Ptr:
-			nestedMessage = true // repeated message
-		case reflect.Uint8:
-			canHaveDefault = true // bytes field
-		}
-
-	case reflect.Map:
-		if ft.Elem().Kind() == reflect.Ptr {
-			nestedMessage = true // map with message values
-		}
-	}
-
-	if !canHaveDefault {
-		if nestedMessage {
-			return nil, true, nil
-		}
-		return nil, false, nil
-	}
-
-	// We now know that ft is a pointer or slice.
-	sf = &scalarField{kind: ft.Elem().Kind()}
-
-	// scalar fields without defaults
-	if !prop.HasDefault {
-		return sf, false, nil
-	}
-
-	// a scalar field: either *T or []byte
-	switch ft.Elem().Kind() {
-	case reflect.Bool:
-		x, err := strconv.ParseBool(prop.Default)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
-		}
-		sf.value = x
-	case reflect.Float32:
-		x, err := strconv.ParseFloat(prop.Default, 32)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
-		}
-		sf.value = float32(x)
-	case reflect.Float64:
-		x, err := strconv.ParseFloat(prop.Default, 64)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
-		}
-		sf.value = x
-	case reflect.Int32:
-		x, err := strconv.ParseInt(prop.Default, 10, 32)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
-		}
-		sf.value = int32(x)
-	case reflect.Int64:
-		x, err := strconv.ParseInt(prop.Default, 10, 64)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
-		}
-		sf.value = x
-	case reflect.String:
-		sf.value = prop.Default
-	case reflect.Uint8:
-		// []byte (not *uint8)
-		sf.value = []byte(prop.Default)
-	case reflect.Uint32:
-		x, err := strconv.ParseUint(prop.Default, 10, 32)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
-		}
-		sf.value = uint32(x)
-	case reflect.Uint64:
-		x, err := strconv.ParseUint(prop.Default, 10, 64)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
-		}
-		sf.value = x
-	default:
-		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
-	}
-
-	return sf, false, nil
-}
-
-// mapKeys returns a sort.Interface to be used for sorting the map keys.
-// Map fields may have key types of non-float scalars, strings and enums.
-func mapKeys(vs []reflect.Value) sort.Interface {
-	s := mapKeySorter{vs: vs}
-
-	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
-	if len(vs) == 0 {
-		return s
-	}
-	switch vs[0].Kind() {
-	case reflect.Int32, reflect.Int64:
-		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
-	case reflect.Uint32, reflect.Uint64:
-		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
-	case reflect.Bool:
-		s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
-	case reflect.String:
-		s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
-	default:
-		panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
-	}
-
-	return s
-}
-
-type mapKeySorter struct {
-	vs   []reflect.Value
-	less func(a, b reflect.Value) bool
-}
-
-func (s mapKeySorter) Len() int      { return len(s.vs) }
-func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
-func (s mapKeySorter) Less(i, j int) bool {
-	return s.less(s.vs[i], s.vs[j])
-}
-
-// isProto3Zero reports whether v is a zero proto3 value.
-func isProto3Zero(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint32, reflect.Uint64:
-		return v.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
-	case reflect.String:
-		return v.String() == ""
-	}
-	return false
-}
-
-const (
-	// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
-	// to assert that that code is compatible with this version of the proto package.
-	ProtoPackageIsVersion3 = true
-
-	// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-	// to assert that that code is compatible with this version of the proto package.
-	ProtoPackageIsVersion2 = true
-
-	// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-	// to assert that that code is compatible with this version of the proto package.
-	ProtoPackageIsVersion1 = true
-)
-
-// InternalMessageInfo is a type used internally by generated .pb.go files.
-// This type is not intended to be used by non-generated code.
-// This type is not subject to any compatibility guarantee.
-type InternalMessageInfo struct {
-	marshal   *marshalInfo
-	unmarshal *unmarshalInfo
-	merge     *mergeInfo
-	discard   *discardInfo
-}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
deleted file mode 100644
index f48a756..0000000
--- a/vendor/github.com/golang/protobuf/proto/message_set.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Support for message sets.
- */
-
-import (
-	"errors"
-)
-
-// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
-// A message type ID is required for storing a protocol buffer in a message set.
-var errNoMessageTypeID = errors.New("proto does not have a message type ID")
-
-// The first two types (_MessageSet_Item and messageSet)
-// model what the protocol compiler produces for the following protocol message:
-//   message MessageSet {
-//     repeated group Item = 1 {
-//       required int32 type_id = 2;
-//       required string message = 3;
-//     };
-//   }
-// That is the MessageSet wire format. We can't use a proto to generate these
-// because that would introduce a circular dependency between it and this package.
-
-type _MessageSet_Item struct {
-	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
-	Message []byte `protobuf:"bytes,3,req,name=message"`
-}
-
-type messageSet struct {
-	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
-	XXX_unrecognized []byte
-	// TODO: caching?
-}
-
-// Make sure messageSet is a Message.
-var _ Message = (*messageSet)(nil)
-
-// messageTypeIder is an interface satisfied by a protocol buffer type
-// that may be stored in a MessageSet.
-type messageTypeIder interface {
-	MessageTypeId() int32
-}
-
-func (ms *messageSet) find(pb Message) *_MessageSet_Item {
-	mti, ok := pb.(messageTypeIder)
-	if !ok {
-		return nil
-	}
-	id := mti.MessageTypeId()
-	for _, item := range ms.Item {
-		if *item.TypeId == id {
-			return item
-		}
-	}
-	return nil
-}
-
-func (ms *messageSet) Has(pb Message) bool {
-	return ms.find(pb) != nil
-}
-
-func (ms *messageSet) Unmarshal(pb Message) error {
-	if item := ms.find(pb); item != nil {
-		return Unmarshal(item.Message, pb)
-	}
-	if _, ok := pb.(messageTypeIder); !ok {
-		return errNoMessageTypeID
-	}
-	return nil // TODO: return error instead?
-}
-
-func (ms *messageSet) Marshal(pb Message) error {
-	msg, err := Marshal(pb)
-	if err != nil {
-		return err
-	}
-	if item := ms.find(pb); item != nil {
-		// reuse existing item
-		item.Message = msg
-		return nil
-	}
-
-	mti, ok := pb.(messageTypeIder)
-	if !ok {
-		return errNoMessageTypeID
-	}
-
-	mtid := mti.MessageTypeId()
-	ms.Item = append(ms.Item, &_MessageSet_Item{
-		TypeId:  &mtid,
-		Message: msg,
-	})
-	return nil
-}
-
-func (ms *messageSet) Reset()         { *ms = messageSet{} }
-func (ms *messageSet) String() string { return CompactTextString(ms) }
-func (*messageSet) ProtoMessage()     {}
-
-// Support for the message_set_wire_format message option.
-
-func skipVarint(buf []byte) []byte {
-	i := 0
-	for ; buf[i]&0x80 != 0; i++ {
-	}
-	return buf[i+1:]
-}
-
-// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func unmarshalMessageSet(buf []byte, exts interface{}) error {
-	var m map[int32]Extension
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		m = exts.extensionsWrite()
-	case map[int32]Extension:
-		m = exts
-	default:
-		return errors.New("proto: not an extension map")
-	}
-
-	ms := new(messageSet)
-	if err := Unmarshal(buf, ms); err != nil {
-		return err
-	}
-	for _, item := range ms.Item {
-		id := *item.TypeId
-		msg := item.Message
-
-		// Restore wire type and field number varint, plus length varint.
-		// Be careful to preserve duplicate items.
-		b := EncodeVarint(uint64(id)<<3 | WireBytes)
-		if ext, ok := m[id]; ok {
-			// Existing data; rip off the tag and length varint
-			// so we join the new data correctly.
-			// We can assume that ext.enc is set because we are unmarshaling.
-			o := ext.enc[len(b):]   // skip wire type and field number
-			_, n := DecodeVarint(o) // calculate length of length varint
-			o = o[n:]               // skip length varint
-			msg = append(o, msg...) // join old data and new data
-		}
-		b = append(b, EncodeVarint(uint64(len(msg)))...)
-		b = append(b, msg...)
-
-		m[id] = Extension{enc: b}
-	}
-	return nil
-}
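For reference, the re-framing loop in unmarshalMessageSet above boils down to prefixing each item's payload with a protobuf key varint (the item's type_id acting as the field number, length-delimited wire type) and a length varint. A minimal standalone sketch of that framing, using a local stand-in for the package's EncodeVarint helper:

package main

import "fmt"

const wireBytes = 2 // length-delimited wire type

// appendVarint is a local stand-in for EncodeVarint:
// base-128 varint, least-significant group first.
func appendVarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	const typeID = 12345           // the item's type_id doubles as the field number
	payload := []byte("inner msg") // already-encoded inner message bytes

	var b []byte
	b = appendVarint(b, uint64(typeID)<<3|wireBytes) // key: field number + wire type
	b = appendVarint(b, uint64(len(payload)))        // length prefix
	b = append(b, payload...)                        // message body

	fmt.Printf("% x\n", b)
}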
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
deleted file mode 100644
index 94fa919..0000000
--- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
+++ /dev/null
@@ -1,360 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build purego appengine js
-
-// This file contains an implementation of proto field accesses using package reflect.
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
-// be used on App Engine.
-
-package proto
-
-import (
-	"reflect"
-	"sync"
-)
-
-const unsafeAllowed = false
-
-// A field identifies a field in a struct, accessible from a pointer.
-// In this implementation, a field is identified by the sequence of field indices
-// passed to reflect's FieldByIndex.
-type field []int
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
-	return f.Index
-}
-
-// invalidField is an invalid field identifier.
-var invalidField = field(nil)
-
-// zeroField is a noop when calling pointer.offset.
-var zeroField = field([]int{})
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool { return f != nil }
-
-// The pointer type is for the table-driven decoder.
-// The implementation here uses a reflect.Value of pointer type to
-// create a generic pointer. In pointer_unsafe.go we use unsafe
-// instead of reflect to implement the same (but faster) interface.
-type pointer struct {
-	v reflect.Value
-}
-
-// toPointer converts an interface of pointer type to a pointer
-// that points to the same target.
-func toPointer(i *Message) pointer {
-	return pointer{v: reflect.ValueOf(*i)}
-}
-
-// toAddrPointer converts an interface to a pointer that points to
-// the interface data.
-func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
-	v := reflect.ValueOf(*i)
-	u := reflect.New(v.Type())
-	u.Elem().Set(v)
-	if deref {
-		u = u.Elem()
-	}
-	return pointer{v: u}
-}
-
-// valToPointer converts v to a pointer.  v must be of pointer type.
-func valToPointer(v reflect.Value) pointer {
-	return pointer{v: v}
-}
-
-// offset converts from a pointer to a structure to a pointer to
-// one of its fields.
-func (p pointer) offset(f field) pointer {
-	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
-}
-
-func (p pointer) isNil() bool {
-	return p.v.IsNil()
-}
-
-// grow updates the slice s in place to make it one element longer.
-// s must be addressable.
-// Returns the (addressable) new element.
-func grow(s reflect.Value) reflect.Value {
-	n, m := s.Len(), s.Cap()
-	if n < m {
-		s.SetLen(n + 1)
-	} else {
-		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
-	}
-	return s.Index(n)
-}
-
-func (p pointer) toInt64() *int64 {
-	return p.v.Interface().(*int64)
-}
-func (p pointer) toInt64Ptr() **int64 {
-	return p.v.Interface().(**int64)
-}
-func (p pointer) toInt64Slice() *[]int64 {
-	return p.v.Interface().(*[]int64)
-}
-
-var int32ptr = reflect.TypeOf((*int32)(nil))
-
-func (p pointer) toInt32() *int32 {
-	return p.v.Convert(int32ptr).Interface().(*int32)
-}
-
-// The toInt32Ptr/Slice methods don't work because of enums.
-// Instead, we must use set/get methods for the int32ptr/slice case.
-/*
-	func (p pointer) toInt32Ptr() **int32 {
-		return p.v.Interface().(**int32)
-}
-	func (p pointer) toInt32Slice() *[]int32 {
-		return p.v.Interface().(*[]int32)
-}
-*/
-func (p pointer) getInt32Ptr() *int32 {
-	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
-		// raw int32 type
-		return p.v.Elem().Interface().(*int32)
-	}
-	// an enum
-	return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
-}
-func (p pointer) setInt32Ptr(v int32) {
-	// Allocate value in a *int32. Possibly convert that to a *enum.
-	// Then assign it to a **int32 or **enum.
-	// Note: we can convert *int32 to *enum, but we can't convert
-	// **int32 to **enum!
-	p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
-}
-
-// getInt32Slice copies []int32 from p as a new slice.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) getInt32Slice() []int32 {
-	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
-		// raw int32 type
-		return p.v.Elem().Interface().([]int32)
-	}
-	// an enum
-	// Allocate a []int32, then assign []enum's values into it.
-	// Note: we can't convert []enum to []int32.
-	slice := p.v.Elem()
-	s := make([]int32, slice.Len())
-	for i := 0; i < slice.Len(); i++ {
-		s[i] = int32(slice.Index(i).Int())
-	}
-	return s
-}
-
-// setInt32Slice copies []int32 into p as a new slice.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) setInt32Slice(v []int32) {
-	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
-		// raw int32 type
-		p.v.Elem().Set(reflect.ValueOf(v))
-		return
-	}
-	// an enum
-	// Allocate a []enum, then assign []int32's values into it.
-	// Note: we can't convert []enum to []int32.
-	slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
-	for i, x := range v {
-		slice.Index(i).SetInt(int64(x))
-	}
-	p.v.Elem().Set(slice)
-}
-func (p pointer) appendInt32Slice(v int32) {
-	grow(p.v.Elem()).SetInt(int64(v))
-}
-
-func (p pointer) toUint64() *uint64 {
-	return p.v.Interface().(*uint64)
-}
-func (p pointer) toUint64Ptr() **uint64 {
-	return p.v.Interface().(**uint64)
-}
-func (p pointer) toUint64Slice() *[]uint64 {
-	return p.v.Interface().(*[]uint64)
-}
-func (p pointer) toUint32() *uint32 {
-	return p.v.Interface().(*uint32)
-}
-func (p pointer) toUint32Ptr() **uint32 {
-	return p.v.Interface().(**uint32)
-}
-func (p pointer) toUint32Slice() *[]uint32 {
-	return p.v.Interface().(*[]uint32)
-}
-func (p pointer) toBool() *bool {
-	return p.v.Interface().(*bool)
-}
-func (p pointer) toBoolPtr() **bool {
-	return p.v.Interface().(**bool)
-}
-func (p pointer) toBoolSlice() *[]bool {
-	return p.v.Interface().(*[]bool)
-}
-func (p pointer) toFloat64() *float64 {
-	return p.v.Interface().(*float64)
-}
-func (p pointer) toFloat64Ptr() **float64 {
-	return p.v.Interface().(**float64)
-}
-func (p pointer) toFloat64Slice() *[]float64 {
-	return p.v.Interface().(*[]float64)
-}
-func (p pointer) toFloat32() *float32 {
-	return p.v.Interface().(*float32)
-}
-func (p pointer) toFloat32Ptr() **float32 {
-	return p.v.Interface().(**float32)
-}
-func (p pointer) toFloat32Slice() *[]float32 {
-	return p.v.Interface().(*[]float32)
-}
-func (p pointer) toString() *string {
-	return p.v.Interface().(*string)
-}
-func (p pointer) toStringPtr() **string {
-	return p.v.Interface().(**string)
-}
-func (p pointer) toStringSlice() *[]string {
-	return p.v.Interface().(*[]string)
-}
-func (p pointer) toBytes() *[]byte {
-	return p.v.Interface().(*[]byte)
-}
-func (p pointer) toBytesSlice() *[][]byte {
-	return p.v.Interface().(*[][]byte)
-}
-func (p pointer) toExtensions() *XXX_InternalExtensions {
-	return p.v.Interface().(*XXX_InternalExtensions)
-}
-func (p pointer) toOldExtensions() *map[int32]Extension {
-	return p.v.Interface().(*map[int32]Extension)
-}
-func (p pointer) getPointer() pointer {
-	return pointer{v: p.v.Elem()}
-}
-func (p pointer) setPointer(q pointer) {
-	p.v.Elem().Set(q.v)
-}
-func (p pointer) appendPointer(q pointer) {
-	grow(p.v.Elem()).Set(q.v)
-}
-
-// getPointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) getPointerSlice() []pointer {
-	if p.v.IsNil() {
-		return nil
-	}
-	n := p.v.Elem().Len()
-	s := make([]pointer, n)
-	for i := 0; i < n; i++ {
-		s[i] = pointer{v: p.v.Elem().Index(i)}
-	}
-	return s
-}
-
-// setPointerSlice copies []pointer into p as a new []*T.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) setPointerSlice(v []pointer) {
-	if v == nil {
-		p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
-		return
-	}
-	s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
-	for _, p := range v {
-		s = reflect.Append(s, p.v)
-	}
-	p.v.Elem().Set(s)
-}
-
-// getInterfacePointer returns a pointer that points to the
-// interface data of the interface pointed by p.
-func (p pointer) getInterfacePointer() pointer {
-	if p.v.Elem().IsNil() {
-		return pointer{v: p.v.Elem()}
-	}
-	return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
-}
-
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
-	// TODO: check that p.v.Type().Elem() == t?
-	return p.v
-}
-
-func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	return *p
-}
-func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	*p = v
-}
-func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	return *p
-}
-func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	*p = v
-}
-func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	return *p
-}
-func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	*p = v
-}
-func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	return *p
-}
-func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	*p = v
-}
-
-var atomicLock sync.Mutex
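The reflect-based accessors above identify a field by its index path (toField) and reach it through FieldByIndex(...).Addr() (pointer.offset). A minimal sketch of the same pattern, on a struct type invented for the example:

package main

import (
	"fmt"
	"reflect"
)

type msg struct {
	Name  string
	Count int64
}

func main() {
	// toField: remember the index path of the Count field.
	f, _ := reflect.TypeOf(msg{}).FieldByName("Count")
	fieldIdx := f.Index

	// offset + toInt64: address the field through reflection and use it
	// as an ordinary *int64.
	m := &msg{Name: "x"}
	p := reflect.ValueOf(m).Elem().FieldByIndex(fieldIdx).Addr().Interface().(*int64)
	*p = 42

	fmt.Println(m.Count) // 42
}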
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
deleted file mode 100644
index dbfffe0..0000000
--- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build !purego,!appengine,!js
-
-// This file contains the implementation of the proto field accesses using package unsafe.
-
-package proto
-
-import (
-	"reflect"
-	"sync/atomic"
-	"unsafe"
-)
-
-const unsafeAllowed = true
-
-// A field identifies a field in a struct, accessible from a pointer.
-// In this implementation, a field is identified by its byte offset from the start of the struct.
-type field uintptr
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
-	return field(f.Offset)
-}
-
-// invalidField is an invalid field identifier.
-const invalidField = ^field(0)
-
-// zeroField is a noop when calling pointer.offset.
-const zeroField = field(0)
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool {
-	return f != invalidField
-}
-
-// The pointer type below is for the new table-driven encoder/decoder.
-// The implementation here uses unsafe.Pointer to create a generic pointer.
-// In pointer_reflect.go we use reflect instead of unsafe to implement
-// the same (but slower) interface.
-type pointer struct {
-	p unsafe.Pointer
-}
-
-// size of pointer
-var ptrSize = unsafe.Sizeof(uintptr(0))
-
-// toPointer converts an interface of pointer type to a pointer
-// that points to the same target.
-func toPointer(i *Message) pointer {
-	// Super-tricky - read pointer out of data word of interface value.
-	// Saves ~25ns over the equivalent:
-	// return valToPointer(reflect.ValueOf(*i))
-	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
-}
-
-// toAddrPointer converts an interface to a pointer that points to
-// the interface data.
-func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
-	// Super-tricky - read or get the address of data word of interface value.
-	if isptr {
-		// The interface is of pointer type, thus it is a direct interface.
-		// The data word is the pointer data itself. We take its address.
-		p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
-	} else {
-		// The interface is not of pointer type. The data word is the pointer
-		// to the data.
-		p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
-	}
-	if deref {
-		p.p = *(*unsafe.Pointer)(p.p)
-	}
-	return p
-}
-
-// valToPointer converts v to a pointer. v must be of pointer type.
-func valToPointer(v reflect.Value) pointer {
-	return pointer{p: unsafe.Pointer(v.Pointer())}
-}
-
-// offset converts from a pointer to a structure to a pointer to
-// one of its fields.
-func (p pointer) offset(f field) pointer {
-	// For safety, we should panic if !f.IsValid, however calling panic causes
-	// this to no longer be inlineable, which is a serious performance cost.
-	/*
-		if !f.IsValid() {
-			panic("invalid field")
-		}
-	*/
-	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
-}
-
-func (p pointer) isNil() bool {
-	return p.p == nil
-}
-
-func (p pointer) toInt64() *int64 {
-	return (*int64)(p.p)
-}
-func (p pointer) toInt64Ptr() **int64 {
-	return (**int64)(p.p)
-}
-func (p pointer) toInt64Slice() *[]int64 {
-	return (*[]int64)(p.p)
-}
-func (p pointer) toInt32() *int32 {
-	return (*int32)(p.p)
-}
-
-// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
-/*
-	func (p pointer) toInt32Ptr() **int32 {
-		return (**int32)(p.p)
-	}
-	func (p pointer) toInt32Slice() *[]int32 {
-		return (*[]int32)(p.p)
-	}
-*/
-func (p pointer) getInt32Ptr() *int32 {
-	return *(**int32)(p.p)
-}
-func (p pointer) setInt32Ptr(v int32) {
-	*(**int32)(p.p) = &v
-}
-
-// getInt32Slice loads a []int32 from p.
-// The value returned is aliased with the original slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) getInt32Slice() []int32 {
-	return *(*[]int32)(p.p)
-}
-
-// setInt32Slice stores a []int32 to p.
-// The value set is aliased with the input slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) setInt32Slice(v []int32) {
-	*(*[]int32)(p.p) = v
-}
-
-// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
-func (p pointer) appendInt32Slice(v int32) {
-	s := (*[]int32)(p.p)
-	*s = append(*s, v)
-}
-
-func (p pointer) toUint64() *uint64 {
-	return (*uint64)(p.p)
-}
-func (p pointer) toUint64Ptr() **uint64 {
-	return (**uint64)(p.p)
-}
-func (p pointer) toUint64Slice() *[]uint64 {
-	return (*[]uint64)(p.p)
-}
-func (p pointer) toUint32() *uint32 {
-	return (*uint32)(p.p)
-}
-func (p pointer) toUint32Ptr() **uint32 {
-	return (**uint32)(p.p)
-}
-func (p pointer) toUint32Slice() *[]uint32 {
-	return (*[]uint32)(p.p)
-}
-func (p pointer) toBool() *bool {
-	return (*bool)(p.p)
-}
-func (p pointer) toBoolPtr() **bool {
-	return (**bool)(p.p)
-}
-func (p pointer) toBoolSlice() *[]bool {
-	return (*[]bool)(p.p)
-}
-func (p pointer) toFloat64() *float64 {
-	return (*float64)(p.p)
-}
-func (p pointer) toFloat64Ptr() **float64 {
-	return (**float64)(p.p)
-}
-func (p pointer) toFloat64Slice() *[]float64 {
-	return (*[]float64)(p.p)
-}
-func (p pointer) toFloat32() *float32 {
-	return (*float32)(p.p)
-}
-func (p pointer) toFloat32Ptr() **float32 {
-	return (**float32)(p.p)
-}
-func (p pointer) toFloat32Slice() *[]float32 {
-	return (*[]float32)(p.p)
-}
-func (p pointer) toString() *string {
-	return (*string)(p.p)
-}
-func (p pointer) toStringPtr() **string {
-	return (**string)(p.p)
-}
-func (p pointer) toStringSlice() *[]string {
-	return (*[]string)(p.p)
-}
-func (p pointer) toBytes() *[]byte {
-	return (*[]byte)(p.p)
-}
-func (p pointer) toBytesSlice() *[][]byte {
-	return (*[][]byte)(p.p)
-}
-func (p pointer) toExtensions() *XXX_InternalExtensions {
-	return (*XXX_InternalExtensions)(p.p)
-}
-func (p pointer) toOldExtensions() *map[int32]Extension {
-	return (*map[int32]Extension)(p.p)
-}
-
-// getPointerSlice loads []*T from p as a []pointer.
-// The value returned is aliased with the original slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) getPointerSlice() []pointer {
-	// Super-tricky - p should point to a []*T where T is a
-	// message type. We load it as []pointer.
-	return *(*[]pointer)(p.p)
-}
-
-// setPointerSlice stores []pointer into p as a []*T.
-// The value set is aliased with the input slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) setPointerSlice(v []pointer) {
-	// Super-tricky - p should point to a []*T where T is a
-	// message type. We store it as []pointer.
-	*(*[]pointer)(p.p) = v
-}
-
-// getPointer loads the pointer at p and returns it.
-func (p pointer) getPointer() pointer {
-	return pointer{p: *(*unsafe.Pointer)(p.p)}
-}
-
-// setPointer stores the pointer q at p.
-func (p pointer) setPointer(q pointer) {
-	*(*unsafe.Pointer)(p.p) = q.p
-}
-
-// append q to the slice pointed to by p.
-func (p pointer) appendPointer(q pointer) {
-	s := (*[]unsafe.Pointer)(p.p)
-	*s = append(*s, q.p)
-}
-
-// getInterfacePointer returns a pointer that points to the
-// interface data of the interface pointed by p.
-func (p pointer) getInterfacePointer() pointer {
-	// Super-tricky - read pointer out of data word of interface value.
-	return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
-}
-
-// asPointerTo returns a reflect.Value that is a pointer to an
-// object of type t stored at p.
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
-	return reflect.NewAt(t, p.p)
-}
-
-func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
-	return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
-	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
-	return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
-	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
-	return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
-	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
-	return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
-	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
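The unsafe variant identifies a field by its byte offset instead and reaches it with raw pointer arithmetic. A minimal sketch of the same idea on an invented struct type, assuming Go 1.17+ for unsafe.Add:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type msg struct {
	Name  string
	Count int64
}

func main() {
	// toField: remember the byte offset of the Count field.
	f, _ := reflect.TypeOf(msg{}).FieldByName("Count")
	off := f.Offset

	// offset + toInt64: advance the base pointer by the field offset and
	// use it as an ordinary *int64.
	m := &msg{Name: "x"}
	p := (*int64)(unsafe.Add(unsafe.Pointer(m), off))
	*p = 42

	fmt.Println(m.Count) // 42
}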
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
index a4b8c0c..dcdc220 100644
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -1,162 +1,104 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 package proto
 
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
 import (
 	"fmt"
-	"log"
 	"reflect"
-	"sort"
 	"strconv"
 	"strings"
 	"sync"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/runtime/protoimpl"
 )
 
-const debug bool = false
-
-// Constants that identify the encoding of a value on the wire.
-const (
-	WireVarint     = 0
-	WireFixed64    = 1
-	WireBytes      = 2
-	WireStartGroup = 3
-	WireEndGroup   = 4
-	WireFixed32    = 5
-)
-
-// tagMap is an optimization over map[int]int for typical protocol buffer
-// use-cases. Encoded protocol buffers are often in tag order with small tag
-// numbers.
-type tagMap struct {
-	fastTags []int
-	slowTags map[int]int
-}
-
-// tagMapFastLimit is the upper bound on the tag number that will be stored in
-// the tagMap slice rather than its map.
-const tagMapFastLimit = 1024
-
-func (p *tagMap) get(t int) (int, bool) {
-	if t > 0 && t < tagMapFastLimit {
-		if t >= len(p.fastTags) {
-			return 0, false
-		}
-		fi := p.fastTags[t]
-		return fi, fi >= 0
-	}
-	fi, ok := p.slowTags[t]
-	return fi, ok
-}
-
-func (p *tagMap) put(t int, fi int) {
-	if t > 0 && t < tagMapFastLimit {
-		for len(p.fastTags) < t+1 {
-			p.fastTags = append(p.fastTags, -1)
-		}
-		p.fastTags[t] = fi
-		return
-	}
-	if p.slowTags == nil {
-		p.slowTags = make(map[int]int)
-	}
-	p.slowTags[t] = fi
-}
-
-// StructProperties represents properties for all the fields of a struct.
-// decoderTags and decoderOrigNames should only be used by the decoder.
+// StructProperties represents protocol buffer type information for a
+// generated protobuf message in the open-struct API.
+//
+// Deprecated: Do not use.
 type StructProperties struct {
-	Prop             []*Properties  // properties for each field
-	reqCount         int            // required count
-	decoderTags      tagMap         // map from proto tag to struct field number
-	decoderOrigNames map[string]int // map from original name to struct field number
-	order            []int          // list of struct field numbers in tag order
+	// Prop are the properties for each field.
+	//
+	// Fields belonging to a oneof are stored in OneofTypes instead, with a
+	// single Properties representing the parent oneof held here.
+	//
+	// The order of Prop matches the order of fields in the Go struct.
+	// Struct fields that are not related to protobufs have a "XXX_" prefix
+	// in the Properties.Name and must be ignored by the user.
+	Prop []*Properties
 
 	// OneofTypes contains information about the oneof fields in this message.
-	// It is keyed by the original name of a field.
+	// It is keyed by the protobuf field name.
 	OneofTypes map[string]*OneofProperties
 }
 
-// OneofProperties represents information about a specific field in a oneof.
-type OneofProperties struct {
-	Type  reflect.Type // pointer to generated struct type for this oneof field
-	Field int          // struct field number of the containing oneof in the message
-	Prop  *Properties
-}
-
-// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
-// See encode.go, (*Buffer).enc_struct.
-
-func (sp *StructProperties) Len() int { return len(sp.order) }
-func (sp *StructProperties) Less(i, j int) bool {
-	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
-}
-func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
-
-// Properties represents the protocol-specific behavior of a single struct field.
+// Properties represents the type information for a protobuf message field.
+//
+// Deprecated: Do not use.
 type Properties struct {
-	Name     string // name of the field, for error messages
-	OrigName string // original name before protocol compiler (always set)
-	JSONName string // name to use for JSON; determined by protoc
-	Wire     string
+	// Name is a placeholder name with little meaningful semantic value.
+	// If the name has an "XXX_" prefix, the entire Properties must be ignored.
+	Name string
+	// OrigName is the protobuf field name or oneof name.
+	OrigName string
+	// JSONName is the JSON name for the protobuf field.
+	JSONName string
+	// Enum is a placeholder name for enums.
+	// For historical reasons, this is neither the Go name for the enum,
+	// nor the protobuf name for the enum.
+	Enum string // Deprecated: Do not use.
+	// Weak contains the full name of the weakly referenced message.
+	Weak string
+	// Wire is a string representation of the wire type.
+	Wire string
+	// WireType is the protobuf wire type for the field.
 	WireType int
-	Tag      int
+	// Tag is the protobuf field number.
+	Tag int
+	// Required reports whether this is a required field.
 	Required bool
+	// Optional reports whether this is an optional field.
 	Optional bool
+	// Repeated reports whether this is a repeated field.
 	Repeated bool
-	Packed   bool   // relevant for repeated primitives only
-	Enum     string // set for enum types only
-	proto3   bool   // whether this is known to be a proto3 field
-	oneof    bool   // whether this is a oneof field
+	// Packed reports whether this is a packed repeated field of scalars.
+	Packed bool
+	// Proto3 reports whether this field operates under the proto3 syntax.
+	Proto3 bool
+	// Oneof reports whether this field belongs within a oneof.
+	Oneof bool
 
-	Default    string // default value
-	HasDefault bool   // whether an explicit default was provided
+	// Default is the default value in string form.
+	Default string
+	// HasDefault reports whether the field has a default value.
+	HasDefault bool
 
-	stype reflect.Type      // set for struct types only
-	sprop *StructProperties // set for struct types only
+	// MapKeyProp is the properties for the key field for a map field.
+	MapKeyProp *Properties
+	// MapValProp is the properties for the value field for a map field.
+	MapValProp *Properties
+}
 
-	mtype      reflect.Type // set for map types only
-	MapKeyProp *Properties  // set for map types only
-	MapValProp *Properties  // set for map types only
+// OneofProperties represents the type information for a protobuf oneof.
+//
+// Deprecated: Do not use.
+type OneofProperties struct {
+	// Type is a pointer to the generated wrapper type for the field value.
+	// This is nil for messages that are not in the open-struct API.
+	Type reflect.Type
+	// Field is the index into StructProperties.Prop for the containing oneof.
+	Field int
+	// Prop is the properties for the field.
+	Prop *Properties
 }
 
 // String formats the properties in the protobuf struct field tag style.
 func (p *Properties) String() string {
 	s := p.Wire
-	s += ","
-	s += strconv.Itoa(p.Tag)
+	s += "," + strconv.Itoa(p.Tag)
 	if p.Required {
 		s += ",req"
 	}
@@ -170,18 +112,21 @@
 		s += ",packed"
 	}
 	s += ",name=" + p.OrigName
-	if p.JSONName != p.OrigName {
+	if p.JSONName != "" {
 		s += ",json=" + p.JSONName
 	}
-	if p.proto3 {
-		s += ",proto3"
-	}
-	if p.oneof {
-		s += ",oneof"
-	}
 	if len(p.Enum) > 0 {
 		s += ",enum=" + p.Enum
 	}
+	if len(p.Weak) > 0 {
+		s += ",weak=" + p.Weak
+	}
+	if p.Proto3 {
+		s += ",proto3"
+	}
+	if p.Oneof {
+		s += ",oneof"
+	}
 	if p.HasDefault {
 		s += ",def=" + p.Default
 	}
@@ -189,356 +134,173 @@
 }
 
 // Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(s string) {
-	// "bytes,49,opt,name=foo,def=hello!"
-	fields := strings.Split(s, ",") // breaks def=, but handled below.
-	if len(fields) < 2 {
-		log.Printf("proto: tag has too few fields: %q", s)
-		return
-	}
-
-	p.Wire = fields[0]
-	switch p.Wire {
-	case "varint":
-		p.WireType = WireVarint
-	case "fixed32":
-		p.WireType = WireFixed32
-	case "fixed64":
-		p.WireType = WireFixed64
-	case "zigzag32":
-		p.WireType = WireVarint
-	case "zigzag64":
-		p.WireType = WireVarint
-	case "bytes", "group":
-		p.WireType = WireBytes
-		// no numeric converter for non-numeric types
-	default:
-		log.Printf("proto: tag has unknown wire type: %q", s)
-		return
-	}
-
-	var err error
-	p.Tag, err = strconv.Atoi(fields[1])
-	if err != nil {
-		return
-	}
-
-outer:
-	for i := 2; i < len(fields); i++ {
-		f := fields[i]
-		switch {
-		case f == "req":
-			p.Required = true
-		case f == "opt":
+func (p *Properties) Parse(tag string) {
+	// For example: "bytes,49,opt,name=foo,def=hello!"
+	for len(tag) > 0 {
+		i := strings.IndexByte(tag, ',')
+		if i < 0 {
+			i = len(tag)
+		}
+		switch s := tag[:i]; {
+		case strings.HasPrefix(s, "name="):
+			p.OrigName = s[len("name="):]
+		case strings.HasPrefix(s, "json="):
+			p.JSONName = s[len("json="):]
+		case strings.HasPrefix(s, "enum="):
+			p.Enum = s[len("enum="):]
+		case strings.HasPrefix(s, "weak="):
+			p.Weak = s[len("weak="):]
+		case strings.Trim(s, "0123456789") == "":
+			n, _ := strconv.ParseUint(s, 10, 32)
+			p.Tag = int(n)
+		case s == "opt":
 			p.Optional = true
-		case f == "rep":
+		case s == "req":
+			p.Required = true
+		case s == "rep":
 			p.Repeated = true
-		case f == "packed":
+		case s == "varint" || s == "zigzag32" || s == "zigzag64":
+			p.Wire = s
+			p.WireType = WireVarint
+		case s == "fixed32":
+			p.Wire = s
+			p.WireType = WireFixed32
+		case s == "fixed64":
+			p.Wire = s
+			p.WireType = WireFixed64
+		case s == "bytes":
+			p.Wire = s
+			p.WireType = WireBytes
+		case s == "group":
+			p.Wire = s
+			p.WireType = WireStartGroup
+		case s == "packed":
 			p.Packed = true
-		case strings.HasPrefix(f, "name="):
-			p.OrigName = f[5:]
-		case strings.HasPrefix(f, "json="):
-			p.JSONName = f[5:]
-		case strings.HasPrefix(f, "enum="):
-			p.Enum = f[5:]
-		case f == "proto3":
-			p.proto3 = true
-		case f == "oneof":
-			p.oneof = true
-		case strings.HasPrefix(f, "def="):
+		case s == "proto3":
+			p.Proto3 = true
+		case s == "oneof":
+			p.Oneof = true
+		case strings.HasPrefix(s, "def="):
+			// The default tag is special in that everything afterwards is the
+			// default regardless of the presence of commas.
 			p.HasDefault = true
-			p.Default = f[4:] // rest of string
-			if i+1 < len(fields) {
-				// Commas aren't escaped, and def is always last.
-				p.Default += "," + strings.Join(fields[i+1:], ",")
-				break outer
-			}
+			p.Default, i = tag[len("def="):], len(tag)
 		}
+		tag = strings.TrimPrefix(tag[i:], ",")
 	}
 }
 
-var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
-
-// setFieldProps initializes the field properties for submessages and maps.
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
-	switch t1 := typ; t1.Kind() {
-	case reflect.Ptr:
-		if t1.Elem().Kind() == reflect.Struct {
-			p.stype = t1.Elem()
-		}
-
-	case reflect.Slice:
-		if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
-			p.stype = t2.Elem()
-		}
-
-	case reflect.Map:
-		p.mtype = t1
-		p.MapKeyProp = &Properties{}
-		p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
-		p.MapValProp = &Properties{}
-		vtype := p.mtype.Elem()
-		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
-			// The value type is not a message (*T) or bytes ([]byte),
-			// so we need encoders for the pointer to this type.
-			vtype = reflect.PtrTo(vtype)
-		}
-		p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
-	}
-
-	if p.stype != nil {
-		if lockGetProp {
-			p.sprop = GetProperties(p.stype)
-		} else {
-			p.sprop = getPropertiesLocked(p.stype)
-		}
-	}
-}
-
-var (
-	marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
-)
-
 // Init populates the properties from a protocol buffer struct tag.
+//
+// Deprecated: Do not use.
 func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
-	p.init(typ, name, tag, f, true)
-}
-
-func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
-	// "bytes,49,opt,def=hello!"
 	p.Name = name
 	p.OrigName = name
 	if tag == "" {
 		return
 	}
 	p.Parse(tag)
-	p.setFieldProps(typ, f, lockGetProp)
+
+	if typ != nil && typ.Kind() == reflect.Map {
+		p.MapKeyProp = new(Properties)
+		p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
+		p.MapValProp = new(Properties)
+		p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
+	}
 }
 
-var (
-	propertiesMu  sync.RWMutex
-	propertiesMap = make(map[reflect.Type]*StructProperties)
-)
+var propertiesCache sync.Map // map[reflect.Type]*StructProperties
 
-// GetProperties returns the list of properties for the type represented by t.
-// t must represent a generated struct type of a protocol message.
+// GetProperties returns the list of properties for the type represented by t,
+// which must be a generated protocol buffer message in the open-struct API,
+// where protobuf message fields are represented by exported Go struct fields.
+//
+// Deprecated: Use protobuf reflection instead.
 func GetProperties(t reflect.Type) *StructProperties {
-	if t.Kind() != reflect.Struct {
-		panic("proto: type must have kind struct")
+	if p, ok := propertiesCache.Load(t); ok {
+		return p.(*StructProperties)
 	}
-
-	// Most calls to GetProperties in a long-running program will be
-	// retrieving details for types we have seen before.
-	propertiesMu.RLock()
-	sprop, ok := propertiesMap[t]
-	propertiesMu.RUnlock()
-	if ok {
-		return sprop
-	}
-
-	propertiesMu.Lock()
-	sprop = getPropertiesLocked(t)
-	propertiesMu.Unlock()
-	return sprop
+	p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
+	return p.(*StructProperties)
 }
 
-type (
-	oneofFuncsIface interface {
-		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-	}
-	oneofWrappersIface interface {
-		XXX_OneofWrappers() []interface{}
-	}
-)
-
-// getPropertiesLocked requires that propertiesMu is held.
-func getPropertiesLocked(t reflect.Type) *StructProperties {
-	if prop, ok := propertiesMap[t]; ok {
-		return prop
+func newProperties(t reflect.Type) *StructProperties {
+	if t.Kind() != reflect.Struct {
+		panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
 	}
 
+	var hasOneof bool
 	prop := new(StructProperties)
-	// in case of recursive protos, fill this in now.
-	propertiesMap[t] = prop
 
-	// build properties
-	prop.Prop = make([]*Properties, t.NumField())
-	prop.order = make([]int, t.NumField())
-
+	// Construct a list of properties for each field in the struct.
 	for i := 0; i < t.NumField(); i++ {
-		f := t.Field(i)
 		p := new(Properties)
-		name := f.Name
-		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+		f := t.Field(i)
+		tagField := f.Tag.Get("protobuf")
+		p.Init(f.Type, f.Name, tagField, &f)
 
-		oneof := f.Tag.Get("protobuf_oneof") // special case
-		if oneof != "" {
-			// Oneof fields don't use the traditional protobuf tag.
-			p.OrigName = oneof
+		tagOneof := f.Tag.Get("protobuf_oneof")
+		if tagOneof != "" {
+			hasOneof = true
+			p.OrigName = tagOneof
 		}
-		prop.Prop[i] = p
-		prop.order[i] = i
-		if debug {
-			print(i, " ", f.Name, " ", t.String(), " ")
-			if p.Tag > 0 {
-				print(p.String())
+
+		// Rename unrelated struct fields with the "XXX_" prefix since so much
+		// user code simply checks for this to exclude special fields.
+		if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
+			p.Name = "XXX_" + p.Name
+			p.OrigName = "XXX_" + p.OrigName
+		} else if p.Weak != "" {
+			p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
+		}
+
+		prop.Prop = append(prop.Prop, p)
+	}
+
+	// Construct a mapping of oneof field names to properties.
+	if hasOneof {
+		var oneofWrappers []interface{}
+		if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
+			oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
+		}
+		if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
+			oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
+		}
+		if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
+			if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
+				oneofWrappers = m.ProtoMessageInfo().OneofWrappers
 			}
-			print("\n")
 		}
-	}
 
-	// Re-order prop.order.
-	sort.Sort(prop)
-
-	var oots []interface{}
-	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
-	case oneofFuncsIface:
-		_, _, _, oots = m.XXX_OneofFuncs()
-	case oneofWrappersIface:
-		oots = m.XXX_OneofWrappers()
-	}
-	if len(oots) > 0 {
-		// Interpret oneof metadata.
 		prop.OneofTypes = make(map[string]*OneofProperties)
-		for _, oot := range oots {
-			oop := &OneofProperties{
-				Type: reflect.ValueOf(oot).Type(), // *T
+		for _, wrapper := range oneofWrappers {
+			p := &OneofProperties{
+				Type: reflect.ValueOf(wrapper).Type(), // *T
 				Prop: new(Properties),
 			}
-			sft := oop.Type.Elem().Field(0)
-			oop.Prop.Name = sft.Name
-			oop.Prop.Parse(sft.Tag.Get("protobuf"))
-			// There will be exactly one interface field that
-			// this new value is assignable to.
-			for i := 0; i < t.NumField(); i++ {
-				f := t.Field(i)
-				if f.Type.Kind() != reflect.Interface {
-					continue
-				}
-				if !oop.Type.AssignableTo(f.Type) {
-					continue
-				}
-				oop.Field = i
-				break
-			}
-			prop.OneofTypes[oop.Prop.OrigName] = oop
-		}
-	}
+			f := p.Type.Elem().Field(0)
+			p.Prop.Name = f.Name
+			p.Prop.Parse(f.Tag.Get("protobuf"))
 
-	// build required counts
-	// build tags
-	reqCount := 0
-	prop.decoderOrigNames = make(map[string]int)
-	for i, p := range prop.Prop {
-		if strings.HasPrefix(p.Name, "XXX_") {
-			// Internal fields should not appear in tags/origNames maps.
-			// They are handled specially when encoding and decoding.
-			continue
+			// Determine the struct field that contains this oneof.
+			// Each wrapper is assignable to exactly one parent field.
+			var foundOneof bool
+			for i := 0; i < t.NumField() && !foundOneof; i++ {
+				if p.Type.AssignableTo(t.Field(i).Type) {
+					p.Field = i
+					foundOneof = true
+				}
+			}
+			if !foundOneof {
+				panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
+			}
+			prop.OneofTypes[p.Prop.OrigName] = p
 		}
-		if p.Required {
-			reqCount++
-		}
-		prop.decoderTags.put(p.Tag, i)
-		prop.decoderOrigNames[p.OrigName] = i
 	}
-	prop.reqCount = reqCount
 
 	return prop
 }
 
-// A global registry of enum types.
-// The generated code will register the generated maps by calling RegisterEnum.
-
-var enumValueMaps = make(map[string]map[string]int32)
-
-// RegisterEnum is called from the generated code to install the enum descriptor
-// maps into the global table to aid parsing text format protocol buffers.
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
-	if _, ok := enumValueMaps[typeName]; ok {
-		panic("proto: duplicate enum registered: " + typeName)
-	}
-	enumValueMaps[typeName] = valueMap
-}
-
-// EnumValueMap returns the mapping from names to integers of the
-// enum type enumType, or a nil if not found.
-func EnumValueMap(enumType string) map[string]int32 {
-	return enumValueMaps[enumType]
-}
-
-// A registry of all linked message types.
-// The string is a fully-qualified proto name ("pkg.Message").
-var (
-	protoTypedNils = make(map[string]Message)      // a map from proto names to typed nil pointers
-	protoMapTypes  = make(map[string]reflect.Type) // a map from proto names to map types
-	revProtoTypes  = make(map[reflect.Type]string)
-)
-
-// RegisterType is called from generated code and maps from the fully qualified
-// proto name to the type (pointer to struct) of the protocol buffer.
-func RegisterType(x Message, name string) {
-	if _, ok := protoTypedNils[name]; ok {
-		// TODO: Some day, make this a panic.
-		log.Printf("proto: duplicate proto type registered: %s", name)
-		return
-	}
-	t := reflect.TypeOf(x)
-	if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
-		// Generated code always calls RegisterType with nil x.
-		// This check is just for extra safety.
-		protoTypedNils[name] = x
-	} else {
-		protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
-	}
-	revProtoTypes[t] = name
-}
-
-// RegisterMapType is called from generated code and maps from the fully qualified
-// proto name to the native map type of the proto map definition.
-func RegisterMapType(x interface{}, name string) {
-	if reflect.TypeOf(x).Kind() != reflect.Map {
-		panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
-	}
-	if _, ok := protoMapTypes[name]; ok {
-		log.Printf("proto: duplicate proto type registered: %s", name)
-		return
-	}
-	t := reflect.TypeOf(x)
-	protoMapTypes[name] = t
-	revProtoTypes[t] = name
-}
-
-// MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string {
-	type xname interface {
-		XXX_MessageName() string
-	}
-	if m, ok := x.(xname); ok {
-		return m.XXX_MessageName()
-	}
-	return revProtoTypes[reflect.TypeOf(x)]
-}
-
-// MessageType returns the message type (pointer to struct) for a named message.
-// The type is not guaranteed to implement proto.Message if the name refers to a
-// map entry.
-func MessageType(name string) reflect.Type {
-	if t, ok := protoTypedNils[name]; ok {
-		return reflect.TypeOf(t)
-	}
-	return protoMapTypes[name]
-}
-
-// A registry of all linked proto files.
-var (
-	protoFiles = make(map[string][]byte) // file name => fileDescriptor
-)
-
-// RegisterFile is called from generated code and maps from the
-// full file name of a .proto file to its compressed FileDescriptorProto.
-func RegisterFile(filename string, fileDescriptor []byte) {
-	protoFiles[filename] = fileDescriptor
-}
-
-// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
-func FileDescriptor(filename string) []byte { return protoFiles[filename] }
+func (sp *StructProperties) Len() int           { return len(sp.Prop) }
+func (sp *StructProperties) Less(i, j int) bool { return false }
+func (sp *StructProperties) Swap(i, j int)      { return }
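The rewritten Parse above walks a generated struct tag one comma-separated token at a time instead of splitting it up front. A small sketch of exercising it directly; the tag literal and field name are invented for illustration:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	var p proto.Properties
	// A tag of the shape protoc-gen-go emits for a proto3 string field.
	p.Parse("bytes,3,opt,name=serial_number,json=serialNumber,proto3")

	fmt.Println(p.Wire)     // bytes
	fmt.Println(p.Tag)      // 3
	fmt.Println(p.OrigName) // serial_number
	fmt.Println(p.Proto3)   // true
}

As the Deprecated notes indicate, new code should use protobuf reflection from google.golang.org/protobuf rather than this tag parser.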
diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go
new file mode 100644
index 0000000..5aee89c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proto provides functionality for handling protocol buffer messages.
+// In particular, it provides marshaling and unmarshaling between a protobuf
+// message and the binary wire format.
+//
+// See https://developers.google.com/protocol-buffers/docs/gotutorial for
+// more information.
+//
+// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
+package proto
+
+import (
+	protoV2 "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/runtime/protoiface"
+	"google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+	ProtoPackageIsVersion1 = true
+	ProtoPackageIsVersion2 = true
+	ProtoPackageIsVersion3 = true
+	ProtoPackageIsVersion4 = true
+)
+
+// GeneratedEnum is any enum type generated by protoc-gen-go
+// which is a named int32 kind.
+// This type exists for documentation purposes.
+type GeneratedEnum interface{}
+
+// GeneratedMessage is any message type generated by protoc-gen-go
+// which is a pointer to a named struct kind.
+// This type exists for documentation purposes.
+type GeneratedMessage interface{}
+
+// Message is a protocol buffer message.
+//
+// This is the v1 version of the message interface and is marginally better
+// than an empty interface as it lacks any method to programmatically interact
+// with the contents of the message.
+//
+// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
+// exposes protobuf reflection as a first-class feature of the interface.
+//
+// To convert a v1 message to a v2 message, use the MessageV2 function.
+// To convert a v2 message to a v1 message, use the MessageV1 function.
+type Message = protoiface.MessageV1
+
+// MessageV1 converts either a v1 or v2 message to a v1 message.
+// It returns nil if m is nil.
+func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
+	return protoimpl.X.ProtoMessageV1Of(m)
+}
+
+// MessageV2 converts either a v1 or v2 message to a v2 message.
+// It returns nil if m is nil.
+func MessageV2(m GeneratedMessage) protoV2.Message {
+	return protoimpl.X.ProtoMessageV2Of(m)
+}
+
+// MessageReflect returns a reflective view for a message.
+// It returns nil if m is nil.
+func MessageReflect(m Message) protoreflect.Message {
+	return protoimpl.X.MessageOf(m)
+}
+
+// Marshaler is implemented by messages that can marshal themselves.
+// This interface is used by the following functions: Size, Marshal,
+// Buffer.Marshal, and Buffer.EncodeMessage.
+//
+// Deprecated: Do not implement.
+type Marshaler interface {
+	// Marshal formats the encoded bytes of the message.
+	// It should be deterministic and emit valid protobuf wire data.
+	// The caller takes ownership of the returned buffer.
+	Marshal() ([]byte, error)
+}
+
+// Unmarshaler is implemented by messages that can unmarshal themselves.
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
+// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
+//
+// Deprecated: Do not implement.
+type Unmarshaler interface {
+	// Unmarshal parses the encoded bytes of the protobuf wire input.
+	// The provided buffer is only valid for the duration of the method call.
+	// It should not reset the receiver message.
+	Unmarshal([]byte) error
+}
+
+// Merger is implemented by messages that can merge themselves.
+// This interface is used by the following functions: Clone and Merge.
+//
+// Deprecated: Do not implement.
+type Merger interface {
+	// Merge merges the contents of src into the receiver message.
+	// It clones all data structures in src such that it aliases no mutable
+	// memory referenced by src.
+	Merge(src Message)
+}
+
+// RequiredNotSetError is an error type returned when
+// marshaling or unmarshaling a message with missing required fields.
+type RequiredNotSetError struct {
+	err error
+}
+
+func (e *RequiredNotSetError) Error() string {
+	if e.err != nil {
+		return e.err.Error()
+	}
+	return "proto: required field not set"
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+	return true
+}
+
+func checkRequiredNotSet(m protoV2.Message) error {
+	if err := protoV2.CheckInitialized(m); err != nil {
+		return &RequiredNotSetError{err: err}
+	}
+	return nil
+}
+
+// Clone returns a deep copy of src.
+func Clone(src Message) Message {
+	return MessageV1(protoV2.Clone(MessageV2(src)))
+}
+
+// Merge merges src into dst, which must be messages of the same type.
+//
+// Populated scalar fields in src are copied to dst, while populated
+// singular messages in src are merged into dst by recursively calling Merge.
+// The elements of every list field in src are appended to the corresponding
+// list fields in dst. The entries of every map field in src are copied into
+// the corresponding map field in dst, possibly replacing existing entries.
+// The unknown fields of src are appended to the unknown fields of dst.
+func Merge(dst, src Message) {
+	protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Equal reports whether two messages are equal.
+// If two messages marshal to the same bytes under deterministic serialization,
+// then Equal is guaranteed to report true.
+//
+// Two messages are equal if they are the same protobuf message type,
+// have the same set of populated known and extension field values,
+// and the same set of unknown field values.
+//
+// Scalar values are compared with the equivalent of the == operator in Go,
+// except bytes values which are compared using bytes.Equal and
+// floating point values which specially treat NaNs as equal.
+// Message values are compared by recursively calling Equal.
+// Lists are equal if each element value is also equal.
+// Maps are equal if they have the same set of keys, where the pair of values
+// for each key is also equal.
+func Equal(x, y Message) bool {
+	return protoV2.Equal(MessageV2(x), MessageV2(y))
+}
+
+func isMessageSet(md protoreflect.MessageDescriptor) bool {
+	ms, ok := md.(interface{ IsMessageSet() bool })
+	return ok && ms.IsMessageSet()
+}
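With this file, the v1 package becomes a shim over the v2 runtime, so helpers such as Clone, Equal and MessageV2 keep working on any generated message. A minimal sketch, using wrapperspb only as a convenient stand-in for a generated message type:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	src := wrapperspb.String("openolt")

	// Clone and Equal are forwarded through MessageV2 to the v2 runtime.
	dst := proto.Clone(src)
	fmt.Println(proto.Equal(src, dst)) // true

	// MessageV2 exposes the same message through the v2 interface,
	// giving access to protobuf reflection.
	fmt.Println(proto.MessageV2(src).ProtoReflect().Descriptor().FullName())
}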
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
new file mode 100644
index 0000000..066b432
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/registry.go
@@ -0,0 +1,317 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+	"reflect"
+	"strings"
+	"sync"
+
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// filePath is the path to the proto source file.
+type filePath = string // e.g., "google/protobuf/descriptor.proto"
+
+// fileDescGZIP is the gzip-compressed contents of the encoded FileDescriptorProto.
+type fileDescGZIP = []byte
+
+var fileCache sync.Map // map[filePath]fileDescGZIP
+
+// RegisterFile is called from generated code to register the compressed
+// FileDescriptorProto with the file path for a proto source file.
+//
+// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
+func RegisterFile(s filePath, d fileDescGZIP) {
+	// Decompress the descriptor.
+	zr, err := gzip.NewReader(bytes.NewReader(d))
+	if err != nil {
+		panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+	}
+	b, err := ioutil.ReadAll(zr)
+	if err != nil {
+		panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+	}
+
+	// Construct a protoreflect.FileDescriptor from the raw descriptor.
+	// Note that DescBuilder.Build automatically registers the constructed
+	// file descriptor with the v2 registry.
+	protoimpl.DescBuilder{RawDescriptor: b}.Build()
+
+	// Locally cache the raw descriptor form for the file.
+	fileCache.Store(s, d)
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto given the file path
+// for a proto source file. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
+func FileDescriptor(s filePath) fileDescGZIP {
+	if v, ok := fileCache.Load(s); ok {
+		return v.(fileDescGZIP)
+	}
+
+	// Find the descriptor in the v2 registry.
+	var b []byte
+	if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
+		b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
+	}
+
+	// Locally cache the raw descriptor form for the file.
+	if len(b) > 0 {
+		v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
+		return v.(fileDescGZIP)
+	}
+	return nil
+}
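
Since the returned bytes are a gzip-compressed FileDescriptorProto, a caller that wants the structured form has to decompress and unmarshal them. A sketch, assuming descriptor.proto is linked in via the descriptorpb import:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Importing descriptorpb registers "google/protobuf/descriptor.proto".
	gz := proto.FileDescriptor("google/protobuf/descriptor.proto")
	if gz == nil {
		fmt.Println("descriptor not found")
		return
	}

	// The returned bytes are a gzip-compressed FileDescriptorProto.
	zr, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		panic(err)
	}
	raw, err := ioutil.ReadAll(zr)
	if err != nil {
		panic(err)
	}

	fdp := &descriptorpb.FileDescriptorProto{}
	if err := proto.Unmarshal(raw, fdp); err != nil {
		panic(err)
	}
	fmt.Println(fdp.GetName()) // google/protobuf/descriptor.proto
}
```
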
+
+// enumName is the name of an enum. For historical reasons, the enum name is
+// neither the full Go name nor the full protobuf name of the enum.
+// The name is the proto package that the enum is declared within, followed by
+// a dot and the Go type name of the generated enum.
+type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
+
+// enumsByName maps enum values by name to their numeric counterpart.
+type enumsByName = map[string]int32
+
+// enumsByNumber maps enum values by number to their name counterpart.
+type enumsByNumber = map[int32]string
+
+var enumCache sync.Map     // map[enumName]enumsByName
+var numFilesCache sync.Map // map[protoreflect.FullName]int
+
+// RegisterEnum is called from the generated code to register the mapping of
+// enum value names to enum numbers for the enum identified by s.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
+	if _, ok := enumCache.Load(s); ok {
+		panic("proto: duplicate enum registered: " + s)
+	}
+	enumCache.Store(s, m)
+
+	// This does not forward registration to the v2 registry since this API
+	// lacks sufficient information to construct a complete v2 enum descriptor.
+}
+
+// EnumValueMap returns the mapping from enum value names to enum numbers for
+// the enum of the given name. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
+func EnumValueMap(s enumName) enumsByName {
+	if v, ok := enumCache.Load(s); ok {
+		return v.(enumsByName)
+	}
+
+	// Check whether the cache is stale. If the number of files in the current
+	// package differs, then it means that some enums may have been recently
+	// registered upstream that we do not know about.
+	var protoPkg protoreflect.FullName
+	if i := strings.LastIndexByte(s, '.'); i >= 0 {
+		protoPkg = protoreflect.FullName(s[:i])
+	}
+	v, _ := numFilesCache.Load(protoPkg)
+	numFiles, _ := v.(int)
+	if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
+		return nil // cache is up-to-date; was not found earlier
+	}
+
+	// Update the enum cache for all enums declared in the given proto package.
+	numFiles = 0
+	protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
+		walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
+			name := protoimpl.X.LegacyEnumName(ed)
+			if _, ok := enumCache.Load(name); !ok {
+				m := make(enumsByName)
+				evs := ed.Values()
+				for i := evs.Len() - 1; i >= 0; i-- {
+					ev := evs.Get(i)
+					m[string(ev.Name())] = int32(ev.Number())
+				}
+				enumCache.LoadOrStore(name, m)
+			}
+		})
+		numFiles++
+		return true
+	})
+	numFilesCache.Store(protoPkg, numFiles)
+
+	// Check cache again for enum map.
+	if v, ok := enumCache.Load(s); ok {
+		return v.(enumsByName)
+	}
+	return nil
+}
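
A small usage sketch, assuming the well-known struct.proto file (which declares the NullValue enum) is linked into the binary; the lookup key follows the legacy "<proto package>.<Go enum type name>" convention described above:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	_ "google.golang.org/protobuf/types/known/structpb" // registers struct.proto and its NullValue enum
)

func main() {
	// The key is "<proto package>.<Go enum type name>", not the full protobuf name.
	m := proto.EnumValueMap("google.protobuf.NullValue")
	fmt.Println(m["NULL_VALUE"]) // 0 (a nil map is returned if the enum is not registered)
}
```
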
+
+// walkEnums recursively walks all enums declared in d.
+func walkEnums(d interface {
+	Enums() protoreflect.EnumDescriptors
+	Messages() protoreflect.MessageDescriptors
+}, f func(protoreflect.EnumDescriptor)) {
+	eds := d.Enums()
+	for i := eds.Len() - 1; i >= 0; i-- {
+		f(eds.Get(i))
+	}
+	mds := d.Messages()
+	for i := mds.Len() - 1; i >= 0; i-- {
+		walkEnums(mds.Get(i), f)
+	}
+}
+
+// messageName is the full name of a protobuf message.
+type messageName = string
+
+var messageTypeCache sync.Map // map[messageName]reflect.Type
+
+// RegisterType is called from generated code to register the message Go type
+// for a message of the given name.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
+func RegisterType(m Message, s messageName) {
+	mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
+	if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
+		panic(err)
+	}
+	messageTypeCache.Store(s, reflect.TypeOf(m))
+}
+
+// RegisterMapType is called from generated code to register the Go map type
+// for a protobuf message representing a map entry.
+//
+// Deprecated: Do not use.
+func RegisterMapType(m interface{}, s messageName) {
+	t := reflect.TypeOf(m)
+	if t.Kind() != reflect.Map {
+		panic(fmt.Sprintf("invalid map kind: %v", t))
+	}
+	if _, ok := messageTypeCache.Load(s); ok {
+		panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
+	}
+	messageTypeCache.Store(s, t)
+}
+
+// MessageType returns the message type for a named message.
+// It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
+func MessageType(s messageName) reflect.Type {
+	if v, ok := messageTypeCache.Load(s); ok {
+		return v.(reflect.Type)
+	}
+
+	// Derive the message type from the v2 registry.
+	var t reflect.Type
+	if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
+		t = messageGoType(mt)
+	}
+
+	// If we could not get a concrete type, it is possible that it is a
+	// pseudo-message for a map entry.
+	if t == nil {
+		d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
+		if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
+			kt := goTypeForField(md.Fields().ByNumber(1))
+			vt := goTypeForField(md.Fields().ByNumber(2))
+			t = reflect.MapOf(kt, vt)
+		}
+	}
+
+	// Locally cache the message type for the given name.
+	if t != nil {
+		v, _ := messageTypeCache.LoadOrStore(s, t)
+		return v.(reflect.Type)
+	}
+	return nil
+}
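
A usage sketch, assuming google.protobuf.Timestamp is registered by linking in the timestamppb package; the reflected type can then be used to construct fresh messages:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
	_ "google.golang.org/protobuf/types/known/timestamppb" // registers google.protobuf.Timestamp
)

func main() {
	t := proto.MessageType("google.protobuf.Timestamp")
	if t == nil {
		fmt.Println("message type not registered")
		return
	}
	fmt.Println(t) // *timestamppb.Timestamp

	// A fresh zero message can be constructed from the reflected pointer type.
	msg := reflect.New(t.Elem()).Interface().(proto.Message)
	fmt.Println(proto.MessageName(msg)) // google.protobuf.Timestamp
}
```
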
+
+func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
+	switch k := fd.Kind(); k {
+	case protoreflect.EnumKind:
+		if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
+			return enumGoType(et)
+		}
+		return reflect.TypeOf(protoreflect.EnumNumber(0))
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
+			return messageGoType(mt)
+		}
+		return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
+	default:
+		return reflect.TypeOf(fd.Default().Interface())
+	}
+}
+
+func enumGoType(et protoreflect.EnumType) reflect.Type {
+	return reflect.TypeOf(et.New(0))
+}
+
+func messageGoType(mt protoreflect.MessageType) reflect.Type {
+	return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
+}
+
+// MessageName returns the full protobuf name for the given message type.
+//
+// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
+func MessageName(m Message) messageName {
+	if m == nil {
+		return ""
+	}
+	if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
+		return m.XXX_MessageName()
+	}
+	return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
+}
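
A one-line usage sketch with the well-known emptypb.Empty (illustrative only):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	fmt.Println(proto.MessageName(&emptypb.Empty{})) // google.protobuf.Empty
}
```
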
+
+// RegisterExtension is called from the generated code to register
+// the extension descriptor.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
+func RegisterExtension(d *ExtensionDesc) {
+	if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
+		panic(err)
+	}
+}
+
+type extensionsByNumber = map[int32]*ExtensionDesc
+
+var extensionCache sync.Map // map[messageName]extensionsByNumber
+
+// RegisteredExtensions returns a map of the registered extensions for the
+// provided protobuf message, indexed by the extension field number.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
+func RegisteredExtensions(m Message) extensionsByNumber {
+	// Check whether the cache is stale. If the number of extensions for
+	// the given message differs, then it means that some extensions were
+	// recently registered upstream that we do not know about.
+	s := MessageName(m)
+	v, _ := extensionCache.Load(s)
+	xs, _ := v.(extensionsByNumber)
+	if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
+		return xs // cache is up-to-date
+	}
+
+	// Cache is stale, re-compute the extensions map.
+	xs = make(extensionsByNumber)
+	protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
+		if xd, ok := xt.(*ExtensionDesc); ok {
+			xs[int32(xt.TypeDescriptor().Number())] = xd
+		} else {
+			// TODO: This implies that the protoreflect.ExtensionType is a
+			// custom type not generated by protoc-gen-go. We could try and
+			// convert the type to an ExtensionDesc.
+		}
+		return true
+	})
+	extensionCache.Store(s, xs)
+	return xs
+}
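
A usage sketch, assuming the googleapis annotations package is linked in, which registers the (google.api.http) extension on google.protobuf.MethodOptions (illustrative only; any registered extension would do):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	_ "google.golang.org/genproto/googleapis/api/annotations" // registers (google.api.http) on MethodOptions
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Extensions registered against google.protobuf.MethodOptions, keyed by field number.
	xs := proto.RegisteredExtensions(&descriptorpb.MethodOptions{})
	for num, xd := range xs {
		fmt.Println(num, xd.Name) // e.g. 72295728 google.api.http
	}
}
```
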
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
deleted file mode 100644
index 5cb11fa..0000000
--- a/vendor/github.com/golang/protobuf/proto/table_marshal.go
+++ /dev/null
@@ -1,2776 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
-	"errors"
-	"fmt"
-	"math"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"unicode/utf8"
-)
-
-// a sizer takes a pointer to a field and the size of its tag, computes the size of
-// the encoded data.
-type sizer func(pointer, int) int
-
-// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
-// marshals the field to the end of the slice, returns the slice and error (if any).
-type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
-
-// marshalInfo is the information used for marshaling a message.
-type marshalInfo struct {
-	typ          reflect.Type
-	fields       []*marshalFieldInfo
-	unrecognized field                      // offset of XXX_unrecognized
-	extensions   field                      // offset of XXX_InternalExtensions
-	v1extensions field                      // offset of XXX_extensions
-	sizecache    field                      // offset of XXX_sizecache
-	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
-	messageset   bool                       // uses message set wire format
-	hasmarshaler bool                       // has custom marshaler
-	sync.RWMutex                            // protect extElems map, also for initialization
-	extElems     map[int32]*marshalElemInfo // info of extension elements
-}
-
-// marshalFieldInfo is the information used for marshaling a field of a message.
-type marshalFieldInfo struct {
-	field      field
-	wiretag    uint64 // tag in wire format
-	tagsize    int    // size of tag in wire format
-	sizer      sizer
-	marshaler  marshaler
-	isPointer  bool
-	required   bool                              // field is required
-	name       string                            // name of the field, for error reporting
-	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
-}
-
-// marshalElemInfo is the information used for marshaling an extension or oneof element.
-type marshalElemInfo struct {
-	wiretag   uint64 // tag in wire format
-	tagsize   int    // size of tag in wire format
-	sizer     sizer
-	marshaler marshaler
-	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
-	deref     bool // dereference the pointer before operating on it; implies isptr
-}
-
-var (
-	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
-	marshalInfoLock sync.Mutex
-)
-
-// getMarshalInfo returns the information to marshal a given type of message.
-// The info it returns may not necessarily initialized.
-// t is the type of the message (NOT the pointer to it).
-func getMarshalInfo(t reflect.Type) *marshalInfo {
-	marshalInfoLock.Lock()
-	u, ok := marshalInfoMap[t]
-	if !ok {
-		u = &marshalInfo{typ: t}
-		marshalInfoMap[t] = u
-	}
-	marshalInfoLock.Unlock()
-	return u
-}
-
-// Size is the entry point from generated code,
-// and should be ONLY called by generated code.
-// It computes the size of encoded data of msg.
-// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Size(msg Message) int {
-	u := getMessageMarshalInfo(msg, a)
-	ptr := toPointer(&msg)
-	if ptr.isNil() {
-		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
-		// so it satisfies the interface, and msg == nil wouldn't
-		// catch it. We don't want crash in this case.
-		return 0
-	}
-	return u.size(ptr)
-}
-
-// Marshal is the entry point from generated code,
-// and should be ONLY called by generated code.
-// It marshals msg to the end of b.
-// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
-	u := getMessageMarshalInfo(msg, a)
-	ptr := toPointer(&msg)
-	if ptr.isNil() {
-		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
-		// so it satisfies the interface, and msg == nil wouldn't
-		// catch it. We don't want crash in this case.
-		return b, ErrNil
-	}
-	return u.marshal(b, ptr, deterministic)
-}
-
-func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
-	// u := a.marshal, but atomically.
-	// We use an atomic here to ensure memory consistency.
-	u := atomicLoadMarshalInfo(&a.marshal)
-	if u == nil {
-		// Get marshal information from type of message.
-		t := reflect.ValueOf(msg).Type()
-		if t.Kind() != reflect.Ptr {
-			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
-		}
-		u = getMarshalInfo(t.Elem())
-		// Store it in the cache for later users.
-		// a.marshal = u, but atomically.
-		atomicStoreMarshalInfo(&a.marshal, u)
-	}
-	return u
-}
-
-// size is the main function to compute the size of the encoded data of a message.
-// ptr is the pointer to the message.
-func (u *marshalInfo) size(ptr pointer) int {
-	if atomic.LoadInt32(&u.initialized) == 0 {
-		u.computeMarshalInfo()
-	}
-
-	// If the message can marshal itself, let it do it, for compatibility.
-	// NOTE: This is not efficient.
-	if u.hasmarshaler {
-		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
-		b, _ := m.Marshal()
-		return len(b)
-	}
-
-	n := 0
-	for _, f := range u.fields {
-		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
-			// nil pointer always marshals to nothing
-			continue
-		}
-		n += f.sizer(ptr.offset(f.field), f.tagsize)
-	}
-	if u.extensions.IsValid() {
-		e := ptr.offset(u.extensions).toExtensions()
-		if u.messageset {
-			n += u.sizeMessageSet(e)
-		} else {
-			n += u.sizeExtensions(e)
-		}
-	}
-	if u.v1extensions.IsValid() {
-		m := *ptr.offset(u.v1extensions).toOldExtensions()
-		n += u.sizeV1Extensions(m)
-	}
-	if u.unrecognized.IsValid() {
-		s := *ptr.offset(u.unrecognized).toBytes()
-		n += len(s)
-	}
-	// cache the result for use in marshal
-	if u.sizecache.IsValid() {
-		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
-	}
-	return n
-}
-
-// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
-// fall back to compute the size.
-func (u *marshalInfo) cachedsize(ptr pointer) int {
-	if u.sizecache.IsValid() {
-		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
-	}
-	return u.size(ptr)
-}
-
-// marshal is the main function to marshal a message. It takes a byte slice and appends
-// the encoded data to the end of the slice, returns the slice and error (if any).
-// ptr is the pointer to the message.
-// If deterministic is true, map is marshaled in deterministic order.
-func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
-	if atomic.LoadInt32(&u.initialized) == 0 {
-		u.computeMarshalInfo()
-	}
-
-	// If the message can marshal itself, let it do it, for compatibility.
-	// NOTE: This is not efficient.
-	if u.hasmarshaler {
-		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
-		b1, err := m.Marshal()
-		b = append(b, b1...)
-		return b, err
-	}
-
-	var err, errLater error
-	// The old marshaler encodes extensions at beginning.
-	if u.extensions.IsValid() {
-		e := ptr.offset(u.extensions).toExtensions()
-		if u.messageset {
-			b, err = u.appendMessageSet(b, e, deterministic)
-		} else {
-			b, err = u.appendExtensions(b, e, deterministic)
-		}
-		if err != nil {
-			return b, err
-		}
-	}
-	if u.v1extensions.IsValid() {
-		m := *ptr.offset(u.v1extensions).toOldExtensions()
-		b, err = u.appendV1Extensions(b, m, deterministic)
-		if err != nil {
-			return b, err
-		}
-	}
-	for _, f := range u.fields {
-		if f.required {
-			if ptr.offset(f.field).getPointer().isNil() {
-				// Required field is not set.
-				// We record the error but keep going, to give a complete marshaling.
-				if errLater == nil {
-					errLater = &RequiredNotSetError{f.name}
-				}
-				continue
-			}
-		}
-		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
-			// nil pointer always marshals to nothing
-			continue
-		}
-		b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
-		if err != nil {
-			if err1, ok := err.(*RequiredNotSetError); ok {
-				// Required field in submessage is not set.
-				// We record the error but keep going, to give a complete marshaling.
-				if errLater == nil {
-					errLater = &RequiredNotSetError{f.name + "." + err1.field}
-				}
-				continue
-			}
-			if err == errRepeatedHasNil {
-				err = errors.New("proto: repeated field " + f.name + " has nil element")
-			}
-			if err == errInvalidUTF8 {
-				if errLater == nil {
-					fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
-					errLater = &invalidUTF8Error{fullName}
-				}
-				continue
-			}
-			return b, err
-		}
-	}
-	if u.unrecognized.IsValid() {
-		s := *ptr.offset(u.unrecognized).toBytes()
-		b = append(b, s...)
-	}
-	return b, errLater
-}
-
-// computeMarshalInfo initializes the marshal info.
-func (u *marshalInfo) computeMarshalInfo() {
-	u.Lock()
-	defer u.Unlock()
-	if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
-		return
-	}
-
-	t := u.typ
-	u.unrecognized = invalidField
-	u.extensions = invalidField
-	u.v1extensions = invalidField
-	u.sizecache = invalidField
-
-	// If the message can marshal itself, let it do it, for compatibility.
-	// NOTE: This is not efficient.
-	if reflect.PtrTo(t).Implements(marshalerType) {
-		u.hasmarshaler = true
-		atomic.StoreInt32(&u.initialized, 1)
-		return
-	}
-
-	// get oneof implementers
-	var oneofImplementers []interface{}
-	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
-	case oneofFuncsIface:
-		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
-	case oneofWrappersIface:
-		oneofImplementers = m.XXX_OneofWrappers()
-	}
-
-	n := t.NumField()
-
-	// deal with XXX fields first
-	for i := 0; i < t.NumField(); i++ {
-		f := t.Field(i)
-		if !strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-		switch f.Name {
-		case "XXX_sizecache":
-			u.sizecache = toField(&f)
-		case "XXX_unrecognized":
-			u.unrecognized = toField(&f)
-		case "XXX_InternalExtensions":
-			u.extensions = toField(&f)
-			u.messageset = f.Tag.Get("protobuf_messageset") == "1"
-		case "XXX_extensions":
-			u.v1extensions = toField(&f)
-		case "XXX_NoUnkeyedLiteral":
-			// nothing to do
-		default:
-			panic("unknown XXX field: " + f.Name)
-		}
-		n--
-	}
-
-	// normal fields
-	fields := make([]marshalFieldInfo, n) // batch allocation
-	u.fields = make([]*marshalFieldInfo, 0, n)
-	for i, j := 0, 0; i < t.NumField(); i++ {
-		f := t.Field(i)
-
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-		field := &fields[j]
-		j++
-		field.name = f.Name
-		u.fields = append(u.fields, field)
-		if f.Tag.Get("protobuf_oneof") != "" {
-			field.computeOneofFieldInfo(&f, oneofImplementers)
-			continue
-		}
-		if f.Tag.Get("protobuf") == "" {
-			// field has no tag (not in generated message), ignore it
-			u.fields = u.fields[:len(u.fields)-1]
-			j--
-			continue
-		}
-		field.computeMarshalFieldInfo(&f)
-	}
-
-	// fields are marshaled in tag order on the wire.
-	sort.Sort(byTag(u.fields))
-
-	atomic.StoreInt32(&u.initialized, 1)
-}
-
-// helper for sorting fields by tag
-type byTag []*marshalFieldInfo
-
-func (a byTag) Len() int           { return len(a) }
-func (a byTag) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
-
-// getExtElemInfo returns the information to marshal an extension element.
-// The info it returns is initialized.
-func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
-	// get from cache first
-	u.RLock()
-	e, ok := u.extElems[desc.Field]
-	u.RUnlock()
-	if ok {
-		return e
-	}
-
-	t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
-	tags := strings.Split(desc.Tag, ",")
-	tag, err := strconv.Atoi(tags[1])
-	if err != nil {
-		panic("tag is not an integer")
-	}
-	wt := wiretype(tags[0])
-	if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
-		t = t.Elem()
-	}
-	sizer, marshaler := typeMarshaler(t, tags, false, false)
-	var deref bool
-	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
-		t = reflect.PtrTo(t)
-		deref = true
-	}
-	e = &marshalElemInfo{
-		wiretag:   uint64(tag)<<3 | wt,
-		tagsize:   SizeVarint(uint64(tag) << 3),
-		sizer:     sizer,
-		marshaler: marshaler,
-		isptr:     t.Kind() == reflect.Ptr,
-		deref:     deref,
-	}
-
-	// update cache
-	u.Lock()
-	if u.extElems == nil {
-		u.extElems = make(map[int32]*marshalElemInfo)
-	}
-	u.extElems[desc.Field] = e
-	u.Unlock()
-	return e
-}
-
-// computeMarshalFieldInfo fills up the information to marshal a field.
-func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
-	// parse protobuf tag of the field.
-	// tag has format of "bytes,49,opt,name=foo,def=hello!"
-	tags := strings.Split(f.Tag.Get("protobuf"), ",")
-	if tags[0] == "" {
-		return
-	}
-	tag, err := strconv.Atoi(tags[1])
-	if err != nil {
-		panic("tag is not an integer")
-	}
-	wt := wiretype(tags[0])
-	if tags[2] == "req" {
-		fi.required = true
-	}
-	fi.setTag(f, tag, wt)
-	fi.setMarshaler(f, tags)
-}
-
-func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
-	fi.field = toField(f)
-	fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
-	fi.isPointer = true
-	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
-	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
-
-	ityp := f.Type // interface type
-	for _, o := range oneofImplementers {
-		t := reflect.TypeOf(o)
-		if !t.Implements(ityp) {
-			continue
-		}
-		sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
-		tags := strings.Split(sf.Tag.Get("protobuf"), ",")
-		tag, err := strconv.Atoi(tags[1])
-		if err != nil {
-			panic("tag is not an integer")
-		}
-		wt := wiretype(tags[0])
-		sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
-		fi.oneofElems[t.Elem()] = &marshalElemInfo{
-			wiretag:   uint64(tag)<<3 | wt,
-			tagsize:   SizeVarint(uint64(tag) << 3),
-			sizer:     sizer,
-			marshaler: marshaler,
-		}
-	}
-}
-
-// wiretype returns the wire encoding of the type.
-func wiretype(encoding string) uint64 {
-	switch encoding {
-	case "fixed32":
-		return WireFixed32
-	case "fixed64":
-		return WireFixed64
-	case "varint", "zigzag32", "zigzag64":
-		return WireVarint
-	case "bytes":
-		return WireBytes
-	case "group":
-		return WireStartGroup
-	}
-	panic("unknown wire type " + encoding)
-}
-
-// setTag fills up the tag (in wire format) and its size in the info of a field.
-func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
-	fi.field = toField(f)
-	fi.wiretag = uint64(tag)<<3 | wt
-	fi.tagsize = SizeVarint(uint64(tag) << 3)
-}
-
-// setMarshaler fills up the sizer and marshaler in the info of a field.
-func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
-	switch f.Type.Kind() {
-	case reflect.Map:
-		// map field
-		fi.isPointer = true
-		fi.sizer, fi.marshaler = makeMapMarshaler(f)
-		return
-	case reflect.Ptr, reflect.Slice:
-		fi.isPointer = true
-	}
-	fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
-}
-
-// typeMarshaler returns the sizer and marshaler of a given field.
-// t is the type of the field.
-// tags is the generated "protobuf" tag of the field.
-// If nozero is true, zero value is not marshaled to the wire.
-// If oneof is true, it is a oneof field.
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
-	encoding := tags[0]
-
-	pointer := false
-	slice := false
-	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
-		slice = true
-		t = t.Elem()
-	}
-	if t.Kind() == reflect.Ptr {
-		pointer = true
-		t = t.Elem()
-	}
-
-	packed := false
-	proto3 := false
-	validateUTF8 := true
-	for i := 2; i < len(tags); i++ {
-		if tags[i] == "packed" {
-			packed = true
-		}
-		if tags[i] == "proto3" {
-			proto3 = true
-		}
-	}
-	validateUTF8 = validateUTF8 && proto3
-
-	switch t.Kind() {
-	case reflect.Bool:
-		if pointer {
-			return sizeBoolPtr, appendBoolPtr
-		}
-		if slice {
-			if packed {
-				return sizeBoolPackedSlice, appendBoolPackedSlice
-			}
-			return sizeBoolSlice, appendBoolSlice
-		}
-		if nozero {
-			return sizeBoolValueNoZero, appendBoolValueNoZero
-		}
-		return sizeBoolValue, appendBoolValue
-	case reflect.Uint32:
-		switch encoding {
-		case "fixed32":
-			if pointer {
-				return sizeFixed32Ptr, appendFixed32Ptr
-			}
-			if slice {
-				if packed {
-					return sizeFixed32PackedSlice, appendFixed32PackedSlice
-				}
-				return sizeFixed32Slice, appendFixed32Slice
-			}
-			if nozero {
-				return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
-			}
-			return sizeFixed32Value, appendFixed32Value
-		case "varint":
-			if pointer {
-				return sizeVarint32Ptr, appendVarint32Ptr
-			}
-			if slice {
-				if packed {
-					return sizeVarint32PackedSlice, appendVarint32PackedSlice
-				}
-				return sizeVarint32Slice, appendVarint32Slice
-			}
-			if nozero {
-				return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
-			}
-			return sizeVarint32Value, appendVarint32Value
-		}
-	case reflect.Int32:
-		switch encoding {
-		case "fixed32":
-			if pointer {
-				return sizeFixedS32Ptr, appendFixedS32Ptr
-			}
-			if slice {
-				if packed {
-					return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
-				}
-				return sizeFixedS32Slice, appendFixedS32Slice
-			}
-			if nozero {
-				return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
-			}
-			return sizeFixedS32Value, appendFixedS32Value
-		case "varint":
-			if pointer {
-				return sizeVarintS32Ptr, appendVarintS32Ptr
-			}
-			if slice {
-				if packed {
-					return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
-				}
-				return sizeVarintS32Slice, appendVarintS32Slice
-			}
-			if nozero {
-				return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
-			}
-			return sizeVarintS32Value, appendVarintS32Value
-		case "zigzag32":
-			if pointer {
-				return sizeZigzag32Ptr, appendZigzag32Ptr
-			}
-			if slice {
-				if packed {
-					return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
-				}
-				return sizeZigzag32Slice, appendZigzag32Slice
-			}
-			if nozero {
-				return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
-			}
-			return sizeZigzag32Value, appendZigzag32Value
-		}
-	case reflect.Uint64:
-		switch encoding {
-		case "fixed64":
-			if pointer {
-				return sizeFixed64Ptr, appendFixed64Ptr
-			}
-			if slice {
-				if packed {
-					return sizeFixed64PackedSlice, appendFixed64PackedSlice
-				}
-				return sizeFixed64Slice, appendFixed64Slice
-			}
-			if nozero {
-				return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
-			}
-			return sizeFixed64Value, appendFixed64Value
-		case "varint":
-			if pointer {
-				return sizeVarint64Ptr, appendVarint64Ptr
-			}
-			if slice {
-				if packed {
-					return sizeVarint64PackedSlice, appendVarint64PackedSlice
-				}
-				return sizeVarint64Slice, appendVarint64Slice
-			}
-			if nozero {
-				return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
-			}
-			return sizeVarint64Value, appendVarint64Value
-		}
-	case reflect.Int64:
-		switch encoding {
-		case "fixed64":
-			if pointer {
-				return sizeFixedS64Ptr, appendFixedS64Ptr
-			}
-			if slice {
-				if packed {
-					return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
-				}
-				return sizeFixedS64Slice, appendFixedS64Slice
-			}
-			if nozero {
-				return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
-			}
-			return sizeFixedS64Value, appendFixedS64Value
-		case "varint":
-			if pointer {
-				return sizeVarintS64Ptr, appendVarintS64Ptr
-			}
-			if slice {
-				if packed {
-					return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
-				}
-				return sizeVarintS64Slice, appendVarintS64Slice
-			}
-			if nozero {
-				return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
-			}
-			return sizeVarintS64Value, appendVarintS64Value
-		case "zigzag64":
-			if pointer {
-				return sizeZigzag64Ptr, appendZigzag64Ptr
-			}
-			if slice {
-				if packed {
-					return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
-				}
-				return sizeZigzag64Slice, appendZigzag64Slice
-			}
-			if nozero {
-				return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
-			}
-			return sizeZigzag64Value, appendZigzag64Value
-		}
-	case reflect.Float32:
-		if pointer {
-			return sizeFloat32Ptr, appendFloat32Ptr
-		}
-		if slice {
-			if packed {
-				return sizeFloat32PackedSlice, appendFloat32PackedSlice
-			}
-			return sizeFloat32Slice, appendFloat32Slice
-		}
-		if nozero {
-			return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
-		}
-		return sizeFloat32Value, appendFloat32Value
-	case reflect.Float64:
-		if pointer {
-			return sizeFloat64Ptr, appendFloat64Ptr
-		}
-		if slice {
-			if packed {
-				return sizeFloat64PackedSlice, appendFloat64PackedSlice
-			}
-			return sizeFloat64Slice, appendFloat64Slice
-		}
-		if nozero {
-			return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
-		}
-		return sizeFloat64Value, appendFloat64Value
-	case reflect.String:
-		if validateUTF8 {
-			if pointer {
-				return sizeStringPtr, appendUTF8StringPtr
-			}
-			if slice {
-				return sizeStringSlice, appendUTF8StringSlice
-			}
-			if nozero {
-				return sizeStringValueNoZero, appendUTF8StringValueNoZero
-			}
-			return sizeStringValue, appendUTF8StringValue
-		}
-		if pointer {
-			return sizeStringPtr, appendStringPtr
-		}
-		if slice {
-			return sizeStringSlice, appendStringSlice
-		}
-		if nozero {
-			return sizeStringValueNoZero, appendStringValueNoZero
-		}
-		return sizeStringValue, appendStringValue
-	case reflect.Slice:
-		if slice {
-			return sizeBytesSlice, appendBytesSlice
-		}
-		if oneof {
-			// Oneof bytes field may also have "proto3" tag.
-			// We want to marshal it as a oneof field. Do this
-			// check before the proto3 check.
-			return sizeBytesOneof, appendBytesOneof
-		}
-		if proto3 {
-			return sizeBytes3, appendBytes3
-		}
-		return sizeBytes, appendBytes
-	case reflect.Struct:
-		switch encoding {
-		case "group":
-			if slice {
-				return makeGroupSliceMarshaler(getMarshalInfo(t))
-			}
-			return makeGroupMarshaler(getMarshalInfo(t))
-		case "bytes":
-			if slice {
-				return makeMessageSliceMarshaler(getMarshalInfo(t))
-			}
-			return makeMessageMarshaler(getMarshalInfo(t))
-		}
-	}
-	panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
-}
-
-// Below are functions to size/marshal a specific type of a field.
-// They are stored in the field's info, and called by function pointers.
-// They have type sizer or marshaler.
-
-func sizeFixed32Value(_ pointer, tagsize int) int {
-	return 4 + tagsize
-}
-func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toUint32()
-	if v == 0 {
-		return 0
-	}
-	return 4 + tagsize
-}
-func sizeFixed32Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toUint32Ptr()
-	if p == nil {
-		return 0
-	}
-	return 4 + tagsize
-}
-func sizeFixed32Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint32Slice()
-	return (4 + tagsize) * len(s)
-}
-func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint32Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFixedS32Value(_ pointer, tagsize int) int {
-	return 4 + tagsize
-}
-func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toInt32()
-	if v == 0 {
-		return 0
-	}
-	return 4 + tagsize
-}
-func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
-	p := ptr.getInt32Ptr()
-	if p == nil {
-		return 0
-	}
-	return 4 + tagsize
-}
-func sizeFixedS32Slice(ptr pointer, tagsize int) int {
-	s := ptr.getInt32Slice()
-	return (4 + tagsize) * len(s)
-}
-func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
-	s := ptr.getInt32Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFloat32Value(_ pointer, tagsize int) int {
-	return 4 + tagsize
-}
-func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
-	v := math.Float32bits(*ptr.toFloat32())
-	if v == 0 {
-		return 0
-	}
-	return 4 + tagsize
-}
-func sizeFloat32Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toFloat32Ptr()
-	if p == nil {
-		return 0
-	}
-	return 4 + tagsize
-}
-func sizeFloat32Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toFloat32Slice()
-	return (4 + tagsize) * len(s)
-}
-func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toFloat32Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFixed64Value(_ pointer, tagsize int) int {
-	return 8 + tagsize
-}
-func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toUint64()
-	if v == 0 {
-		return 0
-	}
-	return 8 + tagsize
-}
-func sizeFixed64Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toUint64Ptr()
-	if p == nil {
-		return 0
-	}
-	return 8 + tagsize
-}
-func sizeFixed64Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint64Slice()
-	return (8 + tagsize) * len(s)
-}
-func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint64Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeFixedS64Value(_ pointer, tagsize int) int {
-	return 8 + tagsize
-}
-func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toInt64()
-	if v == 0 {
-		return 0
-	}
-	return 8 + tagsize
-}
-func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toInt64Ptr()
-	if p == nil {
-		return 0
-	}
-	return 8 + tagsize
-}
-func sizeFixedS64Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toInt64Slice()
-	return (8 + tagsize) * len(s)
-}
-func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toInt64Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeFloat64Value(_ pointer, tagsize int) int {
-	return 8 + tagsize
-}
-func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
-	v := math.Float64bits(*ptr.toFloat64())
-	if v == 0 {
-		return 0
-	}
-	return 8 + tagsize
-}
-func sizeFloat64Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toFloat64Ptr()
-	if p == nil {
-		return 0
-	}
-	return 8 + tagsize
-}
-func sizeFloat64Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toFloat64Slice()
-	return (8 + tagsize) * len(s)
-}
-func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toFloat64Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeVarint32Value(ptr pointer, tagsize int) int {
-	v := *ptr.toUint32()
-	return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toUint32()
-	if v == 0 {
-		return 0
-	}
-	return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarint32Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toUint32Ptr()
-	if p == nil {
-		return 0
-	}
-	return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarint32Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint32Slice()
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v)) + tagsize
-	}
-	return n
-}
-func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint32Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v))
-	}
-	return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarintS32Value(ptr pointer, tagsize int) int {
-	v := *ptr.toInt32()
-	return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toInt32()
-	if v == 0 {
-		return 0
-	}
-	return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
-	p := ptr.getInt32Ptr()
-	if p == nil {
-		return 0
-	}
-	return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarintS32Slice(ptr pointer, tagsize int) int {
-	s := ptr.getInt32Slice()
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v)) + tagsize
-	}
-	return n
-}
-func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
-	s := ptr.getInt32Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v))
-	}
-	return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarint64Value(ptr pointer, tagsize int) int {
-	v := *ptr.toUint64()
-	return SizeVarint(v) + tagsize
-}
-func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toUint64()
-	if v == 0 {
-		return 0
-	}
-	return SizeVarint(v) + tagsize
-}
-func sizeVarint64Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toUint64Ptr()
-	if p == nil {
-		return 0
-	}
-	return SizeVarint(*p) + tagsize
-}
-func sizeVarint64Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint64Slice()
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(v) + tagsize
-	}
-	return n
-}
-func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint64Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(v)
-	}
-	return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarintS64Value(ptr pointer, tagsize int) int {
-	v := *ptr.toInt64()
-	return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toInt64()
-	if v == 0 {
-		return 0
-	}
-	return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toInt64Ptr()
-	if p == nil {
-		return 0
-	}
-	return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarintS64Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toInt64Slice()
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v)) + tagsize
-	}
-	return n
-}
-func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toInt64Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v))
-	}
-	return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeZigzag32Value(ptr pointer, tagsize int) int {
-	v := *ptr.toInt32()
-	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toInt32()
-	if v == 0 {
-		return 0
-	}
-	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
-	p := ptr.getInt32Ptr()
-	if p == nil {
-		return 0
-	}
-	v := *p
-	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32Slice(ptr pointer, tagsize int) int {
-	s := ptr.getInt32Slice()
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-	}
-	return n
-}
-func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
-	s := ptr.getInt32Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
-	}
-	return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeZigzag64Value(ptr pointer, tagsize int) int {
-	v := *ptr.toInt64()
-	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toInt64()
-	if v == 0 {
-		return 0
-	}
-	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toInt64Ptr()
-	if p == nil {
-		return 0
-	}
-	v := *p
-	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toInt64Slice()
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-	}
-	return n
-}
-func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toInt64Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
-	}
-	return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeBoolValue(_ pointer, tagsize int) int {
-	return 1 + tagsize
-}
-func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toBool()
-	if !v {
-		return 0
-	}
-	return 1 + tagsize
-}
-func sizeBoolPtr(ptr pointer, tagsize int) int {
-	p := *ptr.toBoolPtr()
-	if p == nil {
-		return 0
-	}
-	return 1 + tagsize
-}
-func sizeBoolSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toBoolSlice()
-	return (1 + tagsize) * len(s)
-}
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toBoolSlice()
-	if len(s) == 0 {
-		return 0
-	}
-	return len(s) + SizeVarint(uint64(len(s))) + tagsize
-}
-func sizeStringValue(ptr pointer, tagsize int) int {
-	v := *ptr.toString()
-	return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toString()
-	if v == "" {
-		return 0
-	}
-	return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringPtr(ptr pointer, tagsize int) int {
-	p := *ptr.toStringPtr()
-	if p == nil {
-		return 0
-	}
-	v := *p
-	return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toStringSlice()
-	n := 0
-	for _, v := range s {
-		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
-	}
-	return n
-}
-func sizeBytes(ptr pointer, tagsize int) int {
-	v := *ptr.toBytes()
-	if v == nil {
-		return 0
-	}
-	return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytes3(ptr pointer, tagsize int) int {
-	v := *ptr.toBytes()
-	if len(v) == 0 {
-		return 0
-	}
-	return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytesOneof(ptr pointer, tagsize int) int {
-	v := *ptr.toBytes()
-	return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytesSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toBytesSlice()
-	n := 0
-	for _, v := range s {
-		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
-	}
-	return n
-}
-
-// appendFixed32 appends an encoded fixed32 to b.
-func appendFixed32(b []byte, v uint32) []byte {
-	b = append(b,
-		byte(v),
-		byte(v>>8),
-		byte(v>>16),
-		byte(v>>24))
-	return b
-}
-
-// appendFixed64 appends an encoded fixed64 to b.
-func appendFixed64(b []byte, v uint64) []byte {
-	b = append(b,
-		byte(v),
-		byte(v>>8),
-		byte(v>>16),
-		byte(v>>24),
-		byte(v>>32),
-		byte(v>>40),
-		byte(v>>48),
-		byte(v>>56))
-	return b
-}
-
-// appendVarint appends an encoded varint to b.
-func appendVarint(b []byte, v uint64) []byte {
-	// TODO: make 1-byte (maybe 2-byte) case inline-able, once we
-	// have non-leaf inliner.
-	switch {
-	case v < 1<<7:
-		b = append(b, byte(v))
-	case v < 1<<14:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte(v>>7))
-	case v < 1<<21:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte(v>>14))
-	case v < 1<<28:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte((v>>14)&0x7f|0x80),
-			byte(v>>21))
-	case v < 1<<35:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte((v>>14)&0x7f|0x80),
-			byte((v>>21)&0x7f|0x80),
-			byte(v>>28))
-	case v < 1<<42:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte((v>>14)&0x7f|0x80),
-			byte((v>>21)&0x7f|0x80),
-			byte((v>>28)&0x7f|0x80),
-			byte(v>>35))
-	case v < 1<<49:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte((v>>14)&0x7f|0x80),
-			byte((v>>21)&0x7f|0x80),
-			byte((v>>28)&0x7f|0x80),
-			byte((v>>35)&0x7f|0x80),
-			byte(v>>42))
-	case v < 1<<56:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte((v>>14)&0x7f|0x80),
-			byte((v>>21)&0x7f|0x80),
-			byte((v>>28)&0x7f|0x80),
-			byte((v>>35)&0x7f|0x80),
-			byte((v>>42)&0x7f|0x80),
-			byte(v>>49))
-	case v < 1<<63:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte((v>>14)&0x7f|0x80),
-			byte((v>>21)&0x7f|0x80),
-			byte((v>>28)&0x7f|0x80),
-			byte((v>>35)&0x7f|0x80),
-			byte((v>>42)&0x7f|0x80),
-			byte((v>>49)&0x7f|0x80),
-			byte(v>>56))
-	default:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte((v>>14)&0x7f|0x80),
-			byte((v>>21)&0x7f|0x80),
-			byte((v>>28)&0x7f|0x80),
-			byte((v>>35)&0x7f|0x80),
-			byte((v>>42)&0x7f|0x80),
-			byte((v>>49)&0x7f|0x80),
-			byte((v>>56)&0x7f|0x80),
-			1)
-	}
-	return b
-}
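
For reference, the base-128 varint scheme implemented by the removed appendVarint above can be restated in a few lines; this standalone sketch is illustrative only and is not part of the removed file:

```go
package main

import "fmt"

// encodeVarint restates the base-128 varint scheme: 7 payload bits per byte,
// least-significant group first, with the high bit set on all but the last byte.
func encodeVarint(v uint64) []byte {
	var b []byte
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	fmt.Printf("% x\n", encodeVarint(1))   // 01
	fmt.Printf("% x\n", encodeVarint(300)) // ac 02
}
```
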
-
-func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint32()
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, v)
-	return b, nil
-}
-func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint32()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, v)
-	return b, nil
-}
-func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toUint32Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, *p)
-	return b, nil
-}
-func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint32Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendFixed32(b, v)
-	}
-	return b, nil
-}
-func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint32Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	b = appendVarint(b, uint64(4*len(s)))
-	for _, v := range s {
-		b = appendFixed32(b, v)
-	}
-	return b, nil
-}
-func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt32()
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, uint32(v))
-	return b, nil
-}
-func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt32()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, uint32(v))
-	return b, nil
-}
-func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := ptr.getInt32Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, uint32(*p))
-	return b, nil
-}
-func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := ptr.getInt32Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendFixed32(b, uint32(v))
-	}
-	return b, nil
-}
-func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := ptr.getInt32Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	b = appendVarint(b, uint64(4*len(s)))
-	for _, v := range s {
-		b = appendFixed32(b, uint32(v))
-	}
-	return b, nil
-}
-func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := math.Float32bits(*ptr.toFloat32())
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, v)
-	return b, nil
-}
-func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := math.Float32bits(*ptr.toFloat32())
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, v)
-	return b, nil
-}
-func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toFloat32Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, math.Float32bits(*p))
-	return b, nil
-}
-func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toFloat32Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendFixed32(b, math.Float32bits(v))
-	}
-	return b, nil
-}
-func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toFloat32Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	b = appendVarint(b, uint64(4*len(s)))
-	for _, v := range s {
-		b = appendFixed32(b, math.Float32bits(v))
-	}
-	return b, nil
-}
-func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint64()
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, v)
-	return b, nil
-}
-func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint64()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, v)
-	return b, nil
-}
-func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toUint64Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, *p)
-	return b, nil
-}
-func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint64Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendFixed64(b, v)
-	}
-	return b, nil
-}
-func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint64Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	b = appendVarint(b, uint64(8*len(s)))
-	for _, v := range s {
-		b = appendFixed64(b, v)
-	}
-	return b, nil
-}
-func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt64()
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, uint64(v))
-	return b, nil
-}
-func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt64()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, uint64(v))
-	return b, nil
-}
-func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toInt64Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, uint64(*p))
-	return b, nil
-}
-func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toInt64Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendFixed64(b, uint64(v))
-	}
-	return b, nil
-}
-func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toInt64Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	b = appendVarint(b, uint64(8*len(s)))
-	for _, v := range s {
-		b = appendFixed64(b, uint64(v))
-	}
-	return b, nil
-}
-func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := math.Float64bits(*ptr.toFloat64())
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, v)
-	return b, nil
-}
-func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := math.Float64bits(*ptr.toFloat64())
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, v)
-	return b, nil
-}
-func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toFloat64Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, math.Float64bits(*p))
-	return b, nil
-}
-func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toFloat64Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendFixed64(b, math.Float64bits(v))
-	}
-	return b, nil
-}
-func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toFloat64Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	b = appendVarint(b, uint64(8*len(s)))
-	for _, v := range s {
-		b = appendFixed64(b, math.Float64bits(v))
-	}
-	return b, nil
-}
-func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint32()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v))
-	return b, nil
-}
-func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint32()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v))
-	return b, nil
-}
-func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toUint32Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(*p))
-	return b, nil
-}
-func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint32Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(v))
-	}
-	return b, nil
-}
-func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint32Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	// compute size
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v))
-	}
-	b = appendVarint(b, uint64(n))
-	for _, v := range s {
-		b = appendVarint(b, uint64(v))
-	}
-	return b, nil
-}
-func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt32()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v))
-	return b, nil
-}
-func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt32()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v))
-	return b, nil
-}
-func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := ptr.getInt32Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(*p))
-	return b, nil
-}
-func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := ptr.getInt32Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(v))
-	}
-	return b, nil
-}
-func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := ptr.getInt32Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	// compute size
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v))
-	}
-	b = appendVarint(b, uint64(n))
-	for _, v := range s {
-		b = appendVarint(b, uint64(v))
-	}
-	return b, nil
-}
-func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint64()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, v)
-	return b, nil
-}
-func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint64()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, v)
-	return b, nil
-}
-func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toUint64Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, *p)
-	return b, nil
-}
-func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint64Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, v)
-	}
-	return b, nil
-}
-func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint64Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	// compute size
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(v)
-	}
-	b = appendVarint(b, uint64(n))
-	for _, v := range s {
-		b = appendVarint(b, v)
-	}
-	return b, nil
-}
-func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt64()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v))
-	return b, nil
-}
-func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt64()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v))
-	return b, nil
-}
-func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toInt64Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(*p))
-	return b, nil
-}
-func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toInt64Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(v))
-	}
-	return b, nil
-}
-func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toInt64Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	// compute size
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v))
-	}
-	b = appendVarint(b, uint64(n))
-	for _, v := range s {
-		b = appendVarint(b, uint64(v))
-	}
-	return b, nil
-}
-func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt32()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
-	return b, nil
-}
-func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt32()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
-	return b, nil
-}
-func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := ptr.getInt32Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	v := *p
-	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
-	return b, nil
-}
-func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := ptr.getInt32Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
-	}
-	return b, nil
-}
-func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := ptr.getInt32Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	// compute size
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
-	}
-	b = appendVarint(b, uint64(n))
-	for _, v := range s {
-		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
-	}
-	return b, nil
-}
-func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt64()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
-	return b, nil
-}
-func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt64()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
-	return b, nil
-}
-func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toInt64Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	v := *p
-	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
-	return b, nil
-}
-func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toInt64Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
-	}
-	return b, nil
-}
-func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toInt64Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	// compute size
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
-	}
-	b = appendVarint(b, uint64(n))
-	for _, v := range s {
-		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
-	}
-	return b, nil
-}
-func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toBool()
-	b = appendVarint(b, wiretag)
-	if v {
-		b = append(b, 1)
-	} else {
-		b = append(b, 0)
-	}
-	return b, nil
-}
-func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toBool()
-	if !v {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = append(b, 1)
-	return b, nil
-}
-
-func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toBoolPtr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	if *p {
-		b = append(b, 1)
-	} else {
-		b = append(b, 0)
-	}
-	return b, nil
-}
-func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toBoolSlice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		if v {
-			b = append(b, 1)
-		} else {
-			b = append(b, 0)
-		}
-	}
-	return b, nil
-}
-func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toBoolSlice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	b = appendVarint(b, uint64(len(s)))
-	for _, v := range s {
-		if v {
-			b = append(b, 1)
-		} else {
-			b = append(b, 0)
-		}
-	}
-	return b, nil
-}
-func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toString()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	return b, nil
-}
-func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toString()
-	if v == "" {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	return b, nil
-}
-func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toStringPtr()
-	if p == nil {
-		return b, nil
-	}
-	v := *p
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	return b, nil
-}
-func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toStringSlice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(len(v)))
-		b = append(b, v...)
-	}
-	return b, nil
-}
-func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	var invalidUTF8 bool
-	v := *ptr.toString()
-	if !utf8.ValidString(v) {
-		invalidUTF8 = true
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	if invalidUTF8 {
-		return b, errInvalidUTF8
-	}
-	return b, nil
-}
-func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	var invalidUTF8 bool
-	v := *ptr.toString()
-	if v == "" {
-		return b, nil
-	}
-	if !utf8.ValidString(v) {
-		invalidUTF8 = true
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	if invalidUTF8 {
-		return b, errInvalidUTF8
-	}
-	return b, nil
-}
-func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	var invalidUTF8 bool
-	p := *ptr.toStringPtr()
-	if p == nil {
-		return b, nil
-	}
-	v := *p
-	if !utf8.ValidString(v) {
-		invalidUTF8 = true
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	if invalidUTF8 {
-		return b, errInvalidUTF8
-	}
-	return b, nil
-}
-func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	var invalidUTF8 bool
-	s := *ptr.toStringSlice()
-	for _, v := range s {
-		if !utf8.ValidString(v) {
-			invalidUTF8 = true
-		}
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(len(v)))
-		b = append(b, v...)
-	}
-	if invalidUTF8 {
-		return b, errInvalidUTF8
-	}
-	return b, nil
-}
-func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toBytes()
-	if v == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	return b, nil
-}
-func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toBytes()
-	if len(v) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	return b, nil
-}
-func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toBytes()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	return b, nil
-}
-func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toBytesSlice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(len(v)))
-		b = append(b, v...)
-	}
-	return b, nil
-}
-
-// makeGroupMarshaler returns the sizer and marshaler for a group.
-// u is the marshal info of the underlying message.
-func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
-	return func(ptr pointer, tagsize int) int {
-			p := ptr.getPointer()
-			if p.isNil() {
-				return 0
-			}
-			return u.size(p) + 2*tagsize
-		},
-		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
-			p := ptr.getPointer()
-			if p.isNil() {
-				return b, nil
-			}
-			var err error
-			b = appendVarint(b, wiretag) // start group
-			b, err = u.marshal(b, p, deterministic)
-			b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
-			return b, err
-		}
-}
-
-// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
-// u is the marshal info of the underlying message.
-func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
-	return func(ptr pointer, tagsize int) int {
-			s := ptr.getPointerSlice()
-			n := 0
-			for _, v := range s {
-				if v.isNil() {
-					continue
-				}
-				n += u.size(v) + 2*tagsize
-			}
-			return n
-		},
-		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
-			s := ptr.getPointerSlice()
-			var err error
-			var nerr nonFatal
-			for _, v := range s {
-				if v.isNil() {
-					return b, errRepeatedHasNil
-				}
-				b = appendVarint(b, wiretag) // start group
-				b, err = u.marshal(b, v, deterministic)
-				b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
-				if !nerr.Merge(err) {
-					if err == ErrNil {
-						err = errRepeatedHasNil
-					}
-					return b, err
-				}
-			}
-			return b, nerr.E
-		}
-}
-
-// makeMessageMarshaler returns the sizer and marshaler for a message field.
-// u is the marshal info of the message.
-func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
-	return func(ptr pointer, tagsize int) int {
-			p := ptr.getPointer()
-			if p.isNil() {
-				return 0
-			}
-			siz := u.size(p)
-			return siz + SizeVarint(uint64(siz)) + tagsize
-		},
-		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
-			p := ptr.getPointer()
-			if p.isNil() {
-				return b, nil
-			}
-			b = appendVarint(b, wiretag)
-			siz := u.cachedsize(p)
-			b = appendVarint(b, uint64(siz))
-			return u.marshal(b, p, deterministic)
-		}
-}
-
-// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
-// u is the marshal info of the message.
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
-	return func(ptr pointer, tagsize int) int {
-			s := ptr.getPointerSlice()
-			n := 0
-			for _, v := range s {
-				if v.isNil() {
-					continue
-				}
-				siz := u.size(v)
-				n += siz + SizeVarint(uint64(siz)) + tagsize
-			}
-			return n
-		},
-		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
-			s := ptr.getPointerSlice()
-			var err error
-			var nerr nonFatal
-			for _, v := range s {
-				if v.isNil() {
-					return b, errRepeatedHasNil
-				}
-				b = appendVarint(b, wiretag)
-				siz := u.cachedsize(v)
-				b = appendVarint(b, uint64(siz))
-				b, err = u.marshal(b, v, deterministic)
-
-				if !nerr.Merge(err) {
-					if err == ErrNil {
-						err = errRepeatedHasNil
-					}
-					return b, err
-				}
-			}
-			return b, nerr.E
-		}
-}
-
-// makeMapMarshaler returns the sizer and marshaler for a map field.
-// f is the pointer to the reflect data structure of the field.
-func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
-	// figure out key and value type
-	t := f.Type
-	keyType := t.Key()
-	valType := t.Elem()
-	keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
-	valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
-	keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
-	valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
-	keyWireTag := 1<<3 | wiretype(keyTags[0])
-	valWireTag := 2<<3 | wiretype(valTags[0])
-
-	// We create an interface to get the addresses of the map key and value.
-	// If value is pointer-typed, the interface is a direct interface and
-	// the idata itself is the value. Otherwise, the idata is the pointer
-	// to the value.
-	// Key cannot be pointer-typed.
-	valIsPtr := valType.Kind() == reflect.Ptr
-
-	// If value is a message with nested maps, calling
-	// valSizer in marshal may be quadratic. We should use
-	// cached version in marshal (but not in size).
-	// If value is not message type, we don't have size cache,
-	// but it cannot be nested either. Just use valSizer.
-	valCachedSizer := valSizer
-	if valIsPtr && valType.Elem().Kind() == reflect.Struct {
-		u := getMarshalInfo(valType.Elem())
-		valCachedSizer = func(ptr pointer, tagsize int) int {
-			// Same as message sizer, but use cache.
-			p := ptr.getPointer()
-			if p.isNil() {
-				return 0
-			}
-			siz := u.cachedsize(p)
-			return siz + SizeVarint(uint64(siz)) + tagsize
-		}
-	}
-	return func(ptr pointer, tagsize int) int {
-			m := ptr.asPointerTo(t).Elem() // the map
-			n := 0
-			for _, k := range m.MapKeys() {
-				ki := k.Interface()
-				vi := m.MapIndex(k).Interface()
-				kaddr := toAddrPointer(&ki, false, false)      // pointer to key
-				vaddr := toAddrPointer(&vi, valIsPtr, false)   // pointer to value
-				siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
-				n += siz + SizeVarint(uint64(siz)) + tagsize
-			}
-			return n
-		},
-		func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
-			m := ptr.asPointerTo(t).Elem() // the map
-			var err error
-			keys := m.MapKeys()
-			if len(keys) > 1 && deterministic {
-				sort.Sort(mapKeys(keys))
-			}
-
-			var nerr nonFatal
-			for _, k := range keys {
-				ki := k.Interface()
-				vi := m.MapIndex(k).Interface()
-				kaddr := toAddrPointer(&ki, false, false)    // pointer to key
-				vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
-				b = appendVarint(b, tag)
-				siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
-				b = appendVarint(b, uint64(siz))
-				b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
-				if !nerr.Merge(err) {
-					return b, err
-				}
-				b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
-				if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
-					return b, err
-				}
-			}
-			return b, nerr.E
-		}
-}
-
-// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
-// fi is the marshal info of the field.
-// f is the pointer to the reflect data structure of the field.
-func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
-	// Oneof field is an interface. We need to get the actual data type on the fly.
-	t := f.Type
-	return func(ptr pointer, _ int) int {
-			p := ptr.getInterfacePointer()
-			if p.isNil() {
-				return 0
-			}
-			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
-			telem := v.Type()
-			e := fi.oneofElems[telem]
-			return e.sizer(p, e.tagsize)
-		},
-		func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
-			p := ptr.getInterfacePointer()
-			if p.isNil() {
-				return b, nil
-			}
-			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
-			telem := v.Type()
-			if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
-				return b, errOneofHasNil
-			}
-			e := fi.oneofElems[telem]
-			return e.marshaler(b, p, e.wiretag, deterministic)
-		}
-}
-
-// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
-func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
-	m, mu := ext.extensionsRead()
-	if m == nil {
-		return 0
-	}
-	mu.Lock()
-
-	n := 0
-	for _, e := range m {
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			n += len(e.enc)
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-		ei := u.getExtElemInfo(e.desc)
-		v := e.value
-		p := toAddrPointer(&v, ei.isptr, ei.deref)
-		n += ei.sizer(p, ei.tagsize)
-	}
-	mu.Unlock()
-	return n
-}
-
-// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
-	m, mu := ext.extensionsRead()
-	if m == nil {
-		return b, nil
-	}
-	mu.Lock()
-	defer mu.Unlock()
-
-	var err error
-	var nerr nonFatal
-
-	// Fast-path for common cases: zero or one extensions.
-	// Don't bother sorting the keys.
-	if len(m) <= 1 {
-		for _, e := range m {
-			if e.value == nil || e.desc == nil {
-				// Extension is only in its encoded form.
-				b = append(b, e.enc...)
-				continue
-			}
-
-			// We don't skip extensions that have an encoded form set,
-			// because the extension value may have been mutated after
-			// the last time this function was called.
-
-			ei := u.getExtElemInfo(e.desc)
-			v := e.value
-			p := toAddrPointer(&v, ei.isptr, ei.deref)
-			b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
-			if !nerr.Merge(err) {
-				return b, err
-			}
-		}
-		return b, nerr.E
-	}
-
-	// Sort the keys to provide a deterministic encoding.
-	// Not sure this is required, but the old code does it.
-	keys := make([]int, 0, len(m))
-	for k := range m {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-
-	for _, k := range keys {
-		e := m[int32(k)]
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			b = append(b, e.enc...)
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-
-		ei := u.getExtElemInfo(e.desc)
-		v := e.value
-		p := toAddrPointer(&v, ei.isptr, ei.deref)
-		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
-		if !nerr.Merge(err) {
-			return b, err
-		}
-	}
-	return b, nerr.E
-}
-
-// message set format is:
-//   message MessageSet {
-//     repeated group Item = 1 {
-//       required int32 type_id = 2;
-//       required string message = 3;
-//     };
-//   }
-
-// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
-// in message set format (above).
-func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
-	m, mu := ext.extensionsRead()
-	if m == nil {
-		return 0
-	}
-	mu.Lock()
-
-	n := 0
-	for id, e := range m {
-		n += 2                          // start group, end group. tag = 1 (size=1)
-		n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
-
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
-			siz := len(msgWithLen)
-			n += siz + 1 // message, tag = 3 (size=1)
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-
-		ei := u.getExtElemInfo(e.desc)
-		v := e.value
-		p := toAddrPointer(&v, ei.isptr, ei.deref)
-		n += ei.sizer(p, 1) // message, tag = 3 (size=1)
-	}
-	mu.Unlock()
-	return n
-}
-
-// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
-// to the end of byte slice b.
-func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
-	m, mu := ext.extensionsRead()
-	if m == nil {
-		return b, nil
-	}
-	mu.Lock()
-	defer mu.Unlock()
-
-	var err error
-	var nerr nonFatal
-
-	// Fast-path for common cases: zero or one extensions.
-	// Don't bother sorting the keys.
-	if len(m) <= 1 {
-		for id, e := range m {
-			b = append(b, 1<<3|WireStartGroup)
-			b = append(b, 2<<3|WireVarint)
-			b = appendVarint(b, uint64(id))
-
-			if e.value == nil || e.desc == nil {
-				// Extension is only in its encoded form.
-				msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
-				b = append(b, 3<<3|WireBytes)
-				b = append(b, msgWithLen...)
-				b = append(b, 1<<3|WireEndGroup)
-				continue
-			}
-
-			// We don't skip extensions that have an encoded form set,
-			// because the extension value may have been mutated after
-			// the last time this function was called.
-
-			ei := u.getExtElemInfo(e.desc)
-			v := e.value
-			p := toAddrPointer(&v, ei.isptr, ei.deref)
-			b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
-			if !nerr.Merge(err) {
-				return b, err
-			}
-			b = append(b, 1<<3|WireEndGroup)
-		}
-		return b, nerr.E
-	}
-
-	// Sort the keys to provide a deterministic encoding.
-	keys := make([]int, 0, len(m))
-	for k := range m {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-
-	for _, id := range keys {
-		e := m[int32(id)]
-		b = append(b, 1<<3|WireStartGroup)
-		b = append(b, 2<<3|WireVarint)
-		b = appendVarint(b, uint64(id))
-
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
-			b = append(b, 3<<3|WireBytes)
-			b = append(b, msgWithLen...)
-			b = append(b, 1<<3|WireEndGroup)
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-
-		ei := u.getExtElemInfo(e.desc)
-		v := e.value
-		p := toAddrPointer(&v, ei.isptr, ei.deref)
-		b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
-		b = append(b, 1<<3|WireEndGroup)
-		if !nerr.Merge(err) {
-			return b, err
-		}
-	}
-	return b, nerr.E
-}
-
-// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
-func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
-	if m == nil {
-		return 0
-	}
-
-	n := 0
-	for _, e := range m {
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			n += len(e.enc)
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-
-		ei := u.getExtElemInfo(e.desc)
-		v := e.value
-		p := toAddrPointer(&v, ei.isptr, ei.deref)
-		n += ei.sizer(p, ei.tagsize)
-	}
-	return n
-}
-
-// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
-func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
-	if m == nil {
-		return b, nil
-	}
-
-	// Sort the keys to provide a deterministic encoding.
-	keys := make([]int, 0, len(m))
-	for k := range m {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-
-	var err error
-	var nerr nonFatal
-	for _, k := range keys {
-		e := m[int32(k)]
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			b = append(b, e.enc...)
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-
-		ei := u.getExtElemInfo(e.desc)
-		v := e.value
-		p := toAddrPointer(&v, ei.isptr, ei.deref)
-		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
-		if !nerr.Merge(err) {
-			return b, err
-		}
-	}
-	return b, nerr.E
-}
-
-// newMarshaler is the interface representing objects that can marshal themselves.
-//
-// This exists to support protoc-gen-go generated messages.
-// The proto package will stop type-asserting to this interface in the future.
-//
-// DO NOT DEPEND ON THIS.
-type newMarshaler interface {
-	XXX_Size() int
-	XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
-}
-
-// Size returns the encoded size of a protocol buffer message.
-// This is the main entry point.
-func Size(pb Message) int {
-	if m, ok := pb.(newMarshaler); ok {
-		return m.XXX_Size()
-	}
-	if m, ok := pb.(Marshaler); ok {
-		// If the message can marshal itself, let it do it, for compatibility.
-		// NOTE: This is not efficient.
-		b, _ := m.Marshal()
-		return len(b)
-	}
-	// in case somehow we didn't generate the wrapper
-	if pb == nil {
-		return 0
-	}
-	var info InternalMessageInfo
-	return info.Size(pb)
-}
-
-// Marshal takes a protocol buffer message
-// and encodes it into the wire format, returning the data.
-// This is the main entry point.
-func Marshal(pb Message) ([]byte, error) {
-	if m, ok := pb.(newMarshaler); ok {
-		siz := m.XXX_Size()
-		b := make([]byte, 0, siz)
-		return m.XXX_Marshal(b, false)
-	}
-	if m, ok := pb.(Marshaler); ok {
-		// If the message can marshal itself, let it do it, for compatibility.
-		// NOTE: This is not efficient.
-		return m.Marshal()
-	}
-	// in case somehow we didn't generate the wrapper
-	if pb == nil {
-		return nil, ErrNil
-	}
-	var info InternalMessageInfo
-	siz := info.Size(pb)
-	b := make([]byte, 0, siz)
-	return info.Marshal(b, pb, false)
-}
-
-// Marshal takes a protocol buffer message
-// and encodes it into the wire format, writing the result to the
-// Buffer.
-// This is an alternative entry point. It is not necessary to use
-// a Buffer for most applications.
-func (p *Buffer) Marshal(pb Message) error {
-	var err error
-	if m, ok := pb.(newMarshaler); ok {
-		siz := m.XXX_Size()
-		p.grow(siz) // make sure buf has enough capacity
-		p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
-		return err
-	}
-	if m, ok := pb.(Marshaler); ok {
-		// If the message can marshal itself, let it do it, for compatibility.
-		// NOTE: This is not efficient.
-		b, err := m.Marshal()
-		p.buf = append(p.buf, b...)
-		return err
-	}
-	// in case somehow we didn't generate the wrapper
-	if pb == nil {
-		return ErrNil
-	}
-	var info InternalMessageInfo
-	siz := info.Size(pb)
-	p.grow(siz) // make sure buf has enough capacity
-	p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
-	return err
-}
-
-// grow grows the buffer's capacity, if necessary, to guarantee space for
-// another n bytes. After grow(n), at least n bytes can be written to the
-// buffer without another allocation.
-func (p *Buffer) grow(n int) {
-	need := len(p.buf) + n
-	if need <= cap(p.buf) {
-		return
-	}
-	newCap := len(p.buf) * 2
-	if newCap < need {
-		newCap = need
-	}
-	p.buf = append(make([]byte, 0, newCap), p.buf...)
-}
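The appendZigzag32*/appendZigzag64* helpers in the file deleted above encode sint32/sint64 fields with the zigzag transform, (v << 1) ^ (v >> 31) for 32-bit and (v << 1) ^ (v >> 63) for 64-bit values, so small negative numbers still get short varints. A minimal standalone sketch of the transform and its decode-side inverse (illustrative only, not part of this change):

```go
package main

import "fmt"

// zigzag64 mirrors the arithmetic in appendZigzag64*: interleave negative
// and positive values so magnitude, not sign, determines varint length.
func zigzag64(v int64) uint64 { return uint64(v<<1) ^ uint64(v>>63) }

// unzigzag64 is the inverse transform applied on the unmarshal side.
func unzigzag64(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2, 123456789} {
		z := zigzag64(v)
		fmt.Printf("v=%d zigzag=%d roundtrip=%d\n", v, z, unzigzag64(z))
	}
}
```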
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
deleted file mode 100644
index 5525def..0000000
--- a/vendor/github.com/golang/protobuf/proto/table_merge.go
+++ /dev/null
@@ -1,654 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-	"sync"
-	"sync/atomic"
-)
-
-// Merge merges the src message into dst.
-// This assumes that dst and src are of the same type and are non-nil.
-func (a *InternalMessageInfo) Merge(dst, src Message) {
-	mi := atomicLoadMergeInfo(&a.merge)
-	if mi == nil {
-		mi = getMergeInfo(reflect.TypeOf(dst).Elem())
-		atomicStoreMergeInfo(&a.merge, mi)
-	}
-	mi.merge(toPointer(&dst), toPointer(&src))
-}
-
-type mergeInfo struct {
-	typ reflect.Type
-
-	initialized int32 // 0: only typ is valid, 1: everything is valid
-	lock        sync.Mutex
-
-	fields       []mergeFieldInfo
-	unrecognized field // Offset of XXX_unrecognized
-}
-
-type mergeFieldInfo struct {
-	field field // Offset of field, guaranteed to be valid
-
-	// isPointer reports whether the value in the field is a pointer.
-	// This is true for the following situations:
-	//	* Pointer to struct
-	//	* Pointer to basic type (proto2 only)
-	//	* Slice (first value in slice header is a pointer)
-	//	* String (first value in string header is a pointer)
-	isPointer bool
-
-	// basicWidth reports the width of the field assuming that it is directly
-	// embedded in the struct (as is the case for basic types in proto3).
-	// The possible values are:
-	// 	0: invalid
-	//	1: bool
-	//	4: int32, uint32, float32
-	//	8: int64, uint64, float64
-	basicWidth int
-
-	// Where dst and src are pointers to the types being merged.
-	merge func(dst, src pointer)
-}
-
-var (
-	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
-	mergeInfoLock sync.Mutex
-)
-
-func getMergeInfo(t reflect.Type) *mergeInfo {
-	mergeInfoLock.Lock()
-	defer mergeInfoLock.Unlock()
-	mi := mergeInfoMap[t]
-	if mi == nil {
-		mi = &mergeInfo{typ: t}
-		mergeInfoMap[t] = mi
-	}
-	return mi
-}
-
-// merge merges src into dst assuming they are both of type *mi.typ.
-func (mi *mergeInfo) merge(dst, src pointer) {
-	if dst.isNil() {
-		panic("proto: nil destination")
-	}
-	if src.isNil() {
-		return // Nothing to do.
-	}
-
-	if atomic.LoadInt32(&mi.initialized) == 0 {
-		mi.computeMergeInfo()
-	}
-
-	for _, fi := range mi.fields {
-		sfp := src.offset(fi.field)
-
-		// As an optimization, we can avoid the merge function call cost
-		// if we know for sure that the source will have no effect
-		// by checking if it is the zero value.
-		if unsafeAllowed {
-			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
-				continue
-			}
-			if fi.basicWidth > 0 {
-				switch {
-				case fi.basicWidth == 1 && !*sfp.toBool():
-					continue
-				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
-					continue
-				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
-					continue
-				}
-			}
-		}
-
-		dfp := dst.offset(fi.field)
-		fi.merge(dfp, sfp)
-	}
-
-	// TODO: Make this faster?
-	out := dst.asPointerTo(mi.typ).Elem()
-	in := src.asPointerTo(mi.typ).Elem()
-	if emIn, err := extendable(in.Addr().Interface()); err == nil {
-		emOut, _ := extendable(out.Addr().Interface())
-		mIn, muIn := emIn.extensionsRead()
-		if mIn != nil {
-			mOut := emOut.extensionsWrite()
-			muIn.Lock()
-			mergeExtension(mOut, mIn)
-			muIn.Unlock()
-		}
-	}
-
-	if mi.unrecognized.IsValid() {
-		if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
-			*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
-		}
-	}
-}
-
-func (mi *mergeInfo) computeMergeInfo() {
-	mi.lock.Lock()
-	defer mi.lock.Unlock()
-	if mi.initialized != 0 {
-		return
-	}
-	t := mi.typ
-	n := t.NumField()
-
-	props := GetProperties(t)
-	for i := 0; i < n; i++ {
-		f := t.Field(i)
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-
-		mfi := mergeFieldInfo{field: toField(&f)}
-		tf := f.Type
-
-		// As an optimization, we can avoid the merge function call cost
-		// if we know for sure that the source will have no effect
-		// by checking if it is the zero value.
-		if unsafeAllowed {
-			switch tf.Kind() {
-			case reflect.Ptr, reflect.Slice, reflect.String:
-				// As a special case, we assume slices and strings are pointers
-				// since we know that the first field in the SliceHeader or
-				// StringHeader is a data pointer.
-				mfi.isPointer = true
-			case reflect.Bool:
-				mfi.basicWidth = 1
-			case reflect.Int32, reflect.Uint32, reflect.Float32:
-				mfi.basicWidth = 4
-			case reflect.Int64, reflect.Uint64, reflect.Float64:
-				mfi.basicWidth = 8
-			}
-		}
-
-		// Unwrap tf to get at its most basic type.
-		var isPointer, isSlice bool
-		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
-			isSlice = true
-			tf = tf.Elem()
-		}
-		if tf.Kind() == reflect.Ptr {
-			isPointer = true
-			tf = tf.Elem()
-		}
-		if isPointer && isSlice && tf.Kind() != reflect.Struct {
-			panic("both pointer and slice for basic type in " + tf.Name())
-		}
-
-		switch tf.Kind() {
-		case reflect.Int32:
-			switch {
-			case isSlice: // E.g., []int32
-				mfi.merge = func(dst, src pointer) {
-					// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
-					/*
-						sfsp := src.toInt32Slice()
-						if *sfsp != nil {
-							dfsp := dst.toInt32Slice()
-							*dfsp = append(*dfsp, *sfsp...)
-							if *dfsp == nil {
-								*dfsp = []int64{}
-							}
-						}
-					*/
-					sfs := src.getInt32Slice()
-					if sfs != nil {
-						dfs := dst.getInt32Slice()
-						dfs = append(dfs, sfs...)
-						if dfs == nil {
-							dfs = []int32{}
-						}
-						dst.setInt32Slice(dfs)
-					}
-				}
-			case isPointer: // E.g., *int32
-				mfi.merge = func(dst, src pointer) {
-					// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
-					/*
-						sfpp := src.toInt32Ptr()
-						if *sfpp != nil {
-							dfpp := dst.toInt32Ptr()
-							if *dfpp == nil {
-								*dfpp = Int32(**sfpp)
-							} else {
-								**dfpp = **sfpp
-							}
-						}
-					*/
-					sfp := src.getInt32Ptr()
-					if sfp != nil {
-						dfp := dst.getInt32Ptr()
-						if dfp == nil {
-							dst.setInt32Ptr(*sfp)
-						} else {
-							*dfp = *sfp
-						}
-					}
-				}
-			default: // E.g., int32
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toInt32(); v != 0 {
-						*dst.toInt32() = v
-					}
-				}
-			}
-		case reflect.Int64:
-			switch {
-			case isSlice: // E.g., []int64
-				mfi.merge = func(dst, src pointer) {
-					sfsp := src.toInt64Slice()
-					if *sfsp != nil {
-						dfsp := dst.toInt64Slice()
-						*dfsp = append(*dfsp, *sfsp...)
-						if *dfsp == nil {
-							*dfsp = []int64{}
-						}
-					}
-				}
-			case isPointer: // E.g., *int64
-				mfi.merge = func(dst, src pointer) {
-					sfpp := src.toInt64Ptr()
-					if *sfpp != nil {
-						dfpp := dst.toInt64Ptr()
-						if *dfpp == nil {
-							*dfpp = Int64(**sfpp)
-						} else {
-							**dfpp = **sfpp
-						}
-					}
-				}
-			default: // E.g., int64
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toInt64(); v != 0 {
-						*dst.toInt64() = v
-					}
-				}
-			}
-		case reflect.Uint32:
-			switch {
-			case isSlice: // E.g., []uint32
-				mfi.merge = func(dst, src pointer) {
-					sfsp := src.toUint32Slice()
-					if *sfsp != nil {
-						dfsp := dst.toUint32Slice()
-						*dfsp = append(*dfsp, *sfsp...)
-						if *dfsp == nil {
-							*dfsp = []uint32{}
-						}
-					}
-				}
-			case isPointer: // E.g., *uint32
-				mfi.merge = func(dst, src pointer) {
-					sfpp := src.toUint32Ptr()
-					if *sfpp != nil {
-						dfpp := dst.toUint32Ptr()
-						if *dfpp == nil {
-							*dfpp = Uint32(**sfpp)
-						} else {
-							**dfpp = **sfpp
-						}
-					}
-				}
-			default: // E.g., uint32
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toUint32(); v != 0 {
-						*dst.toUint32() = v
-					}
-				}
-			}
-		case reflect.Uint64:
-			switch {
-			case isSlice: // E.g., []uint64
-				mfi.merge = func(dst, src pointer) {
-					sfsp := src.toUint64Slice()
-					if *sfsp != nil {
-						dfsp := dst.toUint64Slice()
-						*dfsp = append(*dfsp, *sfsp...)
-						if *dfsp == nil {
-							*dfsp = []uint64{}
-						}
-					}
-				}
-			case isPointer: // E.g., *uint64
-				mfi.merge = func(dst, src pointer) {
-					sfpp := src.toUint64Ptr()
-					if *sfpp != nil {
-						dfpp := dst.toUint64Ptr()
-						if *dfpp == nil {
-							*dfpp = Uint64(**sfpp)
-						} else {
-							**dfpp = **sfpp
-						}
-					}
-				}
-			default: // E.g., uint64
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toUint64(); v != 0 {
-						*dst.toUint64() = v
-					}
-				}
-			}
-		case reflect.Float32:
-			switch {
-			case isSlice: // E.g., []float32
-				mfi.merge = func(dst, src pointer) {
-					sfsp := src.toFloat32Slice()
-					if *sfsp != nil {
-						dfsp := dst.toFloat32Slice()
-						*dfsp = append(*dfsp, *sfsp...)
-						if *dfsp == nil {
-							*dfsp = []float32{}
-						}
-					}
-				}
-			case isPointer: // E.g., *float32
-				mfi.merge = func(dst, src pointer) {
-					sfpp := src.toFloat32Ptr()
-					if *sfpp != nil {
-						dfpp := dst.toFloat32Ptr()
-						if *dfpp == nil {
-							*dfpp = Float32(**sfpp)
-						} else {
-							**dfpp = **sfpp
-						}
-					}
-				}
-			default: // E.g., float32
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toFloat32(); v != 0 {
-						*dst.toFloat32() = v
-					}
-				}
-			}
-		case reflect.Float64:
-			switch {
-			case isSlice: // E.g., []float64
-				mfi.merge = func(dst, src pointer) {
-					sfsp := src.toFloat64Slice()
-					if *sfsp != nil {
-						dfsp := dst.toFloat64Slice()
-						*dfsp = append(*dfsp, *sfsp...)
-						if *dfsp == nil {
-							*dfsp = []float64{}
-						}
-					}
-				}
-			case isPointer: // E.g., *float64
-				mfi.merge = func(dst, src pointer) {
-					sfpp := src.toFloat64Ptr()
-					if *sfpp != nil {
-						dfpp := dst.toFloat64Ptr()
-						if *dfpp == nil {
-							*dfpp = Float64(**sfpp)
-						} else {
-							**dfpp = **sfpp
-						}
-					}
-				}
-			default: // E.g., float64
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toFloat64(); v != 0 {
-						*dst.toFloat64() = v
-					}
-				}
-			}
-		case reflect.Bool:
-			switch {
-			case isSlice: // E.g., []bool
-				mfi.merge = func(dst, src pointer) {
-					sfsp := src.toBoolSlice()
-					if *sfsp != nil {
-						dfsp := dst.toBoolSlice()
-						*dfsp = append(*dfsp, *sfsp...)
-						if *dfsp == nil {
-							*dfsp = []bool{}
-						}
-					}
-				}
-			case isPointer: // E.g., *bool
-				mfi.merge = func(dst, src pointer) {
-					sfpp := src.toBoolPtr()
-					if *sfpp != nil {
-						dfpp := dst.toBoolPtr()
-						if *dfpp == nil {
-							*dfpp = Bool(**sfpp)
-						} else {
-							**dfpp = **sfpp
-						}
-					}
-				}
-			default: // E.g., bool
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toBool(); v {
-						*dst.toBool() = v
-					}
-				}
-			}
-		case reflect.String:
-			switch {
-			case isSlice: // E.g., []string
-				mfi.merge = func(dst, src pointer) {
-					sfsp := src.toStringSlice()
-					if *sfsp != nil {
-						dfsp := dst.toStringSlice()
-						*dfsp = append(*dfsp, *sfsp...)
-						if *dfsp == nil {
-							*dfsp = []string{}
-						}
-					}
-				}
-			case isPointer: // E.g., *string
-				mfi.merge = func(dst, src pointer) {
-					sfpp := src.toStringPtr()
-					if *sfpp != nil {
-						dfpp := dst.toStringPtr()
-						if *dfpp == nil {
-							*dfpp = String(**sfpp)
-						} else {
-							**dfpp = **sfpp
-						}
-					}
-				}
-			default: // E.g., string
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toString(); v != "" {
-						*dst.toString() = v
-					}
-				}
-			}
-		case reflect.Slice:
-			isProto3 := props.Prop[i].proto3
-			switch {
-			case isPointer:
-				panic("bad pointer in byte slice case in " + tf.Name())
-			case tf.Elem().Kind() != reflect.Uint8:
-				panic("bad element kind in byte slice case in " + tf.Name())
-			case isSlice: // E.g., [][]byte
-				mfi.merge = func(dst, src pointer) {
-					sbsp := src.toBytesSlice()
-					if *sbsp != nil {
-						dbsp := dst.toBytesSlice()
-						for _, sb := range *sbsp {
-							if sb == nil {
-								*dbsp = append(*dbsp, nil)
-							} else {
-								*dbsp = append(*dbsp, append([]byte{}, sb...))
-							}
-						}
-						if *dbsp == nil {
-							*dbsp = [][]byte{}
-						}
-					}
-				}
-			default: // E.g., []byte
-				mfi.merge = func(dst, src pointer) {
-					sbp := src.toBytes()
-					if *sbp != nil {
-						dbp := dst.toBytes()
-						if !isProto3 || len(*sbp) > 0 {
-							*dbp = append([]byte{}, *sbp...)
-						}
-					}
-				}
-			}
-		case reflect.Struct:
-			switch {
-			case !isPointer:
-				panic(fmt.Sprintf("message field %s without pointer", tf))
-			case isSlice: // E.g., []*pb.T
-				mi := getMergeInfo(tf)
-				mfi.merge = func(dst, src pointer) {
-					sps := src.getPointerSlice()
-					if sps != nil {
-						dps := dst.getPointerSlice()
-						for _, sp := range sps {
-							var dp pointer
-							if !sp.isNil() {
-								dp = valToPointer(reflect.New(tf))
-								mi.merge(dp, sp)
-							}
-							dps = append(dps, dp)
-						}
-						if dps == nil {
-							dps = []pointer{}
-						}
-						dst.setPointerSlice(dps)
-					}
-				}
-			default: // E.g., *pb.T
-				mi := getMergeInfo(tf)
-				mfi.merge = func(dst, src pointer) {
-					sp := src.getPointer()
-					if !sp.isNil() {
-						dp := dst.getPointer()
-						if dp.isNil() {
-							dp = valToPointer(reflect.New(tf))
-							dst.setPointer(dp)
-						}
-						mi.merge(dp, sp)
-					}
-				}
-			}
-		case reflect.Map:
-			switch {
-			case isPointer || isSlice:
-				panic("bad pointer or slice in map case in " + tf.Name())
-			default: // E.g., map[K]V
-				mfi.merge = func(dst, src pointer) {
-					sm := src.asPointerTo(tf).Elem()
-					if sm.Len() == 0 {
-						return
-					}
-					dm := dst.asPointerTo(tf).Elem()
-					if dm.IsNil() {
-						dm.Set(reflect.MakeMap(tf))
-					}
-
-					switch tf.Elem().Kind() {
-					case reflect.Ptr: // Proto struct (e.g., *T)
-						for _, key := range sm.MapKeys() {
-							val := sm.MapIndex(key)
-							val = reflect.ValueOf(Clone(val.Interface().(Message)))
-							dm.SetMapIndex(key, val)
-						}
-					case reflect.Slice: // E.g. Bytes type (e.g., []byte)
-						for _, key := range sm.MapKeys() {
-							val := sm.MapIndex(key)
-							val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
-							dm.SetMapIndex(key, val)
-						}
-					default: // Basic type (e.g., string)
-						for _, key := range sm.MapKeys() {
-							val := sm.MapIndex(key)
-							dm.SetMapIndex(key, val)
-						}
-					}
-				}
-			}
-		case reflect.Interface:
-			// Must be oneof field.
-			switch {
-			case isPointer || isSlice:
-				panic("bad pointer or slice in interface case in " + tf.Name())
-			default: // E.g., interface{}
-				// TODO: Make this faster?
-				mfi.merge = func(dst, src pointer) {
-					su := src.asPointerTo(tf).Elem()
-					if !su.IsNil() {
-						du := dst.asPointerTo(tf).Elem()
-						typ := su.Elem().Type()
-						if du.IsNil() || du.Elem().Type() != typ {
-							du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
-						}
-						sv := su.Elem().Elem().Field(0)
-						if sv.Kind() == reflect.Ptr && sv.IsNil() {
-							return
-						}
-						dv := du.Elem().Elem().Field(0)
-						if dv.Kind() == reflect.Ptr && dv.IsNil() {
-							dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
-						}
-						switch sv.Type().Kind() {
-						case reflect.Ptr: // Proto struct (e.g., *T)
-							Merge(dv.Interface().(Message), sv.Interface().(Message))
-						case reflect.Slice: // E.g. Bytes type (e.g., []byte)
-							dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
-						default: // Basic type (e.g., string)
-							dv.Set(sv)
-						}
-					}
-				}
-			}
-		default:
-			panic(fmt.Sprintf("merger not found for type:%s", tf))
-		}
-		mi.fields = append(mi.fields, mfi)
-	}
-
-	mi.unrecognized = invalidField
-	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
-		if f.Type != reflect.TypeOf([]byte{}) {
-			panic("expected XXX_unrecognized to be of type []byte")
-		}
-		mi.unrecognized = toField(&f)
-	}
-
-	atomic.StoreInt32(&mi.initialized, 1)
-}
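table_merge.go, deleted above, backs proto.Merge: non-zero scalars in src overwrite dst, repeated fields are appended, and message and map values are deep-copied rather than aliased. A hedged usage sketch, assuming the well-known Struct type from github.com/golang/protobuf/ptypes/struct is available to stand in for a generated message (neither it nor this example is part of the change):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	dst := &structpb.Struct{Fields: map[string]*structpb.Value{
		"device": {Kind: &structpb.Value_StringValue{StringValue: "olt-1"}},
	}}
	src := &structpb.Struct{Fields: map[string]*structpb.Value{
		"pon_port": {Kind: &structpb.Value_NumberValue{NumberValue: 2}},
	}}

	// proto.Merge deep-copies src into dst: the "pon_port" entry is cloned
	// and added alongside "device", following the map-merge path above.
	proto.Merge(dst, src)
	fmt.Println(len(dst.Fields)) // 2
}
```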
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
deleted file mode 100644
index acee2fc..0000000
--- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
+++ /dev/null
@@ -1,2053 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"math"
-	"reflect"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"unicode/utf8"
-)
-
-// Unmarshal is the entry point from the generated .pb.go files.
-// This function is not intended to be used by non-generated code.
-// This function is not subject to any compatibility guarantee.
-// msg contains a pointer to a protocol buffer struct.
-// b is the data to be unmarshaled into the protocol buffer.
-// a is a pointer to a place to store cached unmarshal information.
-func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
-	// Load the unmarshal information for this message type.
-	// The atomic load ensures memory consistency.
-	u := atomicLoadUnmarshalInfo(&a.unmarshal)
-	if u == nil {
-		// Slow path: find unmarshal info for msg, update a with it.
-		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
-		atomicStoreUnmarshalInfo(&a.unmarshal, u)
-	}
-	// Then do the unmarshaling.
-	err := u.unmarshal(toPointer(&msg), b)
-	return err
-}
-
-type unmarshalInfo struct {
-	typ reflect.Type // type of the protobuf struct
-
-	// 0 = only typ field is initialized
-	// 1 = completely initialized
-	initialized     int32
-	lock            sync.Mutex                    // prevents double initialization
-	dense           []unmarshalFieldInfo          // fields indexed by tag #
-	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
-	reqFields       []string                      // names of required fields
-	reqMask         uint64                        // 1<<len(reqFields)-1
-	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
-	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
-	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension)
-	extensionRanges []ExtensionRange              // if non-nil, implies extensions field is valid
-	isMessageSet    bool                          // if true, implies extensions field is valid
-}
-
-// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
-// It decodes the field, stores it at f, and returns the unused bytes.
-// w is the wire encoding.
-// b is the data after the tag and wire encoding have been read.
-type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
-
-type unmarshalFieldInfo struct {
-	// location of the field in the proto message structure.
-	field field
-
-	// function to unmarshal the data for the field.
-	unmarshal unmarshaler
-
-	// if a required field, contains a single set bit at this field's index in the required field list.
-	reqMask uint64
-
-	name string // name of the field, for error reporting
-}
-
-var (
-	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
-	unmarshalInfoLock sync.Mutex
-)
-
-// getUnmarshalInfo returns the data structure which can be
-// subsequently used to unmarshal a message of the given type.
-// t is the type of the message (note: not pointer to message).
-func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
-	// It would be correct to return a new unmarshalInfo
-	// unconditionally. We would end up allocating one
-	// per occurrence of that type as a message or submessage.
-	// We use a cache here just to reduce memory usage.
-	unmarshalInfoLock.Lock()
-	defer unmarshalInfoLock.Unlock()
-	u := unmarshalInfoMap[t]
-	if u == nil {
-		u = &unmarshalInfo{typ: t}
-		// Note: we just set the type here. The rest of the fields
-		// will be initialized on first use.
-		unmarshalInfoMap[t] = u
-	}
-	return u
-}
-
-// unmarshal does the main work of unmarshaling a message.
-// u provides type information used to unmarshal the message.
-// m is a pointer to a protocol buffer message.
-// b is a byte stream to unmarshal into m.
-// This is the top routine used when recursively unmarshaling submessages.
-func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
-	if atomic.LoadInt32(&u.initialized) == 0 {
-		u.computeUnmarshalInfo()
-	}
-	if u.isMessageSet {
-		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
-	}
-	var reqMask uint64 // bitmask of required fields we've seen.
-	var errLater error
-	for len(b) > 0 {
-		// Read tag and wire type.
-		// Special case 1 and 2 byte varints.
-		var x uint64
-		if b[0] < 128 {
-			x = uint64(b[0])
-			b = b[1:]
-		} else if len(b) >= 2 && b[1] < 128 {
-			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
-			b = b[2:]
-		} else {
-			var n int
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return io.ErrUnexpectedEOF
-			}
-			b = b[n:]
-		}
-		tag := x >> 3
-		wire := int(x) & 7
-
-		// Dispatch on the tag to one of the unmarshal* functions below.
-		var f unmarshalFieldInfo
-		if tag < uint64(len(u.dense)) {
-			f = u.dense[tag]
-		} else {
-			f = u.sparse[tag]
-		}
-		if fn := f.unmarshal; fn != nil {
-			var err error
-			b, err = fn(b, m.offset(f.field), wire)
-			if err == nil {
-				reqMask |= f.reqMask
-				continue
-			}
-			if r, ok := err.(*RequiredNotSetError); ok {
-				// Remember this error, but keep parsing. We need to produce
-				// a full parse even if a required field is missing.
-				if errLater == nil {
-					errLater = r
-				}
-				reqMask |= f.reqMask
-				continue
-			}
-			if err != errInternalBadWireType {
-				if err == errInvalidUTF8 {
-					if errLater == nil {
-						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
-						errLater = &invalidUTF8Error{fullName}
-					}
-					continue
-				}
-				return err
-			}
-			// Fragments with bad wire type are treated as unknown fields.
-		}
-
-		// Unknown tag.
-		if !u.unrecognized.IsValid() {
-			// Don't keep unrecognized data; just skip it.
-			var err error
-			b, err = skipField(b, wire)
-			if err != nil {
-				return err
-			}
-			continue
-		}
-		// Keep unrecognized data around.
-		// maybe in extensions, maybe in the unrecognized field.
-		z := m.offset(u.unrecognized).toBytes()
-		var emap map[int32]Extension
-		var e Extension
-		for _, r := range u.extensionRanges {
-			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
-				if u.extensions.IsValid() {
-					mp := m.offset(u.extensions).toExtensions()
-					emap = mp.extensionsWrite()
-					e = emap[int32(tag)]
-					z = &e.enc
-					break
-				}
-				if u.oldExtensions.IsValid() {
-					p := m.offset(u.oldExtensions).toOldExtensions()
-					emap = *p
-					if emap == nil {
-						emap = map[int32]Extension{}
-						*p = emap
-					}
-					e = emap[int32(tag)]
-					z = &e.enc
-					break
-				}
-				panic("no extensions field available")
-			}
-		}
-
-		// Use wire type to skip data.
-		var err error
-		b0 := b
-		b, err = skipField(b, wire)
-		if err != nil {
-			return err
-		}
-		*z = encodeVarint(*z, tag<<3|uint64(wire))
-		*z = append(*z, b0[:len(b0)-len(b)]...)
-
-		if emap != nil {
-			emap[int32(tag)] = e
-		}
-	}
-	if reqMask != u.reqMask && errLater == nil {
-		// A required field of this message is missing.
-		for _, n := range u.reqFields {
-			if reqMask&1 == 0 {
-				errLater = &RequiredNotSetError{n}
-			}
-			reqMask >>= 1
-		}
-	}
-	return errLater
-}
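
The decode loop above reads one varint "key" per field and splits it into a field number and a wire type (`tag := x >> 3`, `wire := int(x) & 7`). A minimal standalone sketch of that decomposition (the `splitKey` helper is hypothetical, not part of the vendored file):

```go
package main

import "fmt"

// splitKey mirrors the tag/wire decomposition in the unmarshal loop above:
// the low three bits of the key are the wire type, the rest is the field number.
func splitKey(key uint64) (tag uint64, wire int) {
	return key >> 3, int(key) & 7
}

func main() {
	// Field number 1 with wire type 2 (length-delimited) encodes as key 0x0A.
	tag, wire := splitKey(0x0A)
	fmt.Println(tag, wire) // 1 2
}
```
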
-
-// computeUnmarshalInfo fills in u with information for use
-// in unmarshaling protocol buffers of type u.typ.
-func (u *unmarshalInfo) computeUnmarshalInfo() {
-	u.lock.Lock()
-	defer u.lock.Unlock()
-	if u.initialized != 0 {
-		return
-	}
-	t := u.typ
-	n := t.NumField()
-
-	// Set up the "not found" value for the unrecognized byte buffer.
-	// This is the default for proto3.
-	u.unrecognized = invalidField
-	u.extensions = invalidField
-	u.oldExtensions = invalidField
-
-	// List of the generated type and offset for each oneof field.
-	type oneofField struct {
-		ityp  reflect.Type // interface type of oneof field
-		field field        // offset in containing message
-	}
-	var oneofFields []oneofField
-
-	for i := 0; i < n; i++ {
-		f := t.Field(i)
-		if f.Name == "XXX_unrecognized" {
-			// The byte slice used to hold unrecognized input is special.
-			if f.Type != reflect.TypeOf(([]byte)(nil)) {
-				panic("bad type for XXX_unrecognized field: " + f.Type.Name())
-			}
-			u.unrecognized = toField(&f)
-			continue
-		}
-		if f.Name == "XXX_InternalExtensions" {
-			// Ditto here.
-			if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
-				panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
-			}
-			u.extensions = toField(&f)
-			if f.Tag.Get("protobuf_messageset") == "1" {
-				u.isMessageSet = true
-			}
-			continue
-		}
-		if f.Name == "XXX_extensions" {
-			// An older form of the extensions field.
-			if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
-				panic("bad type for XXX_extensions field: " + f.Type.Name())
-			}
-			u.oldExtensions = toField(&f)
-			continue
-		}
-		if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
-			continue
-		}
-
-		oneof := f.Tag.Get("protobuf_oneof")
-		if oneof != "" {
-			oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
-			// The rest of oneof processing happens below.
-			continue
-		}
-
-		tags := f.Tag.Get("protobuf")
-		tagArray := strings.Split(tags, ",")
-		if len(tagArray) < 2 {
-			panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
-		}
-		tag, err := strconv.Atoi(tagArray[1])
-		if err != nil {
-			panic("protobuf tag field not an integer: " + tagArray[1])
-		}
-
-		name := ""
-		for _, tag := range tagArray[3:] {
-			if strings.HasPrefix(tag, "name=") {
-				name = tag[5:]
-			}
-		}
-
-		// Extract unmarshaling function from the field (its type and tags).
-		unmarshal := fieldUnmarshaler(&f)
-
-		// Required field?
-		var reqMask uint64
-		if tagArray[2] == "req" {
-			bit := len(u.reqFields)
-			u.reqFields = append(u.reqFields, name)
-			reqMask = uint64(1) << uint(bit)
-			// TODO: if we have more than 64 required fields, we end up
-			// not verifying that all required fields are present.
-			// Fix this, perhaps using a count of required fields?
-		}
-
-		// Store the info in the correct slot in the message.
-		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
-	}
-
-	// Find any types associated with oneof fields.
-	var oneofImplementers []interface{}
-	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
-	case oneofFuncsIface:
-		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
-	case oneofWrappersIface:
-		oneofImplementers = m.XXX_OneofWrappers()
-	}
-	for _, v := range oneofImplementers {
-		tptr := reflect.TypeOf(v) // *Msg_X
-		typ := tptr.Elem()        // Msg_X
-
-		f := typ.Field(0) // oneof implementers have one field
-		baseUnmarshal := fieldUnmarshaler(&f)
-		tags := strings.Split(f.Tag.Get("protobuf"), ",")
-		fieldNum, err := strconv.Atoi(tags[1])
-		if err != nil {
-			panic("protobuf tag field not an integer: " + tags[1])
-		}
-		var name string
-		for _, tag := range tags {
-			if strings.HasPrefix(tag, "name=") {
-				name = strings.TrimPrefix(tag, "name=")
-				break
-			}
-		}
-
-		// Find the oneof field that this struct implements.
-		// Might take O(n^2) to process all of the oneofs, but who cares.
-		for _, of := range oneofFields {
-			if tptr.Implements(of.ityp) {
-				// We have found the corresponding interface for this struct.
-				// That lets us know where this struct should be stored
-				// when we encounter it during unmarshaling.
-				unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
-				u.setTag(fieldNum, of.field, unmarshal, 0, name)
-			}
-		}
-
-	}
-
-	// Get extension ranges, if any.
-	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
-	if fn.IsValid() {
-		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
-			panic("a message with extensions, but no extensions field in " + t.Name())
-		}
-		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
-	}
-
-	// Explicitly disallow tag 0. This will ensure we flag an error
-	// when decoding a buffer of all zeros. Without this code, we
-	// would decode and skip an all-zero buffer of even length.
-	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
-	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
-		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
-	}, 0, "")
-
-	// Set mask for required field check.
-	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
-
-	atomic.StoreInt32(&u.initialized, 1)
-}
-
-// setTag stores the unmarshal information for the given tag.
-// tag = tag # for field
-// field/unmarshal = unmarshal info for that field.
-// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
-// name = short name of the field.
-func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
-	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
-	n := u.typ.NumField()
-	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
-		for len(u.dense) <= tag {
-			u.dense = append(u.dense, unmarshalFieldInfo{})
-		}
-		u.dense[tag] = i
-		return
-	}
-	if u.sparse == nil {
-		u.sparse = map[uint64]unmarshalFieldInfo{}
-	}
-	u.sparse[uint64(tag)] = i
-}
-
-// fieldUnmarshaler returns an unmarshaler for the given field.
-func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
-	if f.Type.Kind() == reflect.Map {
-		return makeUnmarshalMap(f)
-	}
-	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
-}
-
-// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
-	tagArray := strings.Split(tags, ",")
-	encoding := tagArray[0]
-	name := "unknown"
-	proto3 := false
-	validateUTF8 := true
-	for _, tag := range tagArray[3:] {
-		if strings.HasPrefix(tag, "name=") {
-			name = tag[5:]
-		}
-		if tag == "proto3" {
-			proto3 = true
-		}
-	}
-	validateUTF8 = validateUTF8 && proto3
-
-	// Figure out packaging (pointer, slice, or both)
-	slice := false
-	pointer := false
-	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
-		slice = true
-		t = t.Elem()
-	}
-	if t.Kind() == reflect.Ptr {
-		pointer = true
-		t = t.Elem()
-	}
-
-	// We'll never have both pointer and slice for basic types.
-	if pointer && slice && t.Kind() != reflect.Struct {
-		panic("both pointer and slice for basic type in " + t.Name())
-	}
-
-	switch t.Kind() {
-	case reflect.Bool:
-		if pointer {
-			return unmarshalBoolPtr
-		}
-		if slice {
-			return unmarshalBoolSlice
-		}
-		return unmarshalBoolValue
-	case reflect.Int32:
-		switch encoding {
-		case "fixed32":
-			if pointer {
-				return unmarshalFixedS32Ptr
-			}
-			if slice {
-				return unmarshalFixedS32Slice
-			}
-			return unmarshalFixedS32Value
-		case "varint":
-			// this could be int32 or enum
-			if pointer {
-				return unmarshalInt32Ptr
-			}
-			if slice {
-				return unmarshalInt32Slice
-			}
-			return unmarshalInt32Value
-		case "zigzag32":
-			if pointer {
-				return unmarshalSint32Ptr
-			}
-			if slice {
-				return unmarshalSint32Slice
-			}
-			return unmarshalSint32Value
-		}
-	case reflect.Int64:
-		switch encoding {
-		case "fixed64":
-			if pointer {
-				return unmarshalFixedS64Ptr
-			}
-			if slice {
-				return unmarshalFixedS64Slice
-			}
-			return unmarshalFixedS64Value
-		case "varint":
-			if pointer {
-				return unmarshalInt64Ptr
-			}
-			if slice {
-				return unmarshalInt64Slice
-			}
-			return unmarshalInt64Value
-		case "zigzag64":
-			if pointer {
-				return unmarshalSint64Ptr
-			}
-			if slice {
-				return unmarshalSint64Slice
-			}
-			return unmarshalSint64Value
-		}
-	case reflect.Uint32:
-		switch encoding {
-		case "fixed32":
-			if pointer {
-				return unmarshalFixed32Ptr
-			}
-			if slice {
-				return unmarshalFixed32Slice
-			}
-			return unmarshalFixed32Value
-		case "varint":
-			if pointer {
-				return unmarshalUint32Ptr
-			}
-			if slice {
-				return unmarshalUint32Slice
-			}
-			return unmarshalUint32Value
-		}
-	case reflect.Uint64:
-		switch encoding {
-		case "fixed64":
-			if pointer {
-				return unmarshalFixed64Ptr
-			}
-			if slice {
-				return unmarshalFixed64Slice
-			}
-			return unmarshalFixed64Value
-		case "varint":
-			if pointer {
-				return unmarshalUint64Ptr
-			}
-			if slice {
-				return unmarshalUint64Slice
-			}
-			return unmarshalUint64Value
-		}
-	case reflect.Float32:
-		if pointer {
-			return unmarshalFloat32Ptr
-		}
-		if slice {
-			return unmarshalFloat32Slice
-		}
-		return unmarshalFloat32Value
-	case reflect.Float64:
-		if pointer {
-			return unmarshalFloat64Ptr
-		}
-		if slice {
-			return unmarshalFloat64Slice
-		}
-		return unmarshalFloat64Value
-	case reflect.Map:
-		panic("map type in typeUnmarshaler in " + t.Name())
-	case reflect.Slice:
-		if pointer {
-			panic("bad pointer in slice case in " + t.Name())
-		}
-		if slice {
-			return unmarshalBytesSlice
-		}
-		return unmarshalBytesValue
-	case reflect.String:
-		if validateUTF8 {
-			if pointer {
-				return unmarshalUTF8StringPtr
-			}
-			if slice {
-				return unmarshalUTF8StringSlice
-			}
-			return unmarshalUTF8StringValue
-		}
-		if pointer {
-			return unmarshalStringPtr
-		}
-		if slice {
-			return unmarshalStringSlice
-		}
-		return unmarshalStringValue
-	case reflect.Struct:
-		// message or group field
-		if !pointer {
-			panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
-		}
-		switch encoding {
-		case "bytes":
-			if slice {
-				return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
-			}
-			return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
-		case "group":
-			if slice {
-				return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
-			}
-			return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
-		}
-	}
-	panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
-}
-
-// Below are all the unmarshalers for individual fields of various types.
-
-func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int64(x)
-	*f.toInt64() = v
-	return b, nil
-}
-
-func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int64(x)
-	*f.toInt64Ptr() = &v
-	return b, nil
-}
-
-func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			b = b[n:]
-			v := int64(x)
-			s := f.toInt64Slice()
-			*s = append(*s, v)
-		}
-		return res, nil
-	}
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int64(x)
-	s := f.toInt64Slice()
-	*s = append(*s, v)
-	return b, nil
-}
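
unmarshalInt64Slice (like the other repeated-scalar unmarshalers) accepts both encodings of a repeated field: a `WireBytes` packed payload, i.e. a byte length followed by back-to-back varints, and a plain varint per element. A hedged sketch of what a packed payload looks like on the wire; the field number and values are chosen for illustration only:

```go
package main

import "fmt"

// encodeVarint matches the wire format used above: 7 bits per byte, MSB as continuation.
func encodeVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

func main() {
	// Hypothetical packed repeated int64 field, field number 4, values [1, 2, 300]:
	// key = 4<<3|2 (WireBytes), then a byte length, then back-to-back varints.
	var data []byte
	for _, v := range []uint64{1, 2, 300} {
		data = encodeVarint(data, v)
	}
	payload := append([]byte{4<<3 | 2, byte(len(data))}, data...)
	fmt.Printf("% x\n", payload) // 22 04 01 02 ac 02
}
```
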
-
-func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int64(x>>1) ^ int64(x)<<63>>63
-	*f.toInt64() = v
-	return b, nil
-}
-
-func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int64(x>>1) ^ int64(x)<<63>>63
-	*f.toInt64Ptr() = &v
-	return b, nil
-}
-
-func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			b = b[n:]
-			v := int64(x>>1) ^ int64(x)<<63>>63
-			s := f.toInt64Slice()
-			*s = append(*s, v)
-		}
-		return res, nil
-	}
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int64(x>>1) ^ int64(x)<<63>>63
-	s := f.toInt64Slice()
-	*s = append(*s, v)
-	return b, nil
-}
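
The expression `int64(x>>1) ^ int64(x)<<63>>63` used by the sint64 unmarshalers is zigzag decoding: the low bit carries the sign and the remaining bits the magnitude, so 0, 1, 2, 3, ... map to 0, -1, 1, -2, .... A small standalone check of that mapping:

```go
package main

import "fmt"

// zigzagDecode64 is the same expression the sint64 unmarshalers use:
// the low bit selects the sign, the remaining bits hold the magnitude.
func zigzagDecode64(x uint64) int64 {
	return int64(x>>1) ^ int64(x)<<63>>63
}

func main() {
	for _, x := range []uint64{0, 1, 2, 3, 4} {
		fmt.Println(x, "->", zigzagDecode64(x)) // 0, -1, 1, -2, 2
	}
}
```
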
-
-func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := uint64(x)
-	*f.toUint64() = v
-	return b, nil
-}
-
-func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := uint64(x)
-	*f.toUint64Ptr() = &v
-	return b, nil
-}
-
-func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			b = b[n:]
-			v := uint64(x)
-			s := f.toUint64Slice()
-			*s = append(*s, v)
-		}
-		return res, nil
-	}
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := uint64(x)
-	s := f.toUint64Slice()
-	*s = append(*s, v)
-	return b, nil
-}
-
-func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int32(x)
-	*f.toInt32() = v
-	return b, nil
-}
-
-func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int32(x)
-	f.setInt32Ptr(v)
-	return b, nil
-}
-
-func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			b = b[n:]
-			v := int32(x)
-			f.appendInt32Slice(v)
-		}
-		return res, nil
-	}
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int32(x)
-	f.appendInt32Slice(v)
-	return b, nil
-}
-
-func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int32(x>>1) ^ int32(x)<<31>>31
-	*f.toInt32() = v
-	return b, nil
-}
-
-func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int32(x>>1) ^ int32(x)<<31>>31
-	f.setInt32Ptr(v)
-	return b, nil
-}
-
-func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			b = b[n:]
-			v := int32(x>>1) ^ int32(x)<<31>>31
-			f.appendInt32Slice(v)
-		}
-		return res, nil
-	}
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int32(x>>1) ^ int32(x)<<31>>31
-	f.appendInt32Slice(v)
-	return b, nil
-}
-
-func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := uint32(x)
-	*f.toUint32() = v
-	return b, nil
-}
-
-func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := uint32(x)
-	*f.toUint32Ptr() = &v
-	return b, nil
-}
-
-func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			b = b[n:]
-			v := uint32(x)
-			s := f.toUint32Slice()
-			*s = append(*s, v)
-		}
-		return res, nil
-	}
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := uint32(x)
-	s := f.toUint32Slice()
-	*s = append(*s, v)
-	return b, nil
-}
-
-func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-	*f.toUint64() = v
-	return b[8:], nil
-}
-
-func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-	*f.toUint64Ptr() = &v
-	return b[8:], nil
-}
-
-func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			if len(b) < 8 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-			s := f.toUint64Slice()
-			*s = append(*s, v)
-			b = b[8:]
-		}
-		return res, nil
-	}
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-	s := f.toUint64Slice()
-	*s = append(*s, v)
-	return b[8:], nil
-}
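
The long shift-and-OR chains in the fixed64 unmarshalers reassemble a little-endian 64-bit value byte by byte; the result matches `binary.LittleEndian.Uint64` from the standard library. A quick equivalence check with illustrative bytes:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}

	// Manual little-endian reassembly, as in the fixed64 unmarshalers above.
	manual := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56

	// Equivalent standard-library form.
	std := binary.LittleEndian.Uint64(b)

	fmt.Println(manual == std)  // true
	fmt.Printf("%#x\n", manual) // 0x807060504030201
}
```
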
-
-func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
-	*f.toInt64() = v
-	return b[8:], nil
-}
-
-func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
-	*f.toInt64Ptr() = &v
-	return b[8:], nil
-}
-
-func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			if len(b) < 8 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
-			s := f.toInt64Slice()
-			*s = append(*s, v)
-			b = b[8:]
-		}
-		return res, nil
-	}
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
-	s := f.toInt64Slice()
-	*s = append(*s, v)
-	return b[8:], nil
-}
-
-func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-	*f.toUint32() = v
-	return b[4:], nil
-}
-
-func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-	*f.toUint32Ptr() = &v
-	return b[4:], nil
-}
-
-func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			if len(b) < 4 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-			s := f.toUint32Slice()
-			*s = append(*s, v)
-			b = b[4:]
-		}
-		return res, nil
-	}
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-	s := f.toUint32Slice()
-	*s = append(*s, v)
-	return b[4:], nil
-}
-
-func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
-	*f.toInt32() = v
-	return b[4:], nil
-}
-
-func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
-	f.setInt32Ptr(v)
-	return b[4:], nil
-}
-
-func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			if len(b) < 4 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
-			f.appendInt32Slice(v)
-			b = b[4:]
-		}
-		return res, nil
-	}
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
-	f.appendInt32Slice(v)
-	return b[4:], nil
-}
-
-func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	// Note: any length varint is allowed, even though any sane
-	// encoder will use one byte.
-	// See https://github.com/golang/protobuf/issues/76
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	// TODO: check if x>1? Tests seem to indicate no.
-	v := x != 0
-	*f.toBool() = v
-	return b[n:], nil
-}
-
-func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := x != 0
-	*f.toBoolPtr() = &v
-	return b[n:], nil
-}
-
-func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			v := x != 0
-			s := f.toBoolSlice()
-			*s = append(*s, v)
-			b = b[n:]
-		}
-		return res, nil
-	}
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := x != 0
-	s := f.toBoolSlice()
-	*s = append(*s, v)
-	return b[n:], nil
-}
-
-func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
-	*f.toFloat64() = v
-	return b[8:], nil
-}
-
-func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
-	*f.toFloat64Ptr() = &v
-	return b[8:], nil
-}
-
-func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			if len(b) < 8 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
-			s := f.toFloat64Slice()
-			*s = append(*s, v)
-			b = b[8:]
-		}
-		return res, nil
-	}
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
-	s := f.toFloat64Slice()
-	*s = append(*s, v)
-	return b[8:], nil
-}
-
-func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
-	*f.toFloat32() = v
-	return b[4:], nil
-}
-
-func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
-	*f.toFloat32Ptr() = &v
-	return b[4:], nil
-}
-
-func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			if len(b) < 4 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
-			s := f.toFloat32Slice()
-			*s = append(*s, v)
-			b = b[4:]
-		}
-		return res, nil
-	}
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
-	s := f.toFloat32Slice()
-	*s = append(*s, v)
-	return b[4:], nil
-}
-
-func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := string(b[:x])
-	*f.toString() = v
-	return b[x:], nil
-}
-
-func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := string(b[:x])
-	*f.toStringPtr() = &v
-	return b[x:], nil
-}
-
-func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := string(b[:x])
-	s := f.toStringSlice()
-	*s = append(*s, v)
-	return b[x:], nil
-}
-
-func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := string(b[:x])
-	*f.toString() = v
-	if !utf8.ValidString(v) {
-		return b[x:], errInvalidUTF8
-	}
-	return b[x:], nil
-}
-
-func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := string(b[:x])
-	*f.toStringPtr() = &v
-	if !utf8.ValidString(v) {
-		return b[x:], errInvalidUTF8
-	}
-	return b[x:], nil
-}
-
-func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := string(b[:x])
-	s := f.toStringSlice()
-	*s = append(*s, v)
-	if !utf8.ValidString(v) {
-		return b[x:], errInvalidUTF8
-	}
-	return b[x:], nil
-}
-
-var emptyBuf [0]byte
-
-func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	// The use of append here is a trick which avoids the zeroing
-	// that would be required if we used a make/copy pair.
-	// We append to emptyBuf instead of nil because we want
-	// a non-nil result even when the length is 0.
-	v := append(emptyBuf[:], b[:x]...)
-	*f.toBytes() = v
-	return b[x:], nil
-}
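
The comment in unmarshalBytesValue points out two properties of the append-to-`emptyBuf` idiom: it avoids the zeroing a make/copy pair would do, and it yields a non-nil slice even for a zero-length value, unlike appending to nil. A small sketch of the second property:

```go
package main

import "fmt"

var emptyBuf [0]byte

func main() {
	var src []byte // zero-length field value

	// Appending to a slice of the zero-length array keeps the result non-nil...
	fromEmptyBuf := append(emptyBuf[:], src...)
	// ...whereas appending to a nil slice leaves it nil.
	fromNil := append([]byte(nil), src...)

	fmt.Println(fromEmptyBuf == nil, fromNil == nil) // false true
}
```
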
-
-func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := append(emptyBuf[:], b[:x]...)
-	s := f.toBytesSlice()
-	*s = append(*s, v)
-	return b[x:], nil
-}
-
-func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
-	return func(b []byte, f pointer, w int) ([]byte, error) {
-		if w != WireBytes {
-			return b, errInternalBadWireType
-		}
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		// First read the message field to see if something is there.
-		// The semantics of multiple submessages are weird.  Instead of
-		// the last one winning (as it is for all other fields), multiple
-		// submessages are merged.
-		v := f.getPointer()
-		if v.isNil() {
-			v = valToPointer(reflect.New(sub.typ))
-			f.setPointer(v)
-		}
-		err := sub.unmarshal(v, b[:x])
-		if err != nil {
-			if r, ok := err.(*RequiredNotSetError); ok {
-				r.field = name + "." + r.field
-			} else {
-				return nil, err
-			}
-		}
-		return b[x:], err
-	}
-}
-
-func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
-	return func(b []byte, f pointer, w int) ([]byte, error) {
-		if w != WireBytes {
-			return b, errInternalBadWireType
-		}
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		v := valToPointer(reflect.New(sub.typ))
-		err := sub.unmarshal(v, b[:x])
-		if err != nil {
-			if r, ok := err.(*RequiredNotSetError); ok {
-				r.field = name + "." + r.field
-			} else {
-				return nil, err
-			}
-		}
-		f.appendPointer(v)
-		return b[x:], err
-	}
-}
-
-func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
-	return func(b []byte, f pointer, w int) ([]byte, error) {
-		if w != WireStartGroup {
-			return b, errInternalBadWireType
-		}
-		x, y := findEndGroup(b)
-		if x < 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		v := f.getPointer()
-		if v.isNil() {
-			v = valToPointer(reflect.New(sub.typ))
-			f.setPointer(v)
-		}
-		err := sub.unmarshal(v, b[:x])
-		if err != nil {
-			if r, ok := err.(*RequiredNotSetError); ok {
-				r.field = name + "." + r.field
-			} else {
-				return nil, err
-			}
-		}
-		return b[y:], err
-	}
-}
-
-func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
-	return func(b []byte, f pointer, w int) ([]byte, error) {
-		if w != WireStartGroup {
-			return b, errInternalBadWireType
-		}
-		x, y := findEndGroup(b)
-		if x < 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		v := valToPointer(reflect.New(sub.typ))
-		err := sub.unmarshal(v, b[:x])
-		if err != nil {
-			if r, ok := err.(*RequiredNotSetError); ok {
-				r.field = name + "." + r.field
-			} else {
-				return nil, err
-			}
-		}
-		f.appendPointer(v)
-		return b[y:], err
-	}
-}
-
-func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
-	t := f.Type
-	kt := t.Key()
-	vt := t.Elem()
-	unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
-	unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
-	return func(b []byte, f pointer, w int) ([]byte, error) {
-		// The map entry is a submessage. Figure out how big it is.
-		if w != WireBytes {
-			return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
-		}
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		r := b[x:] // unused data to return
-		b = b[:x]  // data for map entry
-
-		// Note: we could use #keys * #values ~= 200 functions
-		// to do map decoding without reflection. Probably not worth it.
-		// Maps will be somewhat slow. Oh well.
-
-		// Read key and value from data.
-		var nerr nonFatal
-		k := reflect.New(kt)
-		v := reflect.New(vt)
-		for len(b) > 0 {
-			x, n := decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			wire := int(x) & 7
-			b = b[n:]
-
-			var err error
-			switch x >> 3 {
-			case 1:
-				b, err = unmarshalKey(b, valToPointer(k), wire)
-			case 2:
-				b, err = unmarshalVal(b, valToPointer(v), wire)
-			default:
-				err = errInternalBadWireType // skip unknown tag
-			}
-
-			if nerr.Merge(err) {
-				continue
-			}
-			if err != errInternalBadWireType {
-				return nil, err
-			}
-
-			// Skip past unknown fields.
-			b, err = skipField(b, wire)
-			if err != nil {
-				return nil, err
-			}
-		}
-
-		// Get map, allocate if needed.
-		m := f.asPointerTo(t).Elem() // an addressable map[K]T
-		if m.IsNil() {
-			m.Set(reflect.MakeMap(t))
-		}
-
-		// Insert into map.
-		m.SetMapIndex(k.Elem(), v.Elem())
-
-		return r, nerr.E
-	}
-}
-
-// makeUnmarshalOneof makes an unmarshaler for oneof fields.
-// for:
-// message Msg {
-//   oneof F {
-//     int64 X = 1;
-//     double Y = 2;
-//   }
-// }
-// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
-// ityp is the interface type of the oneof field (e.g. isMsg_F).
-// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
-// Note that this function will be called once for each case in the oneof.
-func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
-	sf := typ.Field(0)
-	field0 := toField(&sf)
-	return func(b []byte, f pointer, w int) ([]byte, error) {
-		// Allocate holder for value.
-		v := reflect.New(typ)
-
-		// Unmarshal data into holder.
-		// We unmarshal into the first field of the holder object.
-		var err error
-		var nerr nonFatal
-		b, err = unmarshal(b, valToPointer(v).offset(field0), w)
-		if !nerr.Merge(err) {
-			return nil, err
-		}
-
-		// Write pointer to holder into target field.
-		f.asPointerTo(ityp).Elem().Set(v)
-
-		return b, nerr.E
-	}
-}
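
makeUnmarshalOneof decodes into the single field of a generated wrapper struct and then stores a pointer to that wrapper in the oneof interface field of the containing message. A hedged sketch of the shapes involved, using the hypothetical names from the comment above (Msg, Msg_X, isMsg_F); none of these types are declared in this file:

```go
package main

import "fmt"

// Illustrative shapes only, mirroring the comment on makeUnmarshalOneof.
type isMsg_F interface{ isMsg_F() }

type Msg struct {
	F isMsg_F // oneof F: holds exactly one wrapper value
}

type Msg_X struct{ X int64 } // generated wrapper for "int64 X = 1"

func (*Msg_X) isMsg_F() {}

func main() {
	m := Msg{}
	// What makeUnmarshalOneof effectively does after decoding the case value:
	m.F = &Msg_X{X: 42}
	fmt.Println(m.F.(*Msg_X).X) // 42
}
```
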
-
-// Error used by decode internally.
-var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
-
-// skipField skips past a field of type wire and returns the remaining bytes.
-func skipField(b []byte, wire int) ([]byte, error) {
-	switch wire {
-	case WireVarint:
-		_, k := decodeVarint(b)
-		if k == 0 {
-			return b, io.ErrUnexpectedEOF
-		}
-		b = b[k:]
-	case WireFixed32:
-		if len(b) < 4 {
-			return b, io.ErrUnexpectedEOF
-		}
-		b = b[4:]
-	case WireFixed64:
-		if len(b) < 8 {
-			return b, io.ErrUnexpectedEOF
-		}
-		b = b[8:]
-	case WireBytes:
-		m, k := decodeVarint(b)
-		if k == 0 || uint64(len(b)-k) < m {
-			return b, io.ErrUnexpectedEOF
-		}
-		b = b[uint64(k)+m:]
-	case WireStartGroup:
-		_, i := findEndGroup(b)
-		if i == -1 {
-			return b, io.ErrUnexpectedEOF
-		}
-		b = b[i:]
-	default:
-		return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
-	}
-	return b, nil
-}
-
-// findEndGroup finds the index of the next EndGroup tag.
-// Groups may be nested, so the "next" EndGroup tag is the first
-// unpaired EndGroup.
-// findEndGroup returns the indexes of the start and end of the EndGroup tag.
-// Returns (-1,-1) if it can't find one.
-func findEndGroup(b []byte) (int, int) {
-	depth := 1
-	i := 0
-	for {
-		x, n := decodeVarint(b[i:])
-		if n == 0 {
-			return -1, -1
-		}
-		j := i
-		i += n
-		switch x & 7 {
-		case WireVarint:
-			_, k := decodeVarint(b[i:])
-			if k == 0 {
-				return -1, -1
-			}
-			i += k
-		case WireFixed32:
-			if len(b)-4 < i {
-				return -1, -1
-			}
-			i += 4
-		case WireFixed64:
-			if len(b)-8 < i {
-				return -1, -1
-			}
-			i += 8
-		case WireBytes:
-			m, k := decodeVarint(b[i:])
-			if k == 0 {
-				return -1, -1
-			}
-			i += k
-			if uint64(len(b)-i) < m {
-				return -1, -1
-			}
-			i += int(m)
-		case WireStartGroup:
-			depth++
-		case WireEndGroup:
-			depth--
-			if depth == 0 {
-				return j, i
-			}
-		default:
-			return -1, -1
-		}
-	}
-}
-
-// encodeVarint appends a varint-encoded integer to b and returns the result.
-func encodeVarint(b []byte, x uint64) []byte {
-	for x >= 1<<7 {
-		b = append(b, byte(x&0x7f|0x80))
-		x >>= 7
-	}
-	return append(b, byte(x))
-}
-
-// decodeVarint reads a varint-encoded integer from b.
-// Returns the decoded integer and the number of bytes read.
-// If there is an error, it returns 0,0.
-func decodeVarint(b []byte) (uint64, int) {
-	var x, y uint64
-	if len(b) == 0 {
-		goto bad
-	}
-	x = uint64(b[0])
-	if x < 0x80 {
-		return x, 1
-	}
-	x -= 0x80
-
-	if len(b) <= 1 {
-		goto bad
-	}
-	y = uint64(b[1])
-	x += y << 7
-	if y < 0x80 {
-		return x, 2
-	}
-	x -= 0x80 << 7
-
-	if len(b) <= 2 {
-		goto bad
-	}
-	y = uint64(b[2])
-	x += y << 14
-	if y < 0x80 {
-		return x, 3
-	}
-	x -= 0x80 << 14
-
-	if len(b) <= 3 {
-		goto bad
-	}
-	y = uint64(b[3])
-	x += y << 21
-	if y < 0x80 {
-		return x, 4
-	}
-	x -= 0x80 << 21
-
-	if len(b) <= 4 {
-		goto bad
-	}
-	y = uint64(b[4])
-	x += y << 28
-	if y < 0x80 {
-		return x, 5
-	}
-	x -= 0x80 << 28
-
-	if len(b) <= 5 {
-		goto bad
-	}
-	y = uint64(b[5])
-	x += y << 35
-	if y < 0x80 {
-		return x, 6
-	}
-	x -= 0x80 << 35
-
-	if len(b) <= 6 {
-		goto bad
-	}
-	y = uint64(b[6])
-	x += y << 42
-	if y < 0x80 {
-		return x, 7
-	}
-	x -= 0x80 << 42
-
-	if len(b) <= 7 {
-		goto bad
-	}
-	y = uint64(b[7])
-	x += y << 49
-	if y < 0x80 {
-		return x, 8
-	}
-	x -= 0x80 << 49
-
-	if len(b) <= 8 {
-		goto bad
-	}
-	y = uint64(b[8])
-	x += y << 56
-	if y < 0x80 {
-		return x, 9
-	}
-	x -= 0x80 << 56
-
-	if len(b) <= 9 {
-		goto bad
-	}
-	y = uint64(b[9])
-	x += y << 63
-	if y < 2 {
-		return x, 10
-	}
-
-bad:
-	return 0, 0
-}
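
decodeVarint above is a manually unrolled base-128 decoder: up to 10 bytes, 7 payload bits per byte, with the high bit as a continuation flag. A compact round-trip sketch using the same encoding rule; the loop-based decoder here is a simplified equivalent for illustration, not the unrolled original:

```go
package main

import "fmt"

func encodeVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

// decodeVarint is a simplified loop form of the unrolled decoder above.
func decodeVarint(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b) && i < 10; i++ {
		x |= uint64(b[i]&0x7f) << (7 * i)
		if b[i] < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0 // truncated input
}

func main() {
	enc := encodeVarint(nil, 300)
	fmt.Printf("% x\n", enc) // ac 02
	v, n := decodeVarint(enc)
	fmt.Println(v, n) // 300 2
}
```
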
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
deleted file mode 100644
index 1aaee72..0000000
--- a/vendor/github.com/golang/protobuf/proto/text.go
+++ /dev/null
@@ -1,843 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for writing the text protocol buffer format.
-
-import (
-	"bufio"
-	"bytes"
-	"encoding"
-	"errors"
-	"fmt"
-	"io"
-	"log"
-	"math"
-	"reflect"
-	"sort"
-	"strings"
-)
-
-var (
-	newline         = []byte("\n")
-	spaces          = []byte("                                        ")
-	endBraceNewline = []byte("}\n")
-	backslashN      = []byte{'\\', 'n'}
-	backslashR      = []byte{'\\', 'r'}
-	backslashT      = []byte{'\\', 't'}
-	backslashDQ     = []byte{'\\', '"'}
-	backslashBS     = []byte{'\\', '\\'}
-	posInf          = []byte("inf")
-	negInf          = []byte("-inf")
-	nan             = []byte("nan")
-)
-
-type writer interface {
-	io.Writer
-	WriteByte(byte) error
-}
-
-// textWriter is an io.Writer that tracks its indentation level.
-type textWriter struct {
-	ind      int
-	complete bool // if the current position is a complete line
-	compact  bool // whether to write out as a one-liner
-	w        writer
-}
-
-func (w *textWriter) WriteString(s string) (n int, err error) {
-	if !strings.Contains(s, "\n") {
-		if !w.compact && w.complete {
-			w.writeIndent()
-		}
-		w.complete = false
-		return io.WriteString(w.w, s)
-	}
-	// WriteString is typically called without newlines, so this
-	// codepath and its copy are rare.  We copy to avoid
-	// duplicating all of Write's logic here.
-	return w.Write([]byte(s))
-}
-
-func (w *textWriter) Write(p []byte) (n int, err error) {
-	newlines := bytes.Count(p, newline)
-	if newlines == 0 {
-		if !w.compact && w.complete {
-			w.writeIndent()
-		}
-		n, err = w.w.Write(p)
-		w.complete = false
-		return n, err
-	}
-
-	frags := bytes.SplitN(p, newline, newlines+1)
-	if w.compact {
-		for i, frag := range frags {
-			if i > 0 {
-				if err := w.w.WriteByte(' '); err != nil {
-					return n, err
-				}
-				n++
-			}
-			nn, err := w.w.Write(frag)
-			n += nn
-			if err != nil {
-				return n, err
-			}
-		}
-		return n, nil
-	}
-
-	for i, frag := range frags {
-		if w.complete {
-			w.writeIndent()
-		}
-		nn, err := w.w.Write(frag)
-		n += nn
-		if err != nil {
-			return n, err
-		}
-		if i+1 < len(frags) {
-			if err := w.w.WriteByte('\n'); err != nil {
-				return n, err
-			}
-			n++
-		}
-	}
-	w.complete = len(frags[len(frags)-1]) == 0
-	return n, nil
-}
-
-func (w *textWriter) WriteByte(c byte) error {
-	if w.compact && c == '\n' {
-		c = ' '
-	}
-	if !w.compact && w.complete {
-		w.writeIndent()
-	}
-	err := w.w.WriteByte(c)
-	w.complete = c == '\n'
-	return err
-}
-
-func (w *textWriter) indent() { w.ind++ }
-
-func (w *textWriter) unindent() {
-	if w.ind == 0 {
-		log.Print("proto: textWriter unindented too far")
-		return
-	}
-	w.ind--
-}
-
-func writeName(w *textWriter, props *Properties) error {
-	if _, err := w.WriteString(props.OrigName); err != nil {
-		return err
-	}
-	if props.Wire != "group" {
-		return w.WriteByte(':')
-	}
-	return nil
-}
-
-func requiresQuotes(u string) bool {
-	// When the type URL contains any characters other than [0-9A-Za-z./_], it must be quoted.
-	for _, ch := range u {
-		switch {
-		case ch == '.' || ch == '/' || ch == '_':
-			continue
-		case '0' <= ch && ch <= '9':
-			continue
-		case 'A' <= ch && ch <= 'Z':
-			continue
-		case 'a' <= ch && ch <= 'z':
-			continue
-		default:
-			return true
-		}
-	}
-	return false
-}
-
-// isAny reports whether sv is a google.protobuf.Any message
-func isAny(sv reflect.Value) bool {
-	type wkt interface {
-		XXX_WellKnownType() string
-	}
-	t, ok := sv.Addr().Interface().(wkt)
-	return ok && t.XXX_WellKnownType() == "Any"
-}
-
-// writeProto3Any writes an expanded google.protobuf.Any message.
-//
-// It returns (false, nil) if the sv value can't be unmarshaled (e.g. because
-// required messages are not linked in).
-//
-// It returns (true, error) when sv was written in expanded format or an error
-// was encountered.
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
-	turl := sv.FieldByName("TypeUrl")
-	val := sv.FieldByName("Value")
-	if !turl.IsValid() || !val.IsValid() {
-		return true, errors.New("proto: invalid google.protobuf.Any message")
-	}
-
-	b, ok := val.Interface().([]byte)
-	if !ok {
-		return true, errors.New("proto: invalid google.protobuf.Any message")
-	}
-
-	parts := strings.Split(turl.String(), "/")
-	mt := MessageType(parts[len(parts)-1])
-	if mt == nil {
-		return false, nil
-	}
-	m := reflect.New(mt.Elem())
-	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
-		return false, nil
-	}
-	w.Write([]byte("["))
-	u := turl.String()
-	if requiresQuotes(u) {
-		writeString(w, u)
-	} else {
-		w.Write([]byte(u))
-	}
-	if w.compact {
-		w.Write([]byte("]:<"))
-	} else {
-		w.Write([]byte("]: <\n"))
-		w.ind++
-	}
-	if err := tm.writeStruct(w, m.Elem()); err != nil {
-		return true, err
-	}
-	if w.compact {
-		w.Write([]byte("> "))
-	} else {
-		w.ind--
-		w.Write([]byte(">\n"))
-	}
-	return true, nil
-}
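
writeProto3Any resolves the concrete message type from the last `/`-separated segment of the Any type URL and only expands the value when that type is registered. A tiny sketch of the same split; the URL value is illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative Any type URL; the registered message name is the last segment.
	turl := "type.googleapis.com/google.protobuf.Duration"
	parts := strings.Split(turl, "/")
	fmt.Println(parts[len(parts)-1]) // google.protobuf.Duration
}
```
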
-
-func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
-	if tm.ExpandAny && isAny(sv) {
-		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
-			return err
-		}
-	}
-	st := sv.Type()
-	sprops := GetProperties(st)
-	for i := 0; i < sv.NumField(); i++ {
-		fv := sv.Field(i)
-		props := sprops.Prop[i]
-		name := st.Field(i).Name
-
-		if name == "XXX_NoUnkeyedLiteral" {
-			continue
-		}
-
-		if strings.HasPrefix(name, "XXX_") {
-			// There are two XXX_ fields:
-			//   XXX_unrecognized []byte
-			//   XXX_extensions   map[int32]proto.Extension
-			// The first is handled here;
-			// the second is handled at the bottom of this function.
-			if name == "XXX_unrecognized" && !fv.IsNil() {
-				if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
-					return err
-				}
-			}
-			continue
-		}
-		if fv.Kind() == reflect.Ptr && fv.IsNil() {
-			// Field not filled in. This could be an optional field or
-			// a required field that wasn't filled in. Either way, there
-			// isn't anything we can show for it.
-			continue
-		}
-		if fv.Kind() == reflect.Slice && fv.IsNil() {
-			// Repeated field that is empty, or a bytes field that is unused.
-			continue
-		}
-
-		if props.Repeated && fv.Kind() == reflect.Slice {
-			// Repeated field.
-			for j := 0; j < fv.Len(); j++ {
-				if err := writeName(w, props); err != nil {
-					return err
-				}
-				if !w.compact {
-					if err := w.WriteByte(' '); err != nil {
-						return err
-					}
-				}
-				v := fv.Index(j)
-				if v.Kind() == reflect.Ptr && v.IsNil() {
-					// A nil message in a repeated field is not valid,
-					// but we can handle that more gracefully than panicking.
-					if _, err := w.Write([]byte("<nil>\n")); err != nil {
-						return err
-					}
-					continue
-				}
-				if err := tm.writeAny(w, v, props); err != nil {
-					return err
-				}
-				if err := w.WriteByte('\n'); err != nil {
-					return err
-				}
-			}
-			continue
-		}
-		if fv.Kind() == reflect.Map {
-			// Map fields are rendered as a repeated struct with key/value fields.
-			keys := fv.MapKeys()
-			sort.Sort(mapKeys(keys))
-			for _, key := range keys {
-				val := fv.MapIndex(key)
-				if err := writeName(w, props); err != nil {
-					return err
-				}
-				if !w.compact {
-					if err := w.WriteByte(' '); err != nil {
-						return err
-					}
-				}
-				// open struct
-				if err := w.WriteByte('<'); err != nil {
-					return err
-				}
-				if !w.compact {
-					if err := w.WriteByte('\n'); err != nil {
-						return err
-					}
-				}
-				w.indent()
-				// key
-				if _, err := w.WriteString("key:"); err != nil {
-					return err
-				}
-				if !w.compact {
-					if err := w.WriteByte(' '); err != nil {
-						return err
-					}
-				}
-				if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
-					return err
-				}
-				if err := w.WriteByte('\n'); err != nil {
-					return err
-				}
-				// nil values aren't legal, but we can avoid panicking because of them.
-				if val.Kind() != reflect.Ptr || !val.IsNil() {
-					// value
-					if _, err := w.WriteString("value:"); err != nil {
-						return err
-					}
-					if !w.compact {
-						if err := w.WriteByte(' '); err != nil {
-							return err
-						}
-					}
-					if err := tm.writeAny(w, val, props.MapValProp); err != nil {
-						return err
-					}
-					if err := w.WriteByte('\n'); err != nil {
-						return err
-					}
-				}
-				// close struct
-				w.unindent()
-				if err := w.WriteByte('>'); err != nil {
-					return err
-				}
-				if err := w.WriteByte('\n'); err != nil {
-					return err
-				}
-			}
-			continue
-		}
-		if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
-			// empty bytes field
-			continue
-		}
-		if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
-			// proto3 non-repeated scalar field; skip if zero value
-			if isProto3Zero(fv) {
-				continue
-			}
-		}
-
-		if fv.Kind() == reflect.Interface {
-			// Check if it is a oneof.
-			if st.Field(i).Tag.Get("protobuf_oneof") != "" {
-				// fv is nil, or holds a pointer to generated struct.
-				// That generated struct has exactly one field,
-				// which has a protobuf struct tag.
-				if fv.IsNil() {
-					continue
-				}
-				inner := fv.Elem().Elem() // interface -> *T -> T
-				tag := inner.Type().Field(0).Tag.Get("protobuf")
-				props = new(Properties) // Overwrite the outer props var, but not its pointee.
-				props.Parse(tag)
-				// Write the value in the oneof, not the oneof itself.
-				fv = inner.Field(0)
-
-				// Special case to cope with malformed messages gracefully:
-				// If the value in the oneof is a nil pointer, don't panic
-				// in writeAny.
-				if fv.Kind() == reflect.Ptr && fv.IsNil() {
-					// Use errors.New so writeAny won't render quotes.
-					msg := errors.New("/* nil */")
-					fv = reflect.ValueOf(&msg).Elem()
-				}
-			}
-		}
-
-		if err := writeName(w, props); err != nil {
-			return err
-		}
-		if !w.compact {
-			if err := w.WriteByte(' '); err != nil {
-				return err
-			}
-		}
-
-		// Enums have a String method, so writeAny will work fine.
-		if err := tm.writeAny(w, fv, props); err != nil {
-			return err
-		}
-
-		if err := w.WriteByte('\n'); err != nil {
-			return err
-		}
-	}
-
-	// Extensions (the XXX_extensions field).
-	pv := sv.Addr()
-	if _, err := extendable(pv.Interface()); err == nil {
-		if err := tm.writeExtensions(w, pv); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// writeAny writes an arbitrary field.
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
-	v = reflect.Indirect(v)
-
-	// Floats have special cases.
-	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
-		x := v.Float()
-		var b []byte
-		switch {
-		case math.IsInf(x, 1):
-			b = posInf
-		case math.IsInf(x, -1):
-			b = negInf
-		case math.IsNaN(x):
-			b = nan
-		}
-		if b != nil {
-			_, err := w.Write(b)
-			return err
-		}
-		// Other values are handled below.
-	}
-
-	// We don't attempt to serialise every possible value type; only those
-	// that can occur in protocol buffers.
-	switch v.Kind() {
-	case reflect.Slice:
-		// Should only be a []byte; repeated fields are handled in writeStruct.
-		if err := writeString(w, string(v.Bytes())); err != nil {
-			return err
-		}
-	case reflect.String:
-		if err := writeString(w, v.String()); err != nil {
-			return err
-		}
-	case reflect.Struct:
-		// Required/optional group/message.
-		var bra, ket byte = '<', '>'
-		if props != nil && props.Wire == "group" {
-			bra, ket = '{', '}'
-		}
-		if err := w.WriteByte(bra); err != nil {
-			return err
-		}
-		if !w.compact {
-			if err := w.WriteByte('\n'); err != nil {
-				return err
-			}
-		}
-		w.indent()
-		if v.CanAddr() {
-			// Calling v.Interface on a struct causes the reflect package to
-			// copy the entire struct. This is racy with the new Marshaler
-			// since we atomically update the XXX_sizecache.
-			//
-			// Thus, we retrieve a pointer to the struct if possible to avoid
-			// a race since v.Interface on the pointer doesn't copy the struct.
-			//
-			// If v is not addressable, then we are not worried about a race
-			// since it implies that the binary Marshaler cannot possibly be
-			// mutating this value.
-			v = v.Addr()
-		}
-		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
-			text, err := etm.MarshalText()
-			if err != nil {
-				return err
-			}
-			if _, err = w.Write(text); err != nil {
-				return err
-			}
-		} else {
-			if v.Kind() == reflect.Ptr {
-				v = v.Elem()
-			}
-			if err := tm.writeStruct(w, v); err != nil {
-				return err
-			}
-		}
-		w.unindent()
-		if err := w.WriteByte(ket); err != nil {
-			return err
-		}
-	default:
-		_, err := fmt.Fprint(w, v.Interface())
-		return err
-	}
-	return nil
-}
-
-// equivalent to C's isprint.
-func isprint(c byte) bool {
-	return c >= 0x20 && c < 0x7f
-}
-
-// writeString writes a string in the protocol buffer text format.
-// It is similar to strconv.Quote except we don't use Go escape sequences,
-// we treat the string as a byte sequence, and we use octal escapes.
-// These differences are to maintain interoperability with the other
-// languages' implementations of the text format.
-func writeString(w *textWriter, s string) error {
-	// use WriteByte here to get any needed indent
-	if err := w.WriteByte('"'); err != nil {
-		return err
-	}
-	// Loop over the bytes, not the runes.
-	for i := 0; i < len(s); i++ {
-		var err error
-		// Divergence from C++: we don't escape apostrophes.
-		// There's no need to escape them, and the C++ parser
-		// copes with a naked apostrophe.
-		switch c := s[i]; c {
-		case '\n':
-			_, err = w.w.Write(backslashN)
-		case '\r':
-			_, err = w.w.Write(backslashR)
-		case '\t':
-			_, err = w.w.Write(backslashT)
-		case '"':
-			_, err = w.w.Write(backslashDQ)
-		case '\\':
-			_, err = w.w.Write(backslashBS)
-		default:
-			if isprint(c) {
-				err = w.w.WriteByte(c)
-			} else {
-				_, err = fmt.Fprintf(w.w, "\\%03o", c)
-			}
-		}
-		if err != nil {
-			return err
-		}
-	}
-	return w.WriteByte('"')
-}
-
-func writeUnknownStruct(w *textWriter, data []byte) (err error) {
-	if !w.compact {
-		if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
-			return err
-		}
-	}
-	b := NewBuffer(data)
-	for b.index < len(b.buf) {
-		x, err := b.DecodeVarint()
-		if err != nil {
-			_, err := fmt.Fprintf(w, "/* %v */\n", err)
-			return err
-		}
-		wire, tag := x&7, x>>3
-		if wire == WireEndGroup {
-			w.unindent()
-			if _, err := w.Write(endBraceNewline); err != nil {
-				return err
-			}
-			continue
-		}
-		if _, err := fmt.Fprint(w, tag); err != nil {
-			return err
-		}
-		if wire != WireStartGroup {
-			if err := w.WriteByte(':'); err != nil {
-				return err
-			}
-		}
-		if !w.compact || wire == WireStartGroup {
-			if err := w.WriteByte(' '); err != nil {
-				return err
-			}
-		}
-		switch wire {
-		case WireBytes:
-			buf, e := b.DecodeRawBytes(false)
-			if e == nil {
-				_, err = fmt.Fprintf(w, "%q", buf)
-			} else {
-				_, err = fmt.Fprintf(w, "/* %v */", e)
-			}
-		case WireFixed32:
-			x, err = b.DecodeFixed32()
-			err = writeUnknownInt(w, x, err)
-		case WireFixed64:
-			x, err = b.DecodeFixed64()
-			err = writeUnknownInt(w, x, err)
-		case WireStartGroup:
-			err = w.WriteByte('{')
-			w.indent()
-		case WireVarint:
-			x, err = b.DecodeVarint()
-			err = writeUnknownInt(w, x, err)
-		default:
-			_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
-		}
-		if err != nil {
-			return err
-		}
-		if err = w.WriteByte('\n'); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func writeUnknownInt(w *textWriter, x uint64, err error) error {
-	if err == nil {
-		_, err = fmt.Fprint(w, x)
-	} else {
-		_, err = fmt.Fprintf(w, "/* %v */", err)
-	}
-	return err
-}
-
-type int32Slice []int32
-
-func (s int32Slice) Len() int           { return len(s) }
-func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-
-// writeExtensions writes all the extensions in pv.
-// pv is assumed to be a pointer to a protocol message struct that is extendable.
-func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
-	emap := extensionMaps[pv.Type().Elem()]
-	ep, _ := extendable(pv.Interface())
-
-	// Order the extensions by ID.
-	// This isn't strictly necessary, but it will give us
-	// canonical output, which will also make testing easier.
-	m, mu := ep.extensionsRead()
-	if m == nil {
-		return nil
-	}
-	mu.Lock()
-	ids := make([]int32, 0, len(m))
-	for id := range m {
-		ids = append(ids, id)
-	}
-	sort.Sort(int32Slice(ids))
-	mu.Unlock()
-
-	for _, extNum := range ids {
-		ext := m[extNum]
-		var desc *ExtensionDesc
-		if emap != nil {
-			desc = emap[extNum]
-		}
-		if desc == nil {
-			// Unknown extension.
-			if err := writeUnknownStruct(w, ext.enc); err != nil {
-				return err
-			}
-			continue
-		}
-
-		pb, err := GetExtension(ep, desc)
-		if err != nil {
-			return fmt.Errorf("failed getting extension: %v", err)
-		}
-
-		// Repeated extensions will appear as a slice.
-		if !desc.repeated() {
-			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
-				return err
-			}
-		} else {
-			v := reflect.ValueOf(pb)
-			for i := 0; i < v.Len(); i++ {
-				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
-					return err
-				}
-			}
-		}
-	}
-	return nil
-}
-
-func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
-	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
-		return err
-	}
-	if !w.compact {
-		if err := w.WriteByte(' '); err != nil {
-			return err
-		}
-	}
-	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
-		return err
-	}
-	if err := w.WriteByte('\n'); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (w *textWriter) writeIndent() {
-	if !w.complete {
-		return
-	}
-	remain := w.ind * 2
-	for remain > 0 {
-		n := remain
-		if n > len(spaces) {
-			n = len(spaces)
-		}
-		w.w.Write(spaces[:n])
-		remain -= n
-	}
-	w.complete = false
-}
-
-// TextMarshaler is a configurable text format marshaler.
-type TextMarshaler struct {
-	Compact   bool // use compact text format (one line).
-	ExpandAny bool // expand google.protobuf.Any messages of known types
-}
-
-// Marshal writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
-	val := reflect.ValueOf(pb)
-	if pb == nil || val.IsNil() {
-		w.Write([]byte("<nil>"))
-		return nil
-	}
-	var bw *bufio.Writer
-	ww, ok := w.(writer)
-	if !ok {
-		bw = bufio.NewWriter(w)
-		ww = bw
-	}
-	aw := &textWriter{
-		w:        ww,
-		complete: true,
-		compact:  tm.Compact,
-	}
-
-	if etm, ok := pb.(encoding.TextMarshaler); ok {
-		text, err := etm.MarshalText()
-		if err != nil {
-			return err
-		}
-		if _, err = aw.Write(text); err != nil {
-			return err
-		}
-		if bw != nil {
-			return bw.Flush()
-		}
-		return nil
-	}
-	// Dereference the received pointer so we don't have outer < and >.
-	v := reflect.Indirect(val)
-	if err := tm.writeStruct(aw, v); err != nil {
-		return err
-	}
-	if bw != nil {
-		return bw.Flush()
-	}
-	return nil
-}
-
-// Text is the same as Marshal, but returns the string directly.
-func (tm *TextMarshaler) Text(pb Message) string {
-	var buf bytes.Buffer
-	tm.Marshal(&buf, pb)
-	return buf.String()
-}
-
-var (
-	defaultTextMarshaler = TextMarshaler{}
-	compactTextMarshaler = TextMarshaler{Compact: true}
-)
-
-// TODO: consider removing some of the Marshal functions below.
-
-// MarshalText writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
-
-// MarshalTextString is the same as MarshalText, but returns the string directly.
-func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
-
-// CompactText writes a given protocol buffer in compact text format (one line).
-func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
-
-// CompactTextString is the same as CompactText, but returns the string directly.
-func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
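Both the writeString helper removed above and the writeQuotedString helper added in the new text_encode.go below apply the same escaping rules: standard escapes for \n, \r, \t, double quotes and backslashes, octal escapes for other non-printable bytes, and no escaping of apostrophes. A minimal sketch of how that surfaces through the public API, assuming the well-known wrappers package is importable next to this shim (the output shown in the comment is approximate):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// "a", a newline, "b", and a 0x01 control byte in a string field.
	m := wrapperspb.String("a\nb\x01")

	// The newline is escaped as \n and the control byte as an octal \001.
	fmt.Println(proto.CompactTextString(m)) // value:"a\nb\001"
}
```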
diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go
new file mode 100644
index 0000000..47eb3e4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_decode.go
@@ -0,0 +1,801 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	"google.golang.org/protobuf/encoding/prototext"
+	protoV2 "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextUnmarshalV2 = false
+
+// ParseError is returned by UnmarshalText.
+type ParseError struct {
+	Message string
+
+	// Deprecated: Do not use.
+	Line, Offset int
+}
+
+func (e *ParseError) Error() string {
+	if wrapTextUnmarshalV2 {
+		return e.Message
+	}
+	if e.Line == 1 {
+		return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
+	}
+	return fmt.Sprintf("line %d: %v", e.Line, e.Message)
+}
+
+// UnmarshalText parses a proto text formatted string into m.
+func UnmarshalText(s string, m Message) error {
+	if u, ok := m.(encoding.TextUnmarshaler); ok {
+		return u.UnmarshalText([]byte(s))
+	}
+
+	m.Reset()
+	mi := MessageV2(m)
+
+	if wrapTextUnmarshalV2 {
+		err := prototext.UnmarshalOptions{
+			AllowPartial: true,
+		}.Unmarshal([]byte(s), mi)
+		if err != nil {
+			return &ParseError{Message: err.Error()}
+		}
+		return checkRequiredNotSet(mi)
+	} else {
+		if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
+			return err
+		}
+		return checkRequiredNotSet(mi)
+	}
+}
+
+type textParser struct {
+	s            string // remaining input
+	done         bool   // whether the parsing is finished (success or error)
+	backed       bool   // whether back() was called
+	offset, line int
+	cur          token
+}
+
+type token struct {
+	value    string
+	err      *ParseError
+	line     int    // line number
+	offset   int    // byte number from start of input, not start of line
+	unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func newTextParser(s string) *textParser {
+	p := new(textParser)
+	p.s = s
+	p.line = 1
+	p.cur.line = 1
+	return p
+}
+
+func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
+	md := m.Descriptor()
+	fds := md.Fields()
+
+	// A struct is a sequence of "name: value", terminated by one of
+	// '>' or '}', or the end of the input.  A name may also be
+	// "[extension]" or "[type/url]".
+	//
+	// The whole struct can also be an expanded Any message, like:
+	// [type/url] < ... struct contents ... >
+	seen := make(map[protoreflect.FieldNumber]bool)
+	for {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		if tok.value == terminator {
+			break
+		}
+		if tok.value == "[" {
+			if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// This is a normal, non-extension field.
+		name := protoreflect.Name(tok.value)
+		fd := fds.ByName(name)
+		switch {
+		case fd == nil:
+			gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
+			if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
+				fd = gd
+			}
+		case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
+			fd = nil
+		case fd.IsWeak() && fd.Message().IsPlaceholder():
+			fd = nil
+		}
+		if fd == nil {
+			typeName := string(md.FullName())
+			if m, ok := m.Interface().(Message); ok {
+				t := reflect.TypeOf(m)
+				if t.Kind() == reflect.Ptr {
+					typeName = t.Elem().String()
+				}
+			}
+			return p.errorf("unknown field name %q in %v", name, typeName)
+		}
+		if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
+			return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
+		}
+		if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
+			return p.errorf("non-repeated field %q was repeated", fd.Name())
+		}
+		seen[fd.Number()] = true
+
+		// Consume any colon.
+		if err := p.checkForColon(fd); err != nil {
+			return err
+		}
+
+		// Parse into the field.
+		v := m.Get(fd)
+		if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+			v = m.Mutable(fd)
+		}
+		if v, err = p.unmarshalValue(v, fd); err != nil {
+			return err
+		}
+		m.Set(fd, v)
+
+		if err := p.consumeOptionalSeparator(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
+	name, err := p.consumeExtensionOrAnyName()
+	if err != nil {
+		return err
+	}
+
+	// If it contains a slash, it's an Any type URL.
+	if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		// consume an optional colon
+		if tok.value == ":" {
+			tok = p.next()
+			if tok.err != nil {
+				return tok.err
+			}
+		}
+
+		var terminator string
+		switch tok.value {
+		case "<":
+			terminator = ">"
+		case "{":
+			terminator = "}"
+		default:
+			return p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+
+		mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
+		if err != nil {
+			return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
+		}
+		m2 := mt.New()
+		if err := p.unmarshalMessage(m2, terminator); err != nil {
+			return err
+		}
+		b, err := protoV2.Marshal(m2.Interface())
+		if err != nil {
+			return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
+		}
+
+		urlFD := m.Descriptor().Fields().ByName("type_url")
+		valFD := m.Descriptor().Fields().ByName("value")
+		if seen[urlFD.Number()] {
+			return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
+		}
+		if seen[valFD.Number()] {
+			return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
+		}
+		m.Set(urlFD, protoreflect.ValueOfString(name))
+		m.Set(valFD, protoreflect.ValueOfBytes(b))
+		seen[urlFD.Number()] = true
+		seen[valFD.Number()] = true
+		return nil
+	}
+
+	xname := protoreflect.FullName(name)
+	xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
+	if xt == nil && isMessageSet(m.Descriptor()) {
+		xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
+	}
+	if xt == nil {
+		return p.errorf("unrecognized extension %q", name)
+	}
+	fd := xt.TypeDescriptor()
+	if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
+		return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
+	}
+
+	if err := p.checkForColon(fd); err != nil {
+		return err
+	}
+
+	v := m.Get(fd)
+	if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+		v = m.Mutable(fd)
+	}
+	v, err = p.unmarshalValue(v, fd)
+	if err != nil {
+		return err
+	}
+	m.Set(fd, v)
+	return p.consumeOptionalSeparator()
+}
+
+func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return v, tok.err
+	}
+	if tok.value == "" {
+		return v, p.errorf("unexpected EOF")
+	}
+
+	switch {
+	case fd.IsList():
+		lv := v.List()
+		var err error
+		if tok.value == "[" {
+			// Repeated field with list notation, like [1,2,3].
+			for {
+				vv := lv.NewElement()
+				vv, err = p.unmarshalSingularValue(vv, fd)
+				if err != nil {
+					return v, err
+				}
+				lv.Append(vv)
+
+				tok := p.next()
+				if tok.err != nil {
+					return v, tok.err
+				}
+				if tok.value == "]" {
+					break
+				}
+				if tok.value != "," {
+					return v, p.errorf("expected ']' or ',', found %q", tok.value)
+				}
+			}
+			return v, nil
+		}
+
+		// One value of the repeated field.
+		p.back()
+		vv := lv.NewElement()
+		vv, err = p.unmarshalSingularValue(vv, fd)
+		if err != nil {
+			return v, err
+		}
+		lv.Append(vv)
+		return v, nil
+	case fd.IsMap():
+		// The map entry should be this sequence of tokens:
+		//	< key : KEY value : VALUE >
+		// However, implementations may omit key or value, and technically
+		// we should support them in any order.
+		var terminator string
+		switch tok.value {
+		case "<":
+			terminator = ">"
+		case "{":
+			terminator = "}"
+		default:
+			return v, p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+
+		keyFD := fd.MapKey()
+		valFD := fd.MapValue()
+
+		mv := v.Map()
+		kv := keyFD.Default()
+		vv := mv.NewValue()
+		for {
+			tok := p.next()
+			if tok.err != nil {
+				return v, tok.err
+			}
+			if tok.value == terminator {
+				break
+			}
+			var err error
+			switch tok.value {
+			case "key":
+				if err := p.consumeToken(":"); err != nil {
+					return v, err
+				}
+				if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
+					return v, err
+				}
+				if err := p.consumeOptionalSeparator(); err != nil {
+					return v, err
+				}
+			case "value":
+				if err := p.checkForColon(valFD); err != nil {
+					return v, err
+				}
+				if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
+					return v, err
+				}
+				if err := p.consumeOptionalSeparator(); err != nil {
+					return v, err
+				}
+			default:
+				p.back()
+				return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+			}
+		}
+		mv.Set(kv.MapKey(), vv)
+		return v, nil
+	default:
+		p.back()
+		return p.unmarshalSingularValue(v, fd)
+	}
+}
+
+func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return v, tok.err
+	}
+	if tok.value == "" {
+		return v, p.errorf("unexpected EOF")
+	}
+
+	switch fd.Kind() {
+	case protoreflect.BoolKind:
+		switch tok.value {
+		case "true", "1", "t", "True":
+			return protoreflect.ValueOfBool(true), nil
+		case "false", "0", "f", "False":
+			return protoreflect.ValueOfBool(false), nil
+		}
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+			return protoreflect.ValueOfInt32(int32(x)), nil
+		}
+
+		// The C++ parser accepts large positive hex numbers and uses
+		// two's complement arithmetic to interpret them as negative numbers.
+		// This feature is here for backwards compatibility with C++.
+		if strings.HasPrefix(tok.value, "0x") {
+			if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+				return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
+			}
+		}
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+			return protoreflect.ValueOfInt64(int64(x)), nil
+		}
+
+		// The C++ parser accepts large positive hex numbers and uses
+		// two's complement arithmetic to interpret them as negative numbers.
+		// This feature is here for backwards compatibility with C++.
+		if strings.HasPrefix(tok.value, "0x") {
+			if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+				return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
+			}
+		}
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+			return protoreflect.ValueOfUint32(uint32(x)), nil
+		}
+	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+			return protoreflect.ValueOfUint64(uint64(x)), nil
+		}
+	case protoreflect.FloatKind:
+		// Ignore 'f' for compatibility with output generated by C++,
+		// but don't remove 'f' when the value is "-inf" or "inf".
+		v := tok.value
+		if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+			v = v[:len(v)-len("f")]
+		}
+		if x, err := strconv.ParseFloat(v, 32); err == nil {
+			return protoreflect.ValueOfFloat32(float32(x)), nil
+		}
+	case protoreflect.DoubleKind:
+		// Ignore 'f' for compatibility with output generated by C++,
+		// but don't remove 'f' when the value is "-inf" or "inf".
+		v := tok.value
+		if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+			v = v[:len(v)-len("f")]
+		}
+		if x, err := strconv.ParseFloat(v, 64); err == nil {
+			return protoreflect.ValueOfFloat64(float64(x)), nil
+		}
+	case protoreflect.StringKind:
+		if isQuote(tok.value[0]) {
+			return protoreflect.ValueOfString(tok.unquoted), nil
+		}
+	case protoreflect.BytesKind:
+		if isQuote(tok.value[0]) {
+			return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
+		}
+	case protoreflect.EnumKind:
+		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+			return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
+		}
+		vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
+		if vd != nil {
+			return protoreflect.ValueOfEnum(vd.Number()), nil
+		}
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		var terminator string
+		switch tok.value {
+		case "{":
+			terminator = "}"
+		case "<":
+			terminator = ">"
+		default:
+			return v, p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+		err := p.unmarshalMessage(v.Message(), terminator)
+		return v, err
+	default:
+		panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
+	}
+	return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ":" {
+		if fd.Message() == nil {
+			return p.errorf("expected ':', found %q", tok.value)
+		}
+		p.back()
+	}
+	return nil
+}
+
+// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
+// the following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtensionOrAnyName() (string, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return "", tok.err
+	}
+
+	// If extension name or type url is quoted, it's a single token.
+	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+		if err != nil {
+			return "", err
+		}
+		return name, p.consumeToken("]")
+	}
+
+	// Consume everything up to "]"
+	var parts []string
+	for tok.value != "]" {
+		parts = append(parts, tok.value)
+		tok = p.next()
+		if tok.err != nil {
+			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+		}
+		if p.done && tok.value != "]" {
+			return "", p.errorf("unclosed type_url or extension name")
+		}
+	}
+	return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in unmarshalMessage to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+	p.cur.err = pe
+	p.done = true
+	return pe
+}
+
+func (p *textParser) skipWhitespace() {
+	i := 0
+	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+		if p.s[i] == '#' {
+			// comment; skip to end of line or input
+			for i < len(p.s) && p.s[i] != '\n' {
+				i++
+			}
+			if i == len(p.s) {
+				break
+			}
+		}
+		if p.s[i] == '\n' {
+			p.line++
+		}
+		i++
+	}
+	p.offset += i
+	p.s = p.s[i:len(p.s)]
+	if len(p.s) == 0 {
+		p.done = true
+	}
+}
+
+func (p *textParser) advance() {
+	// Skip whitespace
+	p.skipWhitespace()
+	if p.done {
+		return
+	}
+
+	// Start of non-whitespace
+	p.cur.err = nil
+	p.cur.offset, p.cur.line = p.offset, p.line
+	p.cur.unquoted = ""
+	switch p.s[0] {
+	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+		// Single symbol
+		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+	case '"', '\'':
+		// Quoted string
+		i := 1
+		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+			if p.s[i] == '\\' && i+1 < len(p.s) {
+				// skip escaped char
+				i++
+			}
+			i++
+		}
+		if i >= len(p.s) || p.s[i] != p.s[0] {
+			p.errorf("unmatched quote")
+			return
+		}
+		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+		if err != nil {
+			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+			return
+		}
+		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+		p.cur.unquoted = unq
+	default:
+		i := 0
+		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+			i++
+		}
+		if i == 0 {
+			p.errorf("unexpected byte %#x", p.s[0])
+			return
+		}
+		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+	}
+	p.offset += len(p.cur.value)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+	if p.backed || p.done {
+		p.backed = false
+		return &p.cur
+	}
+	p.advance()
+	if p.done {
+		p.cur.value = ""
+	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+		// Look for multiple quoted strings separated by whitespace,
+		// and concatenate them.
+		cat := p.cur
+		for {
+			p.skipWhitespace()
+			if p.done || !isQuote(p.s[0]) {
+				break
+			}
+			p.advance()
+			if p.cur.err != nil {
+				return &p.cur
+			}
+			cat.value += " " + p.cur.value
+			cat.unquoted += p.cur.unquoted
+		}
+		p.done = false // parser may have seen EOF, but we want to return cat
+		p.cur = cat
+	}
+	return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != s {
+		p.back()
+		return p.errorf("expected %q, found %q", s, tok.value)
+	}
+	return nil
+}
+
+var errBadUTF8 = errors.New("proto: bad UTF-8")
+
+func unquoteC(s string, quote rune) (string, error) {
+	// This is based on C++'s tokenizer.cc.
+	// Despite its name, this is *not* parsing C syntax.
+	// For instance, "\0" is an invalid quoted string.
+
+	// Avoid allocation in trivial cases.
+	simple := true
+	for _, r := range s {
+		if r == '\\' || r == quote {
+			simple = false
+			break
+		}
+	}
+	if simple {
+		return s, nil
+	}
+
+	buf := make([]byte, 0, 3*len(s)/2)
+	for len(s) > 0 {
+		r, n := utf8.DecodeRuneInString(s)
+		if r == utf8.RuneError && n == 1 {
+			return "", errBadUTF8
+		}
+		s = s[n:]
+		if r != '\\' {
+			if r < utf8.RuneSelf {
+				buf = append(buf, byte(r))
+			} else {
+				buf = append(buf, string(r)...)
+			}
+			continue
+		}
+
+		ch, tail, err := unescape(s)
+		if err != nil {
+			return "", err
+		}
+		buf = append(buf, ch...)
+		s = tail
+	}
+	return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+	r, n := utf8.DecodeRuneInString(s)
+	if r == utf8.RuneError && n == 1 {
+		return "", "", errBadUTF8
+	}
+	s = s[n:]
+	switch r {
+	case 'a':
+		return "\a", s, nil
+	case 'b':
+		return "\b", s, nil
+	case 'f':
+		return "\f", s, nil
+	case 'n':
+		return "\n", s, nil
+	case 'r':
+		return "\r", s, nil
+	case 't':
+		return "\t", s, nil
+	case 'v':
+		return "\v", s, nil
+	case '?':
+		return "?", s, nil // trigraph workaround
+	case '\'', '"', '\\':
+		return string(r), s, nil
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		if len(s) < 2 {
+			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+		}
+		ss := string(r) + s[:2]
+		s = s[2:]
+		i, err := strconv.ParseUint(ss, 8, 8)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+		}
+		return string([]byte{byte(i)}), s, nil
+	case 'x', 'X', 'u', 'U':
+		var n int
+		switch r {
+		case 'x', 'X':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		if len(s) < n {
+			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+		}
+		ss := s[:n]
+		s = s[n:]
+		i, err := strconv.ParseUint(ss, 16, 64)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+		}
+		if r == 'x' || r == 'X' {
+			return string([]byte{byte(i)}), s, nil
+		}
+		if i > utf8.MaxRune {
+			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+		}
+		return string(rune(i)), s, nil
+	}
+	return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+func isIdentOrNumberChar(c byte) bool {
+	switch {
+	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+		return true
+	case '0' <= c && c <= '9':
+		return true
+	}
+	switch c {
+	case '-', '+', '.', '_':
+		return true
+	}
+	return false
+}
+
+func isWhitespace(c byte) bool {
+	switch c {
+	case ' ', '\t', '\n', '\r':
+		return true
+	}
+	return false
+}
+
+func isQuote(c byte) bool {
+	switch c {
+	case '"', '\'':
+		return true
+	}
+	return false
+}
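The new decoder keeps the legacy proto.UnmarshalText entry point but resolves field names through the message descriptor rather than struct tags. A minimal usage sketch, using the well-known Duration type purely for illustration (the adapter's own generated messages would work the same way):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	var d durationpb.Duration
	// Separators between fields are optional; unknown field names are errors.
	if err := proto.UnmarshalText(`seconds: 3 nanos: 500`, &d); err != nil {
		panic(err)
	}
	fmt.Println(d.GetSeconds(), d.GetNanos()) // 3 500
}
```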
diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go
new file mode 100644
index 0000000..a31134e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_encode.go
@@ -0,0 +1,560 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	"bytes"
+	"encoding"
+	"fmt"
+	"io"
+	"math"
+	"sort"
+	"strings"
+
+	"google.golang.org/protobuf/encoding/prototext"
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextMarshalV2 = false
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+	Compact   bool // use compact text format (one line)
+	ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes the proto text format of m to w.
+func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
+	b, err := tm.marshal(m)
+	if len(b) > 0 {
+		if _, err := w.Write(b); err != nil {
+			return err
+		}
+	}
+	return err
+}
+
+// Text returns a proto text formatted string of m.
+func (tm *TextMarshaler) Text(m Message) string {
+	b, _ := tm.marshal(m)
+	return string(b)
+}
+
+func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() {
+		return []byte("<nil>"), nil
+	}
+
+	if wrapTextMarshalV2 {
+		if m, ok := m.(encoding.TextMarshaler); ok {
+			return m.MarshalText()
+		}
+
+		opts := prototext.MarshalOptions{
+			AllowPartial: true,
+			EmitUnknown:  true,
+		}
+		if !tm.Compact {
+			opts.Indent = "  "
+		}
+		if !tm.ExpandAny {
+			opts.Resolver = (*protoregistry.Types)(nil)
+		}
+		return opts.Marshal(mr.Interface())
+	} else {
+		w := &textWriter{
+			compact:   tm.Compact,
+			expandAny: tm.ExpandAny,
+			complete:  true,
+		}
+
+		if m, ok := m.(encoding.TextMarshaler); ok {
+			b, err := m.MarshalText()
+			if err != nil {
+				return nil, err
+			}
+			w.Write(b)
+			return w.buf, nil
+		}
+
+		err := w.writeMessage(mr)
+		return w.buf, err
+	}
+}
+
+var (
+	defaultTextMarshaler = TextMarshaler{}
+	compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// MarshalText writes the proto text format of m to w.
+func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
+
+// MarshalTextString returns a proto text formatted string of m.
+func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
+
+// CompactText writes the compact proto text format of m to w.
+func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
+
+// CompactTextString returns a compact proto text formatted string of m.
+func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
+
+var (
+	newline         = []byte("\n")
+	endBraceNewline = []byte("}\n")
+	posInf          = []byte("inf")
+	negInf          = []byte("-inf")
+	nan             = []byte("nan")
+)
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+	compact   bool // same as TextMarshaler.Compact
+	expandAny bool // same as TextMarshaler.ExpandAny
+	complete  bool // whether the current position is a complete line
+	indent    int  // indentation level; never negative
+	buf       []byte
+}
+
+func (w *textWriter) Write(p []byte) (n int, _ error) {
+	newlines := bytes.Count(p, newline)
+	if newlines == 0 {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		w.buf = append(w.buf, p...)
+		w.complete = false
+		return len(p), nil
+	}
+
+	frags := bytes.SplitN(p, newline, newlines+1)
+	if w.compact {
+		for i, frag := range frags {
+			if i > 0 {
+				w.buf = append(w.buf, ' ')
+				n++
+			}
+			w.buf = append(w.buf, frag...)
+			n += len(frag)
+		}
+		return n, nil
+	}
+
+	for i, frag := range frags {
+		if w.complete {
+			w.writeIndent()
+		}
+		w.buf = append(w.buf, frag...)
+		n += len(frag)
+		if i+1 < len(frags) {
+			w.buf = append(w.buf, '\n')
+			n++
+		}
+	}
+	w.complete = len(frags[len(frags)-1]) == 0
+	return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+	if w.compact && c == '\n' {
+		c = ' '
+	}
+	if !w.compact && w.complete {
+		w.writeIndent()
+	}
+	w.buf = append(w.buf, c)
+	w.complete = c == '\n'
+	return nil
+}
+
+func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
+	if !w.compact && w.complete {
+		w.writeIndent()
+	}
+	w.complete = false
+
+	if fd.Kind() != protoreflect.GroupKind {
+		w.buf = append(w.buf, fd.Name()...)
+		w.WriteByte(':')
+	} else {
+		// Use message type name for group field name.
+		w.buf = append(w.buf, fd.Message().Name()...)
+	}
+
+	if !w.compact {
+		w.WriteByte(' ')
+	}
+}
+
+func requiresQuotes(u string) bool {
+	// When the type URL contains any character outside [0-9A-Za-z./_], it must be quoted.
+	for _, ch := range u {
+		switch {
+		case ch == '.' || ch == '/' || ch == '_':
+			continue
+		case '0' <= ch && ch <= '9':
+			continue
+		case 'A' <= ch && ch <= 'Z':
+			continue
+		case 'a' <= ch && ch <= 'z':
+			continue
+		default:
+			return true
+		}
+	}
+	return false
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the value cannot be unmarshaled (e.g. because the
+// message type it references is not linked in).
+//
+// It returns (true, err) when the value was written in expanded format, or an
+// error was encountered while writing it.
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
+	md := m.Descriptor()
+	fdURL := md.Fields().ByName("type_url")
+	fdVal := md.Fields().ByName("value")
+
+	url := m.Get(fdURL).String()
+	mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+	if err != nil {
+		return false, nil
+	}
+
+	b := m.Get(fdVal).Bytes()
+	m2 := mt.New()
+	if err := proto.Unmarshal(b, m2.Interface()); err != nil {
+		return false, nil
+	}
+	w.Write([]byte("["))
+	if requiresQuotes(url) {
+		w.writeQuotedString(url)
+	} else {
+		w.Write([]byte(url))
+	}
+	if w.compact {
+		w.Write([]byte("]:<"))
+	} else {
+		w.Write([]byte("]: <\n"))
+		w.indent++
+	}
+	if err := w.writeMessage(m2); err != nil {
+		return true, err
+	}
+	if w.compact {
+		w.Write([]byte("> "))
+	} else {
+		w.indent--
+		w.Write([]byte(">\n"))
+	}
+	return true, nil
+}
+
+func (w *textWriter) writeMessage(m protoreflect.Message) error {
+	md := m.Descriptor()
+	if w.expandAny && md.FullName() == "google.protobuf.Any" {
+		if canExpand, err := w.writeProto3Any(m); canExpand {
+			return err
+		}
+	}
+
+	fds := md.Fields()
+	for i := 0; i < fds.Len(); {
+		fd := fds.Get(i)
+		if od := fd.ContainingOneof(); od != nil {
+			fd = m.WhichOneof(od)
+			i += od.Fields().Len()
+		} else {
+			i++
+		}
+		if fd == nil || !m.Has(fd) {
+			continue
+		}
+
+		switch {
+		case fd.IsList():
+			lv := m.Get(fd).List()
+			for j := 0; j < lv.Len(); j++ {
+				w.writeName(fd)
+				v := lv.Get(j)
+				if err := w.writeSingularValue(v, fd); err != nil {
+					return err
+				}
+				w.WriteByte('\n')
+			}
+		case fd.IsMap():
+			kfd := fd.MapKey()
+			vfd := fd.MapValue()
+			mv := m.Get(fd).Map()
+
+			type entry struct{ key, val protoreflect.Value }
+			var entries []entry
+			mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+				entries = append(entries, entry{k.Value(), v})
+				return true
+			})
+			sort.Slice(entries, func(i, j int) bool {
+				switch kfd.Kind() {
+				case protoreflect.BoolKind:
+					return !entries[i].key.Bool() && entries[j].key.Bool()
+				case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+					return entries[i].key.Int() < entries[j].key.Int()
+				case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+					return entries[i].key.Uint() < entries[j].key.Uint()
+				case protoreflect.StringKind:
+					return entries[i].key.String() < entries[j].key.String()
+				default:
+					panic("invalid kind")
+				}
+			})
+			for _, entry := range entries {
+				w.writeName(fd)
+				w.WriteByte('<')
+				if !w.compact {
+					w.WriteByte('\n')
+				}
+				w.indent++
+				w.writeName(kfd)
+				if err := w.writeSingularValue(entry.key, kfd); err != nil {
+					return err
+				}
+				w.WriteByte('\n')
+				w.writeName(vfd)
+				if err := w.writeSingularValue(entry.val, vfd); err != nil {
+					return err
+				}
+				w.WriteByte('\n')
+				w.indent--
+				w.WriteByte('>')
+				w.WriteByte('\n')
+			}
+		default:
+			w.writeName(fd)
+			if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
+				return err
+			}
+			w.WriteByte('\n')
+		}
+	}
+
+	if b := m.GetUnknown(); len(b) > 0 {
+		w.writeUnknownFields(b)
+	}
+	return w.writeExtensions(m)
+}
+
+func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+	switch fd.Kind() {
+	case protoreflect.FloatKind, protoreflect.DoubleKind:
+		switch vf := v.Float(); {
+		case math.IsInf(vf, +1):
+			w.Write(posInf)
+		case math.IsInf(vf, -1):
+			w.Write(negInf)
+		case math.IsNaN(vf):
+			w.Write(nan)
+		default:
+			fmt.Fprint(w, v.Interface())
+		}
+	case protoreflect.StringKind:
+		// NOTE: This does not validate UTF-8 for historical reasons.
+		w.writeQuotedString(string(v.String()))
+	case protoreflect.BytesKind:
+		w.writeQuotedString(string(v.Bytes()))
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		var bra, ket byte = '<', '>'
+		if fd.Kind() == protoreflect.GroupKind {
+			bra, ket = '{', '}'
+		}
+		w.WriteByte(bra)
+		if !w.compact {
+			w.WriteByte('\n')
+		}
+		w.indent++
+		m := v.Message()
+		if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
+			b, err := m2.MarshalText()
+			if err != nil {
+				return err
+			}
+			w.Write(b)
+		} else {
+			w.writeMessage(m)
+		}
+		w.indent--
+		w.WriteByte(ket)
+	case protoreflect.EnumKind:
+		if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
+			fmt.Fprint(w, ev.Name())
+		} else {
+			fmt.Fprint(w, v.Enum())
+		}
+	default:
+		fmt.Fprint(w, v.Interface())
+	}
+	return nil
+}
+
+// writeQuotedString writes a quoted string in the protocol buffer text format.
+func (w *textWriter) writeQuotedString(s string) {
+	w.WriteByte('"')
+	for i := 0; i < len(s); i++ {
+		switch c := s[i]; c {
+		case '\n':
+			w.buf = append(w.buf, `\n`...)
+		case '\r':
+			w.buf = append(w.buf, `\r`...)
+		case '\t':
+			w.buf = append(w.buf, `\t`...)
+		case '"':
+			w.buf = append(w.buf, `\"`...)
+		case '\\':
+			w.buf = append(w.buf, `\\`...)
+		default:
+			if isPrint := c >= 0x20 && c < 0x7f; isPrint {
+				w.buf = append(w.buf, c)
+			} else {
+				w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
+			}
+		}
+	}
+	w.WriteByte('"')
+}
+
+func (w *textWriter) writeUnknownFields(b []byte) {
+	if !w.compact {
+		fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
+	}
+
+	for len(b) > 0 {
+		num, wtyp, n := protowire.ConsumeTag(b)
+		if n < 0 {
+			return
+		}
+		b = b[n:]
+
+		if wtyp == protowire.EndGroupType {
+			w.indent--
+			w.Write(endBraceNewline)
+			continue
+		}
+		fmt.Fprint(w, num)
+		if wtyp != protowire.StartGroupType {
+			w.WriteByte(':')
+		}
+		if !w.compact || wtyp == protowire.StartGroupType {
+			w.WriteByte(' ')
+		}
+		switch wtyp {
+		case protowire.VarintType:
+			v, n := protowire.ConsumeVarint(b)
+			if n < 0 {
+				return
+			}
+			b = b[n:]
+			fmt.Fprint(w, v)
+		case protowire.Fixed32Type:
+			v, n := protowire.ConsumeFixed32(b)
+			if n < 0 {
+				return
+			}
+			b = b[n:]
+			fmt.Fprint(w, v)
+		case protowire.Fixed64Type:
+			v, n := protowire.ConsumeFixed64(b)
+			if n < 0 {
+				return
+			}
+			b = b[n:]
+			fmt.Fprint(w, v)
+		case protowire.BytesType:
+			v, n := protowire.ConsumeBytes(b)
+			if n < 0 {
+				return
+			}
+			b = b[n:]
+			fmt.Fprintf(w, "%q", v)
+		case protowire.StartGroupType:
+			w.WriteByte('{')
+			w.indent++
+		default:
+			fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
+		}
+		w.WriteByte('\n')
+	}
+}
+
+// writeExtensions writes all the extensions in m.
+func (w *textWriter) writeExtensions(m protoreflect.Message) error {
+	md := m.Descriptor()
+	if md.ExtensionRanges().Len() == 0 {
+		return nil
+	}
+
+	type ext struct {
+		desc protoreflect.FieldDescriptor
+		val  protoreflect.Value
+	}
+	var exts []ext
+	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+		if fd.IsExtension() {
+			exts = append(exts, ext{fd, v})
+		}
+		return true
+	})
+	sort.Slice(exts, func(i, j int) bool {
+		return exts[i].desc.Number() < exts[j].desc.Number()
+	})
+
+	for _, ext := range exts {
+		// For message set, use the name of the message as the extension name.
+		name := string(ext.desc.FullName())
+		if isMessageSet(ext.desc.ContainingMessage()) {
+			name = strings.TrimSuffix(name, ".message_set_extension")
+		}
+
+		if !ext.desc.IsList() {
+			if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
+				return err
+			}
+		} else {
+			lv := ext.val.List()
+			for i := 0; i < lv.Len(); i++ {
+				if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+	fmt.Fprintf(w, "[%s]:", name)
+	if !w.compact {
+		w.WriteByte(' ')
+	}
+	if err := w.writeSingularValue(v, fd); err != nil {
+		return err
+	}
+	w.WriteByte('\n')
+	return nil
+}
+
+func (w *textWriter) writeIndent() {
+	if !w.complete {
+		return
+	}
+	for i := 0; i < w.indent*2; i++ {
+		w.buf = append(w.buf, ' ')
+	}
+	w.complete = false
+}
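On the encoding side the package-level helpers are preserved as thin wrappers over TextMarshaler, which now walks the message via protoreflect and buffers output internally. A short sketch of the default and compact forms, again using durationpb only as a stand-in for a generated message (expected output in the comments is approximate):

```go
package main

import (
	"fmt"
	"os"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := &durationpb.Duration{Seconds: 3, Nanos: 500}

	// Default form: one "name: value" pair per line.
	_ = proto.MarshalText(os.Stdout, d) // seconds: 3\nnanos: 500\n

	// Compact form: newlines become spaces and no space follows ':'.
	fmt.Println(proto.CompactTextString(d)) // seconds:3 nanos:500
}
```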
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
deleted file mode 100644
index bb55a3a..0000000
--- a/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ /dev/null
@@ -1,880 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for parsing the Text protocol buffer format.
-// TODO: message sets.
-
-import (
-	"encoding"
-	"errors"
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-	"unicode/utf8"
-)
-
-// Error string emitted when deserializing Any and fields are already set
-const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
-
-type ParseError struct {
-	Message string
-	Line    int // 1-based line number
-	Offset  int // 0-based byte offset from start of input
-}
-
-func (p *ParseError) Error() string {
-	if p.Line == 1 {
-		// show offset only for first line
-		return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
-	}
-	return fmt.Sprintf("line %d: %v", p.Line, p.Message)
-}
-
-type token struct {
-	value    string
-	err      *ParseError
-	line     int    // line number
-	offset   int    // byte number from start of input, not start of line
-	unquoted string // the unquoted version of value, if it was a quoted string
-}
-
-func (t *token) String() string {
-	if t.err == nil {
-		return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
-	}
-	return fmt.Sprintf("parse error: %v", t.err)
-}
-
-type textParser struct {
-	s            string // remaining input
-	done         bool   // whether the parsing is finished (success or error)
-	backed       bool   // whether back() was called
-	offset, line int
-	cur          token
-}
-
-func newTextParser(s string) *textParser {
-	p := new(textParser)
-	p.s = s
-	p.line = 1
-	p.cur.line = 1
-	return p
-}
-
-func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
-	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
-	p.cur.err = pe
-	p.done = true
-	return pe
-}
-
-// Numbers and identifiers are matched by [-+._A-Za-z0-9]
-func isIdentOrNumberChar(c byte) bool {
-	switch {
-	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
-		return true
-	case '0' <= c && c <= '9':
-		return true
-	}
-	switch c {
-	case '-', '+', '.', '_':
-		return true
-	}
-	return false
-}
-
-func isWhitespace(c byte) bool {
-	switch c {
-	case ' ', '\t', '\n', '\r':
-		return true
-	}
-	return false
-}
-
-func isQuote(c byte) bool {
-	switch c {
-	case '"', '\'':
-		return true
-	}
-	return false
-}
-
-func (p *textParser) skipWhitespace() {
-	i := 0
-	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
-		if p.s[i] == '#' {
-			// comment; skip to end of line or input
-			for i < len(p.s) && p.s[i] != '\n' {
-				i++
-			}
-			if i == len(p.s) {
-				break
-			}
-		}
-		if p.s[i] == '\n' {
-			p.line++
-		}
-		i++
-	}
-	p.offset += i
-	p.s = p.s[i:len(p.s)]
-	if len(p.s) == 0 {
-		p.done = true
-	}
-}
-
-func (p *textParser) advance() {
-	// Skip whitespace
-	p.skipWhitespace()
-	if p.done {
-		return
-	}
-
-	// Start of non-whitespace
-	p.cur.err = nil
-	p.cur.offset, p.cur.line = p.offset, p.line
-	p.cur.unquoted = ""
-	switch p.s[0] {
-	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
-		// Single symbol
-		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
-	case '"', '\'':
-		// Quoted string
-		i := 1
-		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
-			if p.s[i] == '\\' && i+1 < len(p.s) {
-				// skip escaped char
-				i++
-			}
-			i++
-		}
-		if i >= len(p.s) || p.s[i] != p.s[0] {
-			p.errorf("unmatched quote")
-			return
-		}
-		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
-		if err != nil {
-			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
-			return
-		}
-		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
-		p.cur.unquoted = unq
-	default:
-		i := 0
-		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
-			i++
-		}
-		if i == 0 {
-			p.errorf("unexpected byte %#x", p.s[0])
-			return
-		}
-		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
-	}
-	p.offset += len(p.cur.value)
-}
-
-var (
-	errBadUTF8 = errors.New("proto: bad UTF-8")
-)
-
-func unquoteC(s string, quote rune) (string, error) {
-	// This is based on C++'s tokenizer.cc.
-	// Despite its name, this is *not* parsing C syntax.
-	// For instance, "\0" is an invalid quoted string.
-
-	// Avoid allocation in trivial cases.
-	simple := true
-	for _, r := range s {
-		if r == '\\' || r == quote {
-			simple = false
-			break
-		}
-	}
-	if simple {
-		return s, nil
-	}
-
-	buf := make([]byte, 0, 3*len(s)/2)
-	for len(s) > 0 {
-		r, n := utf8.DecodeRuneInString(s)
-		if r == utf8.RuneError && n == 1 {
-			return "", errBadUTF8
-		}
-		s = s[n:]
-		if r != '\\' {
-			if r < utf8.RuneSelf {
-				buf = append(buf, byte(r))
-			} else {
-				buf = append(buf, string(r)...)
-			}
-			continue
-		}
-
-		ch, tail, err := unescape(s)
-		if err != nil {
-			return "", err
-		}
-		buf = append(buf, ch...)
-		s = tail
-	}
-	return string(buf), nil
-}
-
-func unescape(s string) (ch string, tail string, err error) {
-	r, n := utf8.DecodeRuneInString(s)
-	if r == utf8.RuneError && n == 1 {
-		return "", "", errBadUTF8
-	}
-	s = s[n:]
-	switch r {
-	case 'a':
-		return "\a", s, nil
-	case 'b':
-		return "\b", s, nil
-	case 'f':
-		return "\f", s, nil
-	case 'n':
-		return "\n", s, nil
-	case 'r':
-		return "\r", s, nil
-	case 't':
-		return "\t", s, nil
-	case 'v':
-		return "\v", s, nil
-	case '?':
-		return "?", s, nil // trigraph workaround
-	case '\'', '"', '\\':
-		return string(r), s, nil
-	case '0', '1', '2', '3', '4', '5', '6', '7':
-		if len(s) < 2 {
-			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
-		}
-		ss := string(r) + s[:2]
-		s = s[2:]
-		i, err := strconv.ParseUint(ss, 8, 8)
-		if err != nil {
-			return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
-		}
-		return string([]byte{byte(i)}), s, nil
-	case 'x', 'X', 'u', 'U':
-		var n int
-		switch r {
-		case 'x', 'X':
-			n = 2
-		case 'u':
-			n = 4
-		case 'U':
-			n = 8
-		}
-		if len(s) < n {
-			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
-		}
-		ss := s[:n]
-		s = s[n:]
-		i, err := strconv.ParseUint(ss, 16, 64)
-		if err != nil {
-			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
-		}
-		if r == 'x' || r == 'X' {
-			return string([]byte{byte(i)}), s, nil
-		}
-		if i > utf8.MaxRune {
-			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
-		}
-		return string(i), s, nil
-	}
-	return "", "", fmt.Errorf(`unknown escape \%c`, r)
-}
-
-// Back off the parser by one token. Can only be done between calls to next().
-// It makes the next advance() a no-op.
-func (p *textParser) back() { p.backed = true }
-
-// Advances the parser and returns the new current token.
-func (p *textParser) next() *token {
-	if p.backed || p.done {
-		p.backed = false
-		return &p.cur
-	}
-	p.advance()
-	if p.done {
-		p.cur.value = ""
-	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
-		// Look for multiple quoted strings separated by whitespace,
-		// and concatenate them.
-		cat := p.cur
-		for {
-			p.skipWhitespace()
-			if p.done || !isQuote(p.s[0]) {
-				break
-			}
-			p.advance()
-			if p.cur.err != nil {
-				return &p.cur
-			}
-			cat.value += " " + p.cur.value
-			cat.unquoted += p.cur.unquoted
-		}
-		p.done = false // parser may have seen EOF, but we want to return cat
-		p.cur = cat
-	}
-	return &p.cur
-}
-
-func (p *textParser) consumeToken(s string) error {
-	tok := p.next()
-	if tok.err != nil {
-		return tok.err
-	}
-	if tok.value != s {
-		p.back()
-		return p.errorf("expected %q, found %q", s, tok.value)
-	}
-	return nil
-}
-
-// Return a RequiredNotSetError indicating which required field was not set.
-func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
-	st := sv.Type()
-	sprops := GetProperties(st)
-	for i := 0; i < st.NumField(); i++ {
-		if !isNil(sv.Field(i)) {
-			continue
-		}
-
-		props := sprops.Prop[i]
-		if props.Required {
-			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
-		}
-	}
-	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
-}
-
-// Returns the index in the struct for the named field, as well as the parsed tag properties.
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
-	i, ok := sprops.decoderOrigNames[name]
-	if ok {
-		return i, sprops.Prop[i], true
-	}
-	return -1, nil, false
-}
-
-// Consume a ':' from the input stream (if the next token is a colon),
-// returning an error if a colon is needed but not present.
-func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
-	tok := p.next()
-	if tok.err != nil {
-		return tok.err
-	}
-	if tok.value != ":" {
-		// Colon is optional when the field is a group or message.
-		needColon := true
-		switch props.Wire {
-		case "group":
-			needColon = false
-		case "bytes":
-			// A "bytes" field is either a message, a string, or a repeated field;
-			// those three become *T, *string and []T respectively, so we can check for
-			// this field being a pointer to a non-string.
-			if typ.Kind() == reflect.Ptr {
-				// *T or *string
-				if typ.Elem().Kind() == reflect.String {
-					break
-				}
-			} else if typ.Kind() == reflect.Slice {
-				// []T or []*T
-				if typ.Elem().Kind() != reflect.Ptr {
-					break
-				}
-			} else if typ.Kind() == reflect.String {
-				// The proto3 exception is for a string field,
-				// which requires a colon.
-				break
-			}
-			needColon = false
-		}
-		if needColon {
-			return p.errorf("expected ':', found %q", tok.value)
-		}
-		p.back()
-	}
-	return nil
-}
-
-func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
-	st := sv.Type()
-	sprops := GetProperties(st)
-	reqCount := sprops.reqCount
-	var reqFieldErr error
-	fieldSet := make(map[string]bool)
-	// A struct is a sequence of "name: value", terminated by one of
-	// '>' or '}', or the end of the input.  A name may also be
-	// "[extension]" or "[type/url]".
-	//
-	// The whole struct can also be an expanded Any message, like:
-	// [type/url] < ... struct contents ... >
-	for {
-		tok := p.next()
-		if tok.err != nil {
-			return tok.err
-		}
-		if tok.value == terminator {
-			break
-		}
-		if tok.value == "[" {
-			// Looks like an extension or an Any.
-			//
-			// TODO: Check whether we need to handle
-			// namespace rooted names (e.g. ".something.Foo").
-			extName, err := p.consumeExtName()
-			if err != nil {
-				return err
-			}
-
-			if s := strings.LastIndex(extName, "/"); s >= 0 {
-				// If it contains a slash, it's an Any type URL.
-				messageName := extName[s+1:]
-				mt := MessageType(messageName)
-				if mt == nil {
-					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
-				}
-				tok = p.next()
-				if tok.err != nil {
-					return tok.err
-				}
-				// consume an optional colon
-				if tok.value == ":" {
-					tok = p.next()
-					if tok.err != nil {
-						return tok.err
-					}
-				}
-				var terminator string
-				switch tok.value {
-				case "<":
-					terminator = ">"
-				case "{":
-					terminator = "}"
-				default:
-					return p.errorf("expected '{' or '<', found %q", tok.value)
-				}
-				v := reflect.New(mt.Elem())
-				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
-					return pe
-				}
-				b, err := Marshal(v.Interface().(Message))
-				if err != nil {
-					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
-				}
-				if fieldSet["type_url"] {
-					return p.errorf(anyRepeatedlyUnpacked, "type_url")
-				}
-				if fieldSet["value"] {
-					return p.errorf(anyRepeatedlyUnpacked, "value")
-				}
-				sv.FieldByName("TypeUrl").SetString(extName)
-				sv.FieldByName("Value").SetBytes(b)
-				fieldSet["type_url"] = true
-				fieldSet["value"] = true
-				continue
-			}
-
-			var desc *ExtensionDesc
-			// This could be faster, but it's functional.
-			// TODO: Do something smarter than a linear scan.
-			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
-				if d.Name == extName {
-					desc = d
-					break
-				}
-			}
-			if desc == nil {
-				return p.errorf("unrecognized extension %q", extName)
-			}
-
-			props := &Properties{}
-			props.Parse(desc.Tag)
-
-			typ := reflect.TypeOf(desc.ExtensionType)
-			if err := p.checkForColon(props, typ); err != nil {
-				return err
-			}
-
-			rep := desc.repeated()
-
-			// Read the extension structure, and set it in
-			// the value we're constructing.
-			var ext reflect.Value
-			if !rep {
-				ext = reflect.New(typ).Elem()
-			} else {
-				ext = reflect.New(typ.Elem()).Elem()
-			}
-			if err := p.readAny(ext, props); err != nil {
-				if _, ok := err.(*RequiredNotSetError); !ok {
-					return err
-				}
-				reqFieldErr = err
-			}
-			ep := sv.Addr().Interface().(Message)
-			if !rep {
-				SetExtension(ep, desc, ext.Interface())
-			} else {
-				old, err := GetExtension(ep, desc)
-				var sl reflect.Value
-				if err == nil {
-					sl = reflect.ValueOf(old) // existing slice
-				} else {
-					sl = reflect.MakeSlice(typ, 0, 1)
-				}
-				sl = reflect.Append(sl, ext)
-				SetExtension(ep, desc, sl.Interface())
-			}
-			if err := p.consumeOptionalSeparator(); err != nil {
-				return err
-			}
-			continue
-		}
-
-		// This is a normal, non-extension field.
-		name := tok.value
-		var dst reflect.Value
-		fi, props, ok := structFieldByName(sprops, name)
-		if ok {
-			dst = sv.Field(fi)
-		} else if oop, ok := sprops.OneofTypes[name]; ok {
-			// It is a oneof.
-			props = oop.Prop
-			nv := reflect.New(oop.Type.Elem())
-			dst = nv.Elem().Field(0)
-			field := sv.Field(oop.Field)
-			if !field.IsNil() {
-				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
-			}
-			field.Set(nv)
-		}
-		if !dst.IsValid() {
-			return p.errorf("unknown field name %q in %v", name, st)
-		}
-
-		if dst.Kind() == reflect.Map {
-			// Consume any colon.
-			if err := p.checkForColon(props, dst.Type()); err != nil {
-				return err
-			}
-
-			// Construct the map if it doesn't already exist.
-			if dst.IsNil() {
-				dst.Set(reflect.MakeMap(dst.Type()))
-			}
-			key := reflect.New(dst.Type().Key()).Elem()
-			val := reflect.New(dst.Type().Elem()).Elem()
-
-			// The map entry should be this sequence of tokens:
-			//	< key : KEY value : VALUE >
-			// However, implementations may omit key or value, and technically
-			// we should support them in any order.  See b/28924776 for a time
-			// this went wrong.
-
-			tok := p.next()
-			var terminator string
-			switch tok.value {
-			case "<":
-				terminator = ">"
-			case "{":
-				terminator = "}"
-			default:
-				return p.errorf("expected '{' or '<', found %q", tok.value)
-			}
-			for {
-				tok := p.next()
-				if tok.err != nil {
-					return tok.err
-				}
-				if tok.value == terminator {
-					break
-				}
-				switch tok.value {
-				case "key":
-					if err := p.consumeToken(":"); err != nil {
-						return err
-					}
-					if err := p.readAny(key, props.MapKeyProp); err != nil {
-						return err
-					}
-					if err := p.consumeOptionalSeparator(); err != nil {
-						return err
-					}
-				case "value":
-					if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
-						return err
-					}
-					if err := p.readAny(val, props.MapValProp); err != nil {
-						return err
-					}
-					if err := p.consumeOptionalSeparator(); err != nil {
-						return err
-					}
-				default:
-					p.back()
-					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
-				}
-			}
-
-			dst.SetMapIndex(key, val)
-			continue
-		}
-
-		// Check that it's not already set if it's not a repeated field.
-		if !props.Repeated && fieldSet[name] {
-			return p.errorf("non-repeated field %q was repeated", name)
-		}
-
-		if err := p.checkForColon(props, dst.Type()); err != nil {
-			return err
-		}
-
-		// Parse into the field.
-		fieldSet[name] = true
-		if err := p.readAny(dst, props); err != nil {
-			if _, ok := err.(*RequiredNotSetError); !ok {
-				return err
-			}
-			reqFieldErr = err
-		}
-		if props.Required {
-			reqCount--
-		}
-
-		if err := p.consumeOptionalSeparator(); err != nil {
-			return err
-		}
-
-	}
-
-	if reqCount > 0 {
-		return p.missingRequiredFieldError(sv)
-	}
-	return reqFieldErr
-}
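Besides ordinary fields and extensions, readStruct accepts the expanded google.protobuf.Any form `[type/url] { ... }`, marshalling the embedded message into the Any's type_url/value pair. A minimal sketch of that input shape; it assumes the embedded message type (here FileDescriptorProto) is linked into the binary so the MessageType registry lookup succeeds:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	anypb "github.com/golang/protobuf/ptypes/any"

	// Linking the descriptor package registers google.protobuf.FileDescriptorProto.
	_ "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

const expanded = `[type.googleapis.com/google.protobuf.FileDescriptorProto] {
  name: "a.proto"
}`

func main() {
	var msg anypb.Any
	if err := proto.UnmarshalText(expanded, &msg); err != nil {
		panic(err)
	}
	fmt.Println(msg.TypeUrl)        // type.googleapis.com/google.protobuf.FileDescriptorProto
	fmt.Println(len(msg.Value) > 0) // true: wire encoding of the embedded message
}
```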
-
-// consumeExtName consumes extension name or expanded Any type URL and the
-// following ']'. It returns the name or URL consumed.
-func (p *textParser) consumeExtName() (string, error) {
-	tok := p.next()
-	if tok.err != nil {
-		return "", tok.err
-	}
-
-	// If extension name or type url is quoted, it's a single token.
-	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
-		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
-		if err != nil {
-			return "", err
-		}
-		return name, p.consumeToken("]")
-	}
-
-	// Consume everything up to "]"
-	var parts []string
-	for tok.value != "]" {
-		parts = append(parts, tok.value)
-		tok = p.next()
-		if tok.err != nil {
-			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
-		}
-		if p.done && tok.value != "]" {
-			return "", p.errorf("unclosed type_url or extension name")
-		}
-	}
-	return strings.Join(parts, ""), nil
-}
-
-// consumeOptionalSeparator consumes an optional semicolon or comma.
-// It is used in readStruct to provide backward compatibility.
-func (p *textParser) consumeOptionalSeparator() error {
-	tok := p.next()
-	if tok.err != nil {
-		return tok.err
-	}
-	if tok.value != ";" && tok.value != "," {
-		p.back()
-	}
-	return nil
-}
-
-func (p *textParser) readAny(v reflect.Value, props *Properties) error {
-	tok := p.next()
-	if tok.err != nil {
-		return tok.err
-	}
-	if tok.value == "" {
-		return p.errorf("unexpected EOF")
-	}
-
-	switch fv := v; fv.Kind() {
-	case reflect.Slice:
-		at := v.Type()
-		if at.Elem().Kind() == reflect.Uint8 {
-			// Special case for []byte
-			if tok.value[0] != '"' && tok.value[0] != '\'' {
-				// Deliberately written out here, as the error after
-				// this switch statement would write "invalid []byte: ...",
-				// which is not as user-friendly.
-				return p.errorf("invalid string: %v", tok.value)
-			}
-			bytes := []byte(tok.unquoted)
-			fv.Set(reflect.ValueOf(bytes))
-			return nil
-		}
-		// Repeated field.
-		if tok.value == "[" {
-			// Repeated field with list notation, like [1,2,3].
-			for {
-				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
-				err := p.readAny(fv.Index(fv.Len()-1), props)
-				if err != nil {
-					return err
-				}
-				tok := p.next()
-				if tok.err != nil {
-					return tok.err
-				}
-				if tok.value == "]" {
-					break
-				}
-				if tok.value != "," {
-					return p.errorf("Expected ']' or ',' found %q", tok.value)
-				}
-			}
-			return nil
-		}
-		// One value of the repeated field.
-		p.back()
-		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
-		return p.readAny(fv.Index(fv.Len()-1), props)
-	case reflect.Bool:
-		// true/1/t/True or false/f/0/False.
-		switch tok.value {
-		case "true", "1", "t", "True":
-			fv.SetBool(true)
-			return nil
-		case "false", "0", "f", "False":
-			fv.SetBool(false)
-			return nil
-		}
-	case reflect.Float32, reflect.Float64:
-		v := tok.value
-		// Ignore 'f' for compatibility with output generated by C++, but don't
-		// remove 'f' when the value is "-inf" or "inf".
-		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
-			v = v[:len(v)-1]
-		}
-		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
-			fv.SetFloat(f)
-			return nil
-		}
-	case reflect.Int32:
-		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
-			fv.SetInt(x)
-			return nil
-		}
-
-		if len(props.Enum) == 0 {
-			break
-		}
-		m, ok := enumValueMaps[props.Enum]
-		if !ok {
-			break
-		}
-		x, ok := m[tok.value]
-		if !ok {
-			break
-		}
-		fv.SetInt(int64(x))
-		return nil
-	case reflect.Int64:
-		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
-			fv.SetInt(x)
-			return nil
-		}
-
-	case reflect.Ptr:
-		// A basic field (indirected through pointer), or a repeated message/group
-		p.back()
-		fv.Set(reflect.New(fv.Type().Elem()))
-		return p.readAny(fv.Elem(), props)
-	case reflect.String:
-		if tok.value[0] == '"' || tok.value[0] == '\'' {
-			fv.SetString(tok.unquoted)
-			return nil
-		}
-	case reflect.Struct:
-		var terminator string
-		switch tok.value {
-		case "{":
-			terminator = "}"
-		case "<":
-			terminator = ">"
-		default:
-			return p.errorf("expected '{' or '<', found %q", tok.value)
-		}
-		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
-		return p.readStruct(fv, terminator)
-	case reflect.Uint32:
-		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
-			fv.SetUint(uint64(x))
-			return nil
-		}
-	case reflect.Uint64:
-		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
-			fv.SetUint(x)
-			return nil
-		}
-	}
-	return p.errorf("invalid %v: %v", v.Type(), tok.value)
-}
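readAny drives the per-kind parsing: enum fields accept either a number or the enum value name, repeated fields accept both repeated occurrences and the `[a, b, c]` list notation, bools accept true/1/t/True, and floats tolerate a trailing 'f'. A small hedged illustration of two of those forms using the vendored descriptor messages:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// Repeated field with list notation.
	var fd descpb.FileDescriptorProto
	if err := proto.UnmarshalText(`name: "a.proto" dependency: ["x.proto", "y.proto"]`, &fd); err != nil {
		panic(err)
	}

	// Enum values given by name rather than number.
	var f descpb.FieldDescriptorProto
	if err := proto.UnmarshalText(`name: "id" number: 1 label: LABEL_OPTIONAL type: TYPE_INT32`, &f); err != nil {
		panic(err)
	}
	fmt.Println(len(fd.GetDependency()), f.GetType()) // 2 TYPE_INT32
}
```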
-
-// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
-// before starting to unmarshal, so any existing data in pb is always removed.
-// If a required field is not set and no other error occurs,
-// UnmarshalText returns *RequiredNotSetError.
-func UnmarshalText(s string, pb Message) error {
-	if um, ok := pb.(encoding.TextUnmarshaler); ok {
-		return um.UnmarshalText([]byte(s))
-	}
-	pb.Reset()
-	v := reflect.ValueOf(pb)
-	return newTextParser(s).readStruct(v.Elem(), "")
-}
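This hand-written text parser is removed as part of the switch to the google.golang.org/protobuf (APIv2) runtime; the legacy entry points are kept as thin wrappers in the updated package, while the maintained text-format implementation lives in the prototext package. A hedged sketch of the replacement call, assuming the v2 module is vendored alongside this change:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	var fd descriptorpb.FileDescriptorProto
	if err := prototext.Unmarshal([]byte(`name: "a.proto"`), &fd); err != nil {
		panic(err)
	}
	fmt.Println(fd.GetName()) // a.proto
}
```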
diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go
new file mode 100644
index 0000000..d7c28da
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wire.go
@@ -0,0 +1,78 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	protoV2 "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func Size(m Message) int {
+	if m == nil {
+		return 0
+	}
+	mi := MessageV2(m)
+	return protoV2.Size(mi)
+}
+
+// Marshal returns the wire-format encoding of m.
+func Marshal(m Message) ([]byte, error) {
+	b, err := marshalAppend(nil, m, false)
+	if b == nil {
+		b = zeroBytes
+	}
+	return b, err
+}
+
+var zeroBytes = make([]byte, 0, 0)
+
+func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
+	if m == nil {
+		return nil, ErrNil
+	}
+	mi := MessageV2(m)
+	nbuf, err := protoV2.MarshalOptions{
+		Deterministic: deterministic,
+		AllowPartial:  true,
+	}.MarshalAppend(buf, mi)
+	if err != nil {
+		return buf, err
+	}
+	if len(buf) == len(nbuf) {
+		if !mi.ProtoReflect().IsValid() {
+			return buf, ErrNil
+		}
+	}
+	return nbuf, checkRequiredNotSet(mi)
+}
+
+// Unmarshal parses a wire-format message in b and places the decoded results in m.
+//
+// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
+// removed. Use UnmarshalMerge to preserve and append to existing data.
+func Unmarshal(b []byte, m Message) error {
+	m.Reset()
+	return UnmarshalMerge(b, m)
+}
+
+// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
+func UnmarshalMerge(b []byte, m Message) error {
+	mi := MessageV2(m)
+	out, err := protoV2.UnmarshalOptions{
+		AllowPartial: true,
+		Merge:        true,
+	}.UnmarshalState(protoiface.UnmarshalInput{
+		Buf:     b,
+		Message: mi.ProtoReflect(),
+	})
+	if err != nil {
+		return err
+	}
+	if out.Flags&protoiface.UnmarshalInitialized > 0 {
+		return nil
+	}
+	return checkRequiredNotSet(mi)
+}
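The new wire.go forwards Size, Marshal and Unmarshal to the APIv2 runtime (protoV2.MarshalOptions / UnmarshalOptions) while preserving the legacy semantics: Marshal never returns a nil slice for a valid empty message, Unmarshal resets before merging, and partial messages are still encoded with the missing-required error reported via checkRequiredNotSet. A minimal round-trip sketch against these shims, using the vendored descriptor package for the message type:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	src := &descpb.FileDescriptorProto{Name: proto.String("a.proto")}

	b, err := proto.Marshal(src) // delegates to protoV2.MarshalOptions.MarshalAppend
	if err != nil {
		panic(err)
	}
	fmt.Println(proto.Size(src) == len(b)) // true

	dst := &descpb.FileDescriptorProto{}
	if err := proto.Unmarshal(b, dst); err != nil { // Reset + UnmarshalMerge under the hood
		panic(err)
	}
	fmt.Println(dst.GetName()) // a.proto
}
```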
diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go
new file mode 100644
index 0000000..398e348
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wrappers.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// Bool stores v in a new bool value and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int stores v in a new int32 value and returns a pointer to it.
+//
+// Deprecated: Use Int32 instead.
+func Int(v int) *int32 { return Int32(int32(v)) }
+
+// Int32 stores v in a new int32 value and returns a pointer to it.
+func Int32(v int32) *int32 { return &v }
+
+// Int64 stores v in a new int64 value and returns a pointer to it.
+func Int64(v int64) *int64 { return &v }
+
+// Uint32 stores v in a new uint32 value and returns a pointer to it.
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 stores v in a new uint64 value and returns a pointer to it.
+func Uint64(v uint64) *uint64 { return &v }
+
+// Float32 stores v in a new float32 value and returns a pointer to it.
+func Float32(v float32) *float32 { return &v }
+
+// Float64 stores v in a new float64 value and returns a pointer to it.
+func Float64(v float64) *float64 { return &v }
+
+// String stores v in a new string value and returns a pointer to it.
+func String(v string) *string { return &v }
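These helpers exist because proto2-style optional scalar fields are generated as pointer fields, and taking the address of a literal is not legal Go; Bool, Int32, String and friends give one-line pointer construction. A short hedged usage sketch with the descriptor options message (the field values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	opts := &descpb.FileOptions{
		JavaPackage: proto.String("com.example.olt"), // *string
		Deprecated:  proto.Bool(false),               // *bool
		OptimizeFor: descpb.FileOptions_SPEED.Enum(), // *FileOptions_OptimizeMode
	}
	fmt.Println(opts.GetJavaPackage(), opts.GetOptimizeFor())
}
```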
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
index 1ded05b..63dc057 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -1,2887 +1,200 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/descriptor.proto
+// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
 
 package descriptor
 
 import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	math "math"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+	reflect "reflect"
 )
 
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/descriptor.proto.
 
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type
 
-type FieldDescriptorProto_Type int32
+const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE
+const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT
+const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64
+const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64
+const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32
+const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64
+const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32
+const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL
+const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING
+const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP
+const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE
+const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES
+const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32
+const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM
+const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32
+const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64
+const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32
+const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64
 
-const (
-	// 0 is reserved for errors.
-	// Order is weird for historical reasons.
-	FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
-	FieldDescriptorProto_TYPE_FLOAT  FieldDescriptorProto_Type = 2
-	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
-	// negative values are likely.
-	FieldDescriptorProto_TYPE_INT64  FieldDescriptorProto_Type = 3
-	FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
-	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
-	// negative values are likely.
-	FieldDescriptorProto_TYPE_INT32   FieldDescriptorProto_Type = 5
-	FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
-	FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
-	FieldDescriptorProto_TYPE_BOOL    FieldDescriptorProto_Type = 8
-	FieldDescriptorProto_TYPE_STRING  FieldDescriptorProto_Type = 9
-	// Tag-delimited aggregate.
-	// Group type is deprecated and not supported in proto3. However, Proto3
-	// implementations should still be able to parse the group wire format and
-	// treat group fields as unknown fields.
-	FieldDescriptorProto_TYPE_GROUP   FieldDescriptorProto_Type = 10
-	FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
-	// New in version 2.
-	FieldDescriptorProto_TYPE_BYTES    FieldDescriptorProto_Type = 12
-	FieldDescriptorProto_TYPE_UINT32   FieldDescriptorProto_Type = 13
-	FieldDescriptorProto_TYPE_ENUM     FieldDescriptorProto_Type = 14
-	FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
-	FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
-	FieldDescriptorProto_TYPE_SINT32   FieldDescriptorProto_Type = 17
-	FieldDescriptorProto_TYPE_SINT64   FieldDescriptorProto_Type = 18
-)
+var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name
+var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value
 
-var FieldDescriptorProto_Type_name = map[int32]string{
-	1:  "TYPE_DOUBLE",
-	2:  "TYPE_FLOAT",
-	3:  "TYPE_INT64",
-	4:  "TYPE_UINT64",
-	5:  "TYPE_INT32",
-	6:  "TYPE_FIXED64",
-	7:  "TYPE_FIXED32",
-	8:  "TYPE_BOOL",
-	9:  "TYPE_STRING",
-	10: "TYPE_GROUP",
-	11: "TYPE_MESSAGE",
-	12: "TYPE_BYTES",
-	13: "TYPE_UINT32",
-	14: "TYPE_ENUM",
-	15: "TYPE_SFIXED32",
-	16: "TYPE_SFIXED64",
-	17: "TYPE_SINT32",
-	18: "TYPE_SINT64",
-}
+type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label
 
-var FieldDescriptorProto_Type_value = map[string]int32{
-	"TYPE_DOUBLE":   1,
-	"TYPE_FLOAT":    2,
-	"TYPE_INT64":    3,
-	"TYPE_UINT64":   4,
-	"TYPE_INT32":    5,
-	"TYPE_FIXED64":  6,
-	"TYPE_FIXED32":  7,
-	"TYPE_BOOL":     8,
-	"TYPE_STRING":   9,
-	"TYPE_GROUP":    10,
-	"TYPE_MESSAGE":  11,
-	"TYPE_BYTES":    12,
-	"TYPE_UINT32":   13,
-	"TYPE_ENUM":     14,
-	"TYPE_SFIXED32": 15,
-	"TYPE_SFIXED64": 16,
-	"TYPE_SINT32":   17,
-	"TYPE_SINT64":   18,
-}
+const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL
+const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED
+const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED
 
-func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
-	p := new(FieldDescriptorProto_Type)
-	*p = x
-	return p
-}
+var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name
+var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value
 
-func (x FieldDescriptorProto_Type) String() string {
-	return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
-}
+type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode
 
-func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
-	if err != nil {
-		return err
-	}
-	*x = FieldDescriptorProto_Type(value)
-	return nil
-}
+const FileOptions_SPEED = descriptorpb.FileOptions_SPEED
+const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE
+const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME
 
-func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{4, 0}
-}
+var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name
+var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value
 
-type FieldDescriptorProto_Label int32
+type FieldOptions_CType = descriptorpb.FieldOptions_CType
 
-const (
-	// 0 is reserved for errors
-	FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
-	FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
-	FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
-)
+const FieldOptions_STRING = descriptorpb.FieldOptions_STRING
+const FieldOptions_CORD = descriptorpb.FieldOptions_CORD
+const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE
 
-var FieldDescriptorProto_Label_name = map[int32]string{
-	1: "LABEL_OPTIONAL",
-	2: "LABEL_REQUIRED",
-	3: "LABEL_REPEATED",
-}
+var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name
+var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value
 
-var FieldDescriptorProto_Label_value = map[string]int32{
-	"LABEL_OPTIONAL": 1,
-	"LABEL_REQUIRED": 2,
-	"LABEL_REPEATED": 3,
-}
+type FieldOptions_JSType = descriptorpb.FieldOptions_JSType
 
-func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
-	p := new(FieldDescriptorProto_Label)
-	*p = x
-	return p
-}
+const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL
+const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING
+const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER
 
-func (x FieldDescriptorProto_Label) String() string {
-	return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
-}
+var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name
+var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value
 
-func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
-	if err != nil {
-		return err
-	}
-	*x = FieldDescriptorProto_Label(value)
-	return nil
-}
+type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel
 
-func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{4, 1}
-}
-
-// Generated classes can be optimized for speed or code size.
-type FileOptions_OptimizeMode int32
+const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN
+const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS
+const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT
 
-const (
-	FileOptions_SPEED FileOptions_OptimizeMode = 1
-	// etc.
-	FileOptions_CODE_SIZE    FileOptions_OptimizeMode = 2
-	FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
-)
+var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name
+var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value
 
-var FileOptions_OptimizeMode_name = map[int32]string{
-	1: "SPEED",
-	2: "CODE_SIZE",
-	3: "LITE_RUNTIME",
-}
+type FileDescriptorSet = descriptorpb.FileDescriptorSet
+type FileDescriptorProto = descriptorpb.FileDescriptorProto
+type DescriptorProto = descriptorpb.DescriptorProto
+type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions
+type FieldDescriptorProto = descriptorpb.FieldDescriptorProto
+type OneofDescriptorProto = descriptorpb.OneofDescriptorProto
+type EnumDescriptorProto = descriptorpb.EnumDescriptorProto
+type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto
+type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto
+type MethodDescriptorProto = descriptorpb.MethodDescriptorProto
 
-var FileOptions_OptimizeMode_value = map[string]int32{
-	"SPEED":        1,
-	"CODE_SIZE":    2,
-	"LITE_RUNTIME": 3,
-}
+const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming
+const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming
 
-func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
-	p := new(FileOptions_OptimizeMode)
-	*p = x
-	return p
-}
+type FileOptions = descriptorpb.FileOptions
 
-func (x FileOptions_OptimizeMode) String() string {
-	return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
-}
+const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles
+const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8
+const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor
+const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices
+const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices
+const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices
+const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices
+const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated
+const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas
 
-func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
-	if err != nil {
-		return err
-	}
-	*x = FileOptions_OptimizeMode(value)
-	return nil
-}
+type MessageOptions = descriptorpb.MessageOptions
 
-func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{10, 0}
-}
+const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat
+const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor
+const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated
 
-type FieldOptions_CType int32
+type FieldOptions = descriptorpb.FieldOptions
 
-const (
-	// Default mode.
-	FieldOptions_STRING       FieldOptions_CType = 0
-	FieldOptions_CORD         FieldOptions_CType = 1
-	FieldOptions_STRING_PIECE FieldOptions_CType = 2
-)
+const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype
+const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype
+const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy
+const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated
+const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak
 
-var FieldOptions_CType_name = map[int32]string{
-	0: "STRING",
-	1: "CORD",
-	2: "STRING_PIECE",
-}
+type OneofOptions = descriptorpb.OneofOptions
+type EnumOptions = descriptorpb.EnumOptions
 
-var FieldOptions_CType_value = map[string]int32{
-	"STRING":       0,
-	"CORD":         1,
-	"STRING_PIECE": 2,
-}
+const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated
 
-func (x FieldOptions_CType) Enum() *FieldOptions_CType {
-	p := new(FieldOptions_CType)
-	*p = x
-	return p
-}
+type EnumValueOptions = descriptorpb.EnumValueOptions
 
-func (x FieldOptions_CType) String() string {
-	return proto.EnumName(FieldOptions_CType_name, int32(x))
-}
+const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated
 
-func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
-	if err != nil {
-		return err
-	}
-	*x = FieldOptions_CType(value)
-	return nil
-}
+type ServiceOptions = descriptorpb.ServiceOptions
 
-func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{12, 0}
-}
+const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated
 
-type FieldOptions_JSType int32
+type MethodOptions = descriptorpb.MethodOptions
 
-const (
-	// Use the default type.
-	FieldOptions_JS_NORMAL FieldOptions_JSType = 0
-	// Use JavaScript strings.
-	FieldOptions_JS_STRING FieldOptions_JSType = 1
-	// Use JavaScript numbers.
-	FieldOptions_JS_NUMBER FieldOptions_JSType = 2
-)
+const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated
+const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel
 
-var FieldOptions_JSType_name = map[int32]string{
-	0: "JS_NORMAL",
-	1: "JS_STRING",
-	2: "JS_NUMBER",
-}
+type UninterpretedOption = descriptorpb.UninterpretedOption
+type SourceCodeInfo = descriptorpb.SourceCodeInfo
+type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo
+type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange
+type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange
+type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange
+type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart
+type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location
+type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation
 
-var FieldOptions_JSType_value = map[string]int32{
-	"JS_NORMAL": 0,
-	"JS_STRING": 1,
-	"JS_NUMBER": 2,
-}
+var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor
 
-func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
-	p := new(FieldOptions_JSType)
-	*p = x
-	return p
+var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{
+	0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+	0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68,
+	0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65,
+	0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b,
+	0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x32,
 }
 
-func (x FieldOptions_JSType) String() string {
-	return proto.EnumName(FieldOptions_JSType_name, int32(x))
+var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
 }
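After this change the descriptor package no longer defines its own types: every identifier above is a type alias or constant forwarded to google.golang.org/protobuf/types/descriptorpb, so code importing the old path keeps compiling and interoperates with code written against the new one. A hedged sketch of that interchangeability:

```go
package main

import (
	"fmt"

	olddesc "github.com/golang/protobuf/protoc-gen-go/descriptor"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Because olddesc.FileDescriptorProto is an alias, both names denote the
	// identical type; values flow freely between old and new APIs.
	var fd *descriptorpb.FileDescriptorProto = &olddesc.FileDescriptorProto{}
	name := "a.proto"
	fd.Name = &name
	fmt.Println(fd.GetName())
}
```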
 
-func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
-	if err != nil {
-		return err
+func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() }
+func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() {
+	if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil {
+		return
 	}
-	*x = FieldOptions_JSType(value)
-	return nil
-}
-
-func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{12, 1}
-}
-
-// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
-// or neither? HTTP based RPC implementation may choose GET verb for safe
-// methods, and PUT verb for idempotent methods instead of the default POST.
-type MethodOptions_IdempotencyLevel int32
-
-const (
-	MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0
-	MethodOptions_NO_SIDE_EFFECTS     MethodOptions_IdempotencyLevel = 1
-	MethodOptions_IDEMPOTENT          MethodOptions_IdempotencyLevel = 2
-)
-
-var MethodOptions_IdempotencyLevel_name = map[int32]string{
-	0: "IDEMPOTENCY_UNKNOWN",
-	1: "NO_SIDE_EFFECTS",
-	2: "IDEMPOTENT",
-}
-
-var MethodOptions_IdempotencyLevel_value = map[string]int32{
-	"IDEMPOTENCY_UNKNOWN": 0,
-	"NO_SIDE_EFFECTS":     1,
-	"IDEMPOTENT":          2,
-}
-
-func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel {
-	p := new(MethodOptions_IdempotencyLevel)
-	*p = x
-	return p
-}
-
-func (x MethodOptions_IdempotencyLevel) String() string {
-	return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x))
-}
-
-func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel")
-	if err != nil {
-		return err
-	}
-	*x = MethodOptions_IdempotencyLevel(value)
-	return nil
-}
-
-func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{17, 0}
-}
-
-// The protocol compiler can output a FileDescriptorSet containing the .proto
-// files it parses.
-type FileDescriptorSet struct {
-	File                 []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
-	XXX_unrecognized     []byte                 `json:"-"`
-	XXX_sizecache        int32                  `json:"-"`
-}
-
-func (m *FileDescriptorSet) Reset()         { *m = FileDescriptorSet{} }
-func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
-func (*FileDescriptorSet) ProtoMessage()    {}
-func (*FileDescriptorSet) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{0}
-}
-
-func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b)
-}
-func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic)
-}
-func (m *FileDescriptorSet) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FileDescriptorSet.Merge(m, src)
-}
-func (m *FileDescriptorSet) XXX_Size() int {
-	return xxx_messageInfo_FileDescriptorSet.Size(m)
-}
-func (m *FileDescriptorSet) XXX_DiscardUnknown() {
-	xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo
-
-func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
-	if m != nil {
-		return m.File
-	}
-	return nil
-}
-
-// Describes a complete .proto file.
-type FileDescriptorProto struct {
-	Name    *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
-	// Names of files imported by this file.
-	Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
-	// Indexes of the public imported files in the dependency list above.
-	PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
-	// Indexes of the weak imported files in the dependency list.
-	// For Google-internal migration only. Do not use.
-	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
-	// All top-level definitions in this file.
-	MessageType []*DescriptorProto        `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
-	EnumType    []*EnumDescriptorProto    `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
-	Service     []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
-	Extension   []*FieldDescriptorProto   `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
-	Options     *FileOptions              `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
-	// This field contains optional information about the original source code.
-	// You may safely remove this entire field without harming runtime
-	// functionality of the descriptors -- the information is needed only by
-	// development tools.
-	SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
-	// The syntax of the proto file.
-	// The supported values are "proto2" and "proto3".
-	Syntax               *string  `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *FileDescriptorProto) Reset()         { *m = FileDescriptorProto{} }
-func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*FileDescriptorProto) ProtoMessage()    {}
-func (*FileDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{1}
-}
-
-func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b)
-}
-func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *FileDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FileDescriptorProto.Merge(m, src)
-}
-func (m *FileDescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_FileDescriptorProto.Size(m)
-}
-func (m *FileDescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo
-
-func (m *FileDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *FileDescriptorProto) GetPackage() string {
-	if m != nil && m.Package != nil {
-		return *m.Package
-	}
-	return ""
-}
-
-func (m *FileDescriptorProto) GetDependency() []string {
-	if m != nil {
-		return m.Dependency
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetPublicDependency() []int32 {
-	if m != nil {
-		return m.PublicDependency
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetWeakDependency() []int32 {
-	if m != nil {
-		return m.WeakDependency
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
-	if m != nil {
-		return m.MessageType
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
-	if m != nil {
-		return m.EnumType
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
-	if m != nil {
-		return m.Service
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
-	if m != nil {
-		return m.Extension
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetOptions() *FileOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
-	if m != nil {
-		return m.SourceCodeInfo
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetSyntax() string {
-	if m != nil && m.Syntax != nil {
-		return *m.Syntax
-	}
-	return ""
-}
-
-// Describes a message type.
-type DescriptorProto struct {
-	Name           *string                           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Field          []*FieldDescriptorProto           `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
-	Extension      []*FieldDescriptorProto           `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
-	NestedType     []*DescriptorProto                `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
-	EnumType       []*EnumDescriptorProto            `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
-	ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
-	OneofDecl      []*OneofDescriptorProto           `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
-	Options        *MessageOptions                   `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
-	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
-	// Reserved field names, which may not be used by fields in the same message.
-	// A given name may only be reserved once.
-	ReservedName         []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *DescriptorProto) Reset()         { *m = DescriptorProto{} }
-func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*DescriptorProto) ProtoMessage()    {}
-func (*DescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{2}
-}
-
-func (m *DescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_DescriptorProto.Unmarshal(m, b)
-}
-func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *DescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DescriptorProto.Merge(m, src)
-}
-func (m *DescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_DescriptorProto.Size(m)
-}
-func (m *DescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_DescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo
-
-func (m *DescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
-	if m != nil {
-		return m.Field
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
-	if m != nil {
-		return m.Extension
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
-	if m != nil {
-		return m.NestedType
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
-	if m != nil {
-		return m.EnumType
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
-	if m != nil {
-		return m.ExtensionRange
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
-	if m != nil {
-		return m.OneofDecl
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetOptions() *MessageOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
-	if m != nil {
-		return m.ReservedRange
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetReservedName() []string {
-	if m != nil {
-		return m.ReservedName
-	}
-	return nil
-}
-
-type DescriptorProto_ExtensionRange struct {
-	Start                *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
-	End                  *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
-	Options              *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
-	XXX_unrecognized     []byte                 `json:"-"`
-	XXX_sizecache        int32                  `json:"-"`
-}
-
-func (m *DescriptorProto_ExtensionRange) Reset()         { *m = DescriptorProto_ExtensionRange{} }
-func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
-func (*DescriptorProto_ExtensionRange) ProtoMessage()    {}
-func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{2, 0}
-}
-
-func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_Size() int {
-	return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() {
-	xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo
-
-func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
-	if m != nil && m.Start != nil {
-		return *m.Start
-	}
-	return 0
-}
-
-func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
-	if m != nil && m.End != nil {
-		return *m.End
-	}
-	return 0
-}
-
-func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-// Range of reserved tag numbers. Reserved tag numbers may not be used by
-// fields or extension ranges in the same message. Reserved ranges may
-// not overlap.
-type DescriptorProto_ReservedRange struct {
-	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
-	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *DescriptorProto_ReservedRange) Reset()         { *m = DescriptorProto_ReservedRange{} }
-func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
-func (*DescriptorProto_ReservedRange) ProtoMessage()    {}
-func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{2, 1}
-}
-
-func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b)
-}
-func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic)
-}
-func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src)
-}
-func (m *DescriptorProto_ReservedRange) XXX_Size() int {
-	return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m)
-}
-func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() {
-	xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo
-
-func (m *DescriptorProto_ReservedRange) GetStart() int32 {
-	if m != nil && m.Start != nil {
-		return *m.Start
-	}
-	return 0
-}
-
-func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
-	if m != nil && m.End != nil {
-		return *m.End
-	}
-	return 0
-}
-
-type ExtensionRangeOptions struct {
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *ExtensionRangeOptions) Reset()         { *m = ExtensionRangeOptions{} }
-func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) }
-func (*ExtensionRangeOptions) ProtoMessage()    {}
-func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{3}
-}
-
-var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_ExtensionRangeOptions
-}
-
-func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b)
-}
-func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic)
-}
-func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ExtensionRangeOptions.Merge(m, src)
-}
-func (m *ExtensionRangeOptions) XXX_Size() int {
-	return xxx_messageInfo_ExtensionRangeOptions.Size(m)
-}
-func (m *ExtensionRangeOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo
-
-func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-// Describes a field within a message.
-type FieldDescriptorProto struct {
-	Name   *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Number *int32                      `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
-	Label  *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
-	// If type_name is set, this need not be set.  If both this and type_name
-	// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
-	Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
-	// For message and enum types, this is the name of the type.  If the name
-	// starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
-	// rules are used to find the type (i.e. first the nested types within this
-	// message are searched, then within the parent, on up to the root
-	// namespace).
-	TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
-	// For extensions, this is the name of the type being extended.  It is
-	// resolved in the same manner as type_name.
-	Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
-	// For numeric types, contains the original text representation of the value.
-	// For booleans, "true" or "false".
-	// For strings, contains the default text contents (not escaped in any way).
-	// For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
-	// TODO(kenton):  Base-64 encode?
-	DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
-	// If set, gives the index of a oneof in the containing type's oneof_decl
-	// list.  This field is a member of that oneof.
-	OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
-	// JSON name of this field. The value is set by protocol compiler. If the
-	// user has set a "json_name" option on this field, that option's value
-	// will be used. Otherwise, it's deduced from the field's name by converting
-	// it to camelCase.
-	JsonName             *string       `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
-	Options              *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
-	XXX_unrecognized     []byte        `json:"-"`
-	XXX_sizecache        int32         `json:"-"`
-}
-
-func (m *FieldDescriptorProto) Reset()         { *m = FieldDescriptorProto{} }
-func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*FieldDescriptorProto) ProtoMessage()    {}
-func (*FieldDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{4}
-}
-
-func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b)
-}
-func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FieldDescriptorProto.Merge(m, src)
-}
-func (m *FieldDescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_FieldDescriptorProto.Size(m)
-}
-func (m *FieldDescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo
-
-func (m *FieldDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *FieldDescriptorProto) GetNumber() int32 {
-	if m != nil && m.Number != nil {
-		return *m.Number
-	}
-	return 0
-}
-
-func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
-	if m != nil && m.Label != nil {
-		return *m.Label
-	}
-	return FieldDescriptorProto_LABEL_OPTIONAL
-}
-
-func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
-	if m != nil && m.Type != nil {
-		return *m.Type
-	}
-	return FieldDescriptorProto_TYPE_DOUBLE
-}
-
-func (m *FieldDescriptorProto) GetTypeName() string {
-	if m != nil && m.TypeName != nil {
-		return *m.TypeName
-	}
-	return ""
-}
-
-func (m *FieldDescriptorProto) GetExtendee() string {
-	if m != nil && m.Extendee != nil {
-		return *m.Extendee
-	}
-	return ""
-}
-
-func (m *FieldDescriptorProto) GetDefaultValue() string {
-	if m != nil && m.DefaultValue != nil {
-		return *m.DefaultValue
-	}
-	return ""
-}
-
-func (m *FieldDescriptorProto) GetOneofIndex() int32 {
-	if m != nil && m.OneofIndex != nil {
-		return *m.OneofIndex
-	}
-	return 0
-}
-
-func (m *FieldDescriptorProto) GetJsonName() string {
-	if m != nil && m.JsonName != nil {
-		return *m.JsonName
-	}
-	return ""
-}
-
-func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
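The generated getters above all follow the same nil-guarded pattern. A minimal sketch of why that matters for callers, assuming the proto and descriptor import paths vendored in this tree; the field values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// A partially populated proto2 message: unset optional fields stay nil.
	field := &descriptor.FieldDescriptorProto{
		Name:   proto.String("ports"),
		Number: proto.Int32(3),
	}

	// Getters dereference the pointer when set and fall back to the zero
	// (or declared default) value when it is nil, so callers need no checks.
	fmt.Println(field.GetName())     // "ports"
	fmt.Println(field.GetJsonName()) // "" (unset)

	// Even a nil receiver is safe, because every getter guards m != nil.
	var missing *descriptor.FieldDescriptorProto
	fmt.Println(missing.GetNumber()) // 0
}
```

This is why descriptor-walking code typically chains getters instead of touching the pointer fields directly.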
-// Describes a oneof.
-type OneofDescriptorProto struct {
-	Name                 *string       `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Options              *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
-	XXX_unrecognized     []byte        `json:"-"`
-	XXX_sizecache        int32         `json:"-"`
-}
-
-func (m *OneofDescriptorProto) Reset()         { *m = OneofDescriptorProto{} }
-func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*OneofDescriptorProto) ProtoMessage()    {}
-func (*OneofDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{5}
-}
-
-func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b)
-}
-func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_OneofDescriptorProto.Merge(m, src)
-}
-func (m *OneofDescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_OneofDescriptorProto.Size(m)
-}
-func (m *OneofDescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo
-
-func (m *OneofDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *OneofDescriptorProto) GetOptions() *OneofOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-// Describes an enum type.
-type EnumDescriptorProto struct {
-	Name    *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Value   []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
-	Options *EnumOptions                `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
-	// Range of reserved numeric values. Reserved numeric values may not be used
-	// by enum values in the same enum declaration. Reserved ranges may not
-	// overlap.
-	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
-	// Reserved enum value names, which may not be reused. A given name may only
-	// be reserved once.
-	ReservedName         []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *EnumDescriptorProto) Reset()         { *m = EnumDescriptorProto{} }
-func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*EnumDescriptorProto) ProtoMessage()    {}
-func (*EnumDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{6}
-}
-
-func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b)
-}
-func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumDescriptorProto.Merge(m, src)
-}
-func (m *EnumDescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_EnumDescriptorProto.Size(m)
-}
-func (m *EnumDescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo
-
-func (m *EnumDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange {
-	if m != nil {
-		return m.ReservedRange
-	}
-	return nil
-}
-
-func (m *EnumDescriptorProto) GetReservedName() []string {
-	if m != nil {
-		return m.ReservedName
-	}
-	return nil
-}
-
-// Range of reserved numeric values. Reserved values may not be used by
-// entries in the same enum. Reserved ranges may not overlap.
-//
-// Note that this is distinct from DescriptorProto.ReservedRange in that it
-// is inclusive such that it can appropriately represent the entire int32
-// domain.
-type EnumDescriptorProto_EnumReservedRange struct {
-	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
-	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *EnumDescriptorProto_EnumReservedRange) Reset()         { *m = EnumDescriptorProto_EnumReservedRange{} }
-func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) }
-func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage()    {}
-func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{6, 0}
-}
-
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int {
-	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() {
-	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo
-
-func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 {
-	if m != nil && m.Start != nil {
-		return *m.Start
-	}
-	return 0
-}
-
-func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
-	if m != nil && m.End != nil {
-		return *m.End
-	}
-	return 0
-}
-
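As the range comment above stresses, EnumReservedRange is inclusive at both ends, unlike DescriptorProto.ReservedRange. A hedged sketch of a collision check that relies on that; isReserved is an illustrative helper, not part of this package:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// isReserved reports whether number falls inside any reserved range,
// treating both Start and End as inclusive, as documented above.
func isReserved(enum *descriptor.EnumDescriptorProto, number int32) bool {
	for _, r := range enum.GetReservedRange() {
		if number >= r.GetStart() && number <= r.GetEnd() {
			return true
		}
	}
	return false
}

func main() {
	enum := &descriptor.EnumDescriptorProto{
		Name: proto.String("Status"),
		ReservedRange: []*descriptor.EnumDescriptorProto_EnumReservedRange{
			{Start: proto.Int32(5), End: proto.Int32(7)},
		},
	}
	fmt.Println(isReserved(enum, 7)) // true: End is inclusive here
	fmt.Println(isReserved(enum, 8)) // false
}
```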
-// Describes a value within an enum.
-type EnumValueDescriptorProto struct {
-	Name                 *string           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Number               *int32            `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
-	Options              *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
-	XXX_unrecognized     []byte            `json:"-"`
-	XXX_sizecache        int32             `json:"-"`
-}
-
-func (m *EnumValueDescriptorProto) Reset()         { *m = EnumValueDescriptorProto{} }
-func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*EnumValueDescriptorProto) ProtoMessage()    {}
-func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{7}
-}
-
-func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b)
-}
-func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src)
-}
-func (m *EnumValueDescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_EnumValueDescriptorProto.Size(m)
-}
-func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo
-
-func (m *EnumValueDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *EnumValueDescriptorProto) GetNumber() int32 {
-	if m != nil && m.Number != nil {
-		return *m.Number
-	}
-	return 0
-}
-
-func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-// Describes a service.
-type ServiceDescriptorProto struct {
-	Name                 *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Method               []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
-	Options              *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
-	XXX_unrecognized     []byte                   `json:"-"`
-	XXX_sizecache        int32                    `json:"-"`
-}
-
-func (m *ServiceDescriptorProto) Reset()         { *m = ServiceDescriptorProto{} }
-func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*ServiceDescriptorProto) ProtoMessage()    {}
-func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{8}
-}
-
-func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b)
-}
-func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServiceDescriptorProto.Merge(m, src)
-}
-func (m *ServiceDescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_ServiceDescriptorProto.Size(m)
-}
-func (m *ServiceDescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo
-
-func (m *ServiceDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
-	if m != nil {
-		return m.Method
-	}
-	return nil
-}
-
-func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-// Describes a method of a service.
-type MethodDescriptorProto struct {
-	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// Input and output type names.  These are resolved in the same way as
-	// FieldDescriptorProto.type_name, but must refer to a message type.
-	InputType  *string        `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
-	OutputType *string        `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
-	Options    *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
-	// Identifies whether the client streams multiple client messages
-	ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
-	// Identifies whether the server streams multiple server messages
-	ServerStreaming      *bool    `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *MethodDescriptorProto) Reset()         { *m = MethodDescriptorProto{} }
-func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*MethodDescriptorProto) ProtoMessage()    {}
-func (*MethodDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{9}
-}
-
-func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b)
-}
-func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MethodDescriptorProto.Merge(m, src)
-}
-func (m *MethodDescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_MethodDescriptorProto.Size(m)
-}
-func (m *MethodDescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo
-
-const Default_MethodDescriptorProto_ClientStreaming bool = false
-const Default_MethodDescriptorProto_ServerStreaming bool = false
-
-func (m *MethodDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *MethodDescriptorProto) GetInputType() string {
-	if m != nil && m.InputType != nil {
-		return *m.InputType
-	}
-	return ""
-}
-
-func (m *MethodDescriptorProto) GetOutputType() string {
-	if m != nil && m.OutputType != nil {
-		return *m.OutputType
-	}
-	return ""
-}
-
-func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-func (m *MethodDescriptorProto) GetClientStreaming() bool {
-	if m != nil && m.ClientStreaming != nil {
-		return *m.ClientStreaming
-	}
-	return Default_MethodDescriptorProto_ClientStreaming
-}
-
-func (m *MethodDescriptorProto) GetServerStreaming() bool {
-	if m != nil && m.ServerStreaming != nil {
-		return *m.ServerStreaming
-	}
-	return Default_MethodDescriptorProto_ServerStreaming
-}
-
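Because both streaming flags default to false, the four gRPC call types fall out of the two getters above. A sketch under that assumption; the method and type names here are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// kind classifies a method purely from its streaming flags.
func kind(m *descriptor.MethodDescriptorProto) string {
	switch {
	case m.GetClientStreaming() && m.GetServerStreaming():
		return "bidirectional streaming"
	case m.GetClientStreaming():
		return "client streaming"
	case m.GetServerStreaming():
		return "server streaming"
	default:
		return "unary" // both flags default to false when unset
	}
}

func main() {
	unary := &descriptor.MethodDescriptorProto{
		Name:       proto.String("GetDevice"),  // illustrative name
		InputType:  proto.String(".voltha.ID"), // illustrative types
		OutputType: proto.String(".voltha.Device"),
	}
	stream := &descriptor.MethodDescriptorProto{
		Name:            proto.String("ReceivePacketsIn"),
		InputType:       proto.String(".voltha.Empty"),
		OutputType:      proto.String(".voltha.PacketIn"),
		ServerStreaming: proto.Bool(true),
	}
	fmt.Println(kind(unary))  // "unary"
	fmt.Println(kind(stream)) // "server streaming"
}
```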
-type FileOptions struct {
-	// Sets the Java package where classes generated from this .proto will be
-	// placed.  By default, the proto package is used, but this is often
-	// inappropriate because proto packages do not normally start with backwards
-	// domain names.
-	JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
-	// If set, all the classes from the .proto file are wrapped in a single
-	// outer class with the given name.  This applies to both Proto1
-	// (equivalent to the old "--one_java_file" option) and Proto2 (where
-	// a .proto always translates to a single class, but you may want to
-	// explicitly choose the class name).
-	JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
-	// If set true, then the Java code generator will generate a separate .java
-	// file for each top-level message, enum, and service defined in the .proto
-	// file.  Thus, these types will *not* be nested inside the outer class
-	// named by java_outer_classname.  However, the outer class will still be
-	// generated to contain the file's getDescriptor() method as well as any
-	// top-level extensions defined in the file.
-	JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
-	// This option does nothing.
-	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use.
-	// If set true, then the Java2 code generator will generate code that
-	// throws an exception whenever an attempt is made to assign a non-UTF-8
-	// byte sequence to a string field.
-	// Message reflection will do the same.
-	// However, an extension field still accepts non-UTF-8 byte sequences.
-	// This option has no effect when used with the lite runtime.
-	JavaStringCheckUtf8 *bool                     `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
-	OptimizeFor         *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
-	// Sets the Go package where structs generated from this .proto will be
-	// placed. If omitted, the Go package will be derived from the following:
-	//   - The basename of the package import path, if provided.
-	//   - Otherwise, the package statement in the .proto file, if present.
-	//   - Otherwise, the basename of the .proto file, without extension.
-	GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
-	// Should generic services be generated in each language?  "Generic" services
-	// are not specific to any particular RPC system.  They are generated by the
-	// main code generators in each language (without additional plugins).
-	// Generic services were the only kind of service generation supported by
-	// early versions of google.protobuf.
-	//
-	// Generic services are now considered deprecated in favor of using plugins
-	// that generate code specific to your particular RPC system.  Therefore,
-	// these default to false.  Old code which depends on generic services should
-	// explicitly set them to true.
-	CcGenericServices   *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
-	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
-	PyGenericServices   *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
-	PhpGenericServices  *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
-	// Is this file deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for everything in the file, or it will be completely ignored; in the very
-	// least, this is a formalization for deprecating files.
-	Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// Enables the use of arenas for the proto messages in this file. This applies
-	// only to generated classes for C++.
-	CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
-	// Sets the objective c class prefix which is prepended to all objective c
-	// generated classes from this .proto. There is no default.
-	ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
-	// Namespace for generated classes; defaults to the package.
-	CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
-	// By default, Swift generators take the proto package, CamelCase it
-	// (replacing '.' with underscores), and use the result to prefix the
-	// types/symbols defined. When this option is provided, they use this
-	// value instead to prefix the types/symbols defined.
-	SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
-	// Sets the php class prefix which is prepended to all php generated classes
-	// from this .proto. Default is empty.
-	PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
-	// Use this option to change the namespace of php generated classes. Default
-	// is empty. When this option is empty, the package name will be used for
-	// determining the namespace.
-	PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
-	// Use this option to change the namespace of php generated metadata classes.
-	// Default is empty. When this option is empty, the proto file name will be used
-	// for determining the namespace.
-	PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
-	// Use this option to change the package of ruby generated classes. Default
-	// is empty. When this option is not set, the package name will be used for
-	// determining the ruby package.
-	RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
-	// The parser stores options it doesn't recognize here.
-	// See the documentation for the "Options" section above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *FileOptions) Reset()         { *m = FileOptions{} }
-func (m *FileOptions) String() string { return proto.CompactTextString(m) }
-func (*FileOptions) ProtoMessage()    {}
-func (*FileOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{10}
-}
-
-var extRange_FileOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_FileOptions
-}
-
-func (m *FileOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_FileOptions.Unmarshal(m, b)
-}
-func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic)
-}
-func (m *FileOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FileOptions.Merge(m, src)
-}
-func (m *FileOptions) XXX_Size() int {
-	return xxx_messageInfo_FileOptions.Size(m)
-}
-func (m *FileOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_FileOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileOptions proto.InternalMessageInfo
-
-const Default_FileOptions_JavaMultipleFiles bool = false
-const Default_FileOptions_JavaStringCheckUtf8 bool = false
-const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
-const Default_FileOptions_CcGenericServices bool = false
-const Default_FileOptions_JavaGenericServices bool = false
-const Default_FileOptions_PyGenericServices bool = false
-const Default_FileOptions_PhpGenericServices bool = false
-const Default_FileOptions_Deprecated bool = false
-const Default_FileOptions_CcEnableArenas bool = false
-
-func (m *FileOptions) GetJavaPackage() string {
-	if m != nil && m.JavaPackage != nil {
-		return *m.JavaPackage
-	}
-	return ""
-}
-
-func (m *FileOptions) GetJavaOuterClassname() string {
-	if m != nil && m.JavaOuterClassname != nil {
-		return *m.JavaOuterClassname
-	}
-	return ""
-}
-
-func (m *FileOptions) GetJavaMultipleFiles() bool {
-	if m != nil && m.JavaMultipleFiles != nil {
-		return *m.JavaMultipleFiles
-	}
-	return Default_FileOptions_JavaMultipleFiles
-}
-
-// Deprecated: Do not use.
-func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
-	if m != nil && m.JavaGenerateEqualsAndHash != nil {
-		return *m.JavaGenerateEqualsAndHash
-	}
-	return false
-}
-
-func (m *FileOptions) GetJavaStringCheckUtf8() bool {
-	if m != nil && m.JavaStringCheckUtf8 != nil {
-		return *m.JavaStringCheckUtf8
-	}
-	return Default_FileOptions_JavaStringCheckUtf8
-}
-
-func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
-	if m != nil && m.OptimizeFor != nil {
-		return *m.OptimizeFor
-	}
-	return Default_FileOptions_OptimizeFor
-}
-
-func (m *FileOptions) GetGoPackage() string {
-	if m != nil && m.GoPackage != nil {
-		return *m.GoPackage
-	}
-	return ""
-}
-
-func (m *FileOptions) GetCcGenericServices() bool {
-	if m != nil && m.CcGenericServices != nil {
-		return *m.CcGenericServices
-	}
-	return Default_FileOptions_CcGenericServices
-}
-
-func (m *FileOptions) GetJavaGenericServices() bool {
-	if m != nil && m.JavaGenericServices != nil {
-		return *m.JavaGenericServices
-	}
-	return Default_FileOptions_JavaGenericServices
-}
-
-func (m *FileOptions) GetPyGenericServices() bool {
-	if m != nil && m.PyGenericServices != nil {
-		return *m.PyGenericServices
-	}
-	return Default_FileOptions_PyGenericServices
-}
-
-func (m *FileOptions) GetPhpGenericServices() bool {
-	if m != nil && m.PhpGenericServices != nil {
-		return *m.PhpGenericServices
-	}
-	return Default_FileOptions_PhpGenericServices
-}
-
-func (m *FileOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_FileOptions_Deprecated
-}
-
-func (m *FileOptions) GetCcEnableArenas() bool {
-	if m != nil && m.CcEnableArenas != nil {
-		return *m.CcEnableArenas
-	}
-	return Default_FileOptions_CcEnableArenas
-}
-
-func (m *FileOptions) GetObjcClassPrefix() string {
-	if m != nil && m.ObjcClassPrefix != nil {
-		return *m.ObjcClassPrefix
-	}
-	return ""
-}
-
-func (m *FileOptions) GetCsharpNamespace() string {
-	if m != nil && m.CsharpNamespace != nil {
-		return *m.CsharpNamespace
-	}
-	return ""
-}
-
-func (m *FileOptions) GetSwiftPrefix() string {
-	if m != nil && m.SwiftPrefix != nil {
-		return *m.SwiftPrefix
-	}
-	return ""
-}
-
-func (m *FileOptions) GetPhpClassPrefix() string {
-	if m != nil && m.PhpClassPrefix != nil {
-		return *m.PhpClassPrefix
-	}
-	return ""
-}
-
-func (m *FileOptions) GetPhpNamespace() string {
-	if m != nil && m.PhpNamespace != nil {
-		return *m.PhpNamespace
-	}
-	return ""
-}
-
-func (m *FileOptions) GetPhpMetadataNamespace() string {
-	if m != nil && m.PhpMetadataNamespace != nil {
-		return *m.PhpMetadataNamespace
-	}
-	return ""
-}
-
-func (m *FileOptions) GetRubyPackage() string {
-	if m != nil && m.RubyPackage != nil {
-		return *m.RubyPackage
-	}
-	return ""
-}
-
-func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
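The GoPackage comment above spells out a fallback order. A simplified, hedged sketch of how a plugin might apply it through the nil-safe getters; goPackage and the sample values are illustrative, not how protoc-gen-go itself implements the rule:

```go
package main

import (
	"fmt"
	"path"
	"strings"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// goPackage mirrors, in simplified form, the fallback order documented on
// GoPackage: explicit option, then the proto package, then the file basename.
func goPackage(fd *descriptor.FileDescriptorProto) string {
	if opts := fd.GetOptions(); opts.GetGoPackage() != "" {
		return opts.GetGoPackage()
	}
	if fd.GetPackage() != "" {
		return fd.GetPackage()
	}
	return strings.TrimSuffix(path.Base(fd.GetName()), ".proto")
}

func main() {
	fd := &descriptor.FileDescriptorProto{
		Name:    proto.String("voltha/events.proto"), // illustrative file name
		Package: proto.String("voltha"),
		Options: &descriptor.FileOptions{
			GoPackage: proto.String("github.com/opencord/voltha-protos/go/voltha"), // illustrative path
		},
	}
	fmt.Println(goPackage(fd)) // prints the explicit go_package value
}
```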
-type MessageOptions struct {
-	// Set true to use the old proto1 MessageSet wire format for extensions.
-	// This is provided for backwards-compatibility with the MessageSet wire
-	// format.  You should not use this for any other reason:  It's less
-	// efficient, has fewer features, and is more complicated.
-	//
-	// The message must be defined exactly as follows:
-	//   message Foo {
-	//     option message_set_wire_format = true;
-	//     extensions 4 to max;
-	//   }
-	// Note that the message cannot have any defined fields; MessageSets only
-	// have extensions.
-	//
-	// All extensions of your type must be singular messages; e.g. they cannot
-	// be int32s, enums, or repeated messages.
-	//
-	// Because this is an option, the above two restrictions are not enforced by
-	// the protocol compiler.
-	MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
-	// Disables the generation of the standard "descriptor()" accessor, which can
-	// conflict with a field of the same name.  This is meant to make migration
-	// from proto1 easier; new code should avoid fields named "descriptor".
-	NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
-	// Is this message deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the message, or it will be completely ignored; in the very least,
-	// this is a formalization for deprecating messages.
-	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// Whether the message is an automatically generated map entry type for the
-	// maps field.
-	//
-	// For maps fields:
-	//     map<KeyType, ValueType> map_field = 1;
-	// The parsed descriptor looks like:
-	//     message MapFieldEntry {
-	//         option map_entry = true;
-	//         optional KeyType key = 1;
-	//         optional ValueType value = 2;
-	//     }
-	//     repeated MapFieldEntry map_field = 1;
-	//
-	// Implementations may choose not to generate the map_entry=true message, but
-	// use a native map in the target language to hold the keys and values.
-	// The reflection APIs in such implementations still need to work as
-	// if the field is a repeated message field.
-	//
-	// NOTE: Do not set the option in .proto files. Always use the maps syntax
-	// instead. The option should only be implicitly set by the proto compiler
-	// parser.
-	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *MessageOptions) Reset()         { *m = MessageOptions{} }
-func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
-func (*MessageOptions) ProtoMessage()    {}
-func (*MessageOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{11}
-}
-
-var extRange_MessageOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_MessageOptions
-}
-
-func (m *MessageOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_MessageOptions.Unmarshal(m, b)
-}
-func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic)
-}
-func (m *MessageOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MessageOptions.Merge(m, src)
-}
-func (m *MessageOptions) XXX_Size() int {
-	return xxx_messageInfo_MessageOptions.Size(m)
-}
-func (m *MessageOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_MessageOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MessageOptions proto.InternalMessageInfo
-
-const Default_MessageOptions_MessageSetWireFormat bool = false
-const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
-const Default_MessageOptions_Deprecated bool = false
-
-func (m *MessageOptions) GetMessageSetWireFormat() bool {
-	if m != nil && m.MessageSetWireFormat != nil {
-		return *m.MessageSetWireFormat
-	}
-	return Default_MessageOptions_MessageSetWireFormat
-}
-
-func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
-	if m != nil && m.NoStandardDescriptorAccessor != nil {
-		return *m.NoStandardDescriptorAccessor
-	}
-	return Default_MessageOptions_NoStandardDescriptorAccessor
-}
-
-func (m *MessageOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_MessageOptions_Deprecated
-}
-
-func (m *MessageOptions) GetMapEntry() bool {
-	if m != nil && m.MapEntry != nil {
-		return *m.MapEntry
-	}
-	return false
-}
-
-func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
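Per the map_entry comment above, the compiler marks the synthesized MapFieldEntry message rather than the map field itself. A small sketch of testing for that marker; isMapEntry is an illustrative helper and the message literal is hand-built for the example:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// isMapEntry reports whether a nested message was synthesized by the compiler
// for a map<K,V> field, per the map_entry comment above.
func isMapEntry(msg *descriptor.DescriptorProto) bool {
	return msg.GetOptions().GetMapEntry()
}

func main() {
	entry := &descriptor.DescriptorProto{
		Name:    proto.String("MapFieldEntry"), // name taken from the comment above
		Options: &descriptor.MessageOptions{MapEntry: proto.Bool(true)},
	}
	fmt.Println(isMapEntry(entry))                          // true
	fmt.Println(isMapEntry(&descriptor.DescriptorProto{})) // false: option unset
}
```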
-type FieldOptions struct {
-	// The ctype option instructs the C++ code generator to use a different
-	// representation of the field than it normally would.  See the specific
-	// options below.  This option is not yet implemented in the open source
-	// release -- sorry, we'll try to include it in a future version!
-	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
-	// The packed option can be enabled for repeated primitive fields to enable
-	// a more efficient representation on the wire. Rather than repeatedly
-	// writing the tag and type for each element, the entire array is encoded as
-	// a single length-delimited blob. In proto3, only explicitly setting it to
-	// false will avoid using packed encoding.
-	Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
-	// The jstype option determines the JavaScript type used for values of the
-	// field.  The option is permitted only for 64 bit integral and fixed types
-	// (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING
-	// is represented as JavaScript string, which avoids loss of precision that
-	// can happen when a large value is converted to a floating point JavaScript number.
-	// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
-	// use the JavaScript "number" type.  The behavior of the default option
-	// JS_NORMAL is implementation dependent.
-	//
-	// This option is an enum to permit additional types to be added, e.g.
-	// goog.math.Integer.
-	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
-	// Should this field be parsed lazily?  Lazy applies only to message-type
-	// fields.  It means that when the outer message is initially parsed, the
-	// inner message's contents will not be parsed but instead stored in encoded
-	// form.  The inner message will actually be parsed when it is first accessed.
-	//
-	// This is only a hint.  Implementations are free to choose whether to use
-	// eager or lazy parsing regardless of the value of this option.  However,
-	// setting this option true suggests that the protocol author believes that
-	// using lazy parsing on this field is worth the additional bookkeeping
-	// overhead typically needed to implement it.
-	//
-	// This option does not affect the public interface of any generated code;
-	// all method signatures remain the same.  Furthermore, thread-safety of the
-	// interface is not affected by this option; const methods remain safe to
-	// call from multiple threads concurrently, while non-const methods continue
-	// to require exclusive access.
-	//
-	//
-	// Note that implementations may choose not to check required fields within
-	// a lazy sub-message.  That is, calling IsInitialized() on the outer message
-	// may return true even if the inner message has missing required fields.
-	// This is necessary because otherwise the inner message would have to be
-	// parsed in order to perform the check, defeating the purpose of lazy
-	// parsing.  An implementation which chooses not to check required fields
-	// must be consistent about it.  That is, for any particular sub-message, the
-	// implementation must either *always* check its required fields, or *never*
-	// check its required fields, regardless of whether or not the message has
-	// been parsed.
-	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
-	// Is this field deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for accessors, or it will be completely ignored; in the very least, this
-	// is a formalization for deprecating fields.
-	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// For Google-internal migration only. Do not use.
-	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *FieldOptions) Reset()         { *m = FieldOptions{} }
-func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
-func (*FieldOptions) ProtoMessage()    {}
-func (*FieldOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{12}
-}
-
-var extRange_FieldOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_FieldOptions
-}
-
-func (m *FieldOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_FieldOptions.Unmarshal(m, b)
-}
-func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic)
-}
-func (m *FieldOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FieldOptions.Merge(m, src)
-}
-func (m *FieldOptions) XXX_Size() int {
-	return xxx_messageInfo_FieldOptions.Size(m)
-}
-func (m *FieldOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_FieldOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FieldOptions proto.InternalMessageInfo
-
-const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
-const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
-const Default_FieldOptions_Lazy bool = false
-const Default_FieldOptions_Deprecated bool = false
-const Default_FieldOptions_Weak bool = false
-
-func (m *FieldOptions) GetCtype() FieldOptions_CType {
-	if m != nil && m.Ctype != nil {
-		return *m.Ctype
-	}
-	return Default_FieldOptions_Ctype
-}
-
-func (m *FieldOptions) GetPacked() bool {
-	if m != nil && m.Packed != nil {
-		return *m.Packed
-	}
-	return false
-}
-
-func (m *FieldOptions) GetJstype() FieldOptions_JSType {
-	if m != nil && m.Jstype != nil {
-		return *m.Jstype
-	}
-	return Default_FieldOptions_Jstype
-}
-
-func (m *FieldOptions) GetLazy() bool {
-	if m != nil && m.Lazy != nil {
-		return *m.Lazy
-	}
-	return Default_FieldOptions_Lazy
-}
-
-func (m *FieldOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_FieldOptions_Deprecated
-}
-
-func (m *FieldOptions) GetWeak() bool {
-	if m != nil && m.Weak != nil {
-		return *m.Weak
-	}
-	return Default_FieldOptions_Weak
-}
-
-func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-type OneofOptions struct {
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *OneofOptions) Reset()         { *m = OneofOptions{} }
-func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
-func (*OneofOptions) ProtoMessage()    {}
-func (*OneofOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{13}
-}
-
-var extRange_OneofOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_OneofOptions
-}
-
-func (m *OneofOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_OneofOptions.Unmarshal(m, b)
-}
-func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic)
-}
-func (m *OneofOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_OneofOptions.Merge(m, src)
-}
-func (m *OneofOptions) XXX_Size() int {
-	return xxx_messageInfo_OneofOptions.Size(m)
-}
-func (m *OneofOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_OneofOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_OneofOptions proto.InternalMessageInfo
-
-func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-type EnumOptions struct {
-	// Set this option to true to allow mapping different tag names to the same
-	// value.
-	AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
-	// Is this enum deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the enum, or it will be completely ignored; in the very least, this
-	// is a formalization for deprecating enums.
-	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *EnumOptions) Reset()         { *m = EnumOptions{} }
-func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
-func (*EnumOptions) ProtoMessage()    {}
-func (*EnumOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{14}
-}
-
-var extRange_EnumOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_EnumOptions
-}
-
-func (m *EnumOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_EnumOptions.Unmarshal(m, b)
-}
-func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic)
-}
-func (m *EnumOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumOptions.Merge(m, src)
-}
-func (m *EnumOptions) XXX_Size() int {
-	return xxx_messageInfo_EnumOptions.Size(m)
-}
-func (m *EnumOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_EnumOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumOptions proto.InternalMessageInfo
-
-const Default_EnumOptions_Deprecated bool = false
-
-func (m *EnumOptions) GetAllowAlias() bool {
-	if m != nil && m.AllowAlias != nil {
-		return *m.AllowAlias
-	}
-	return false
-}
-
-func (m *EnumOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_EnumOptions_Deprecated
-}
-
-func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-type EnumValueOptions struct {
-	// Is this enum value deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the enum value, or it will be completely ignored; in the very least,
-	// this is a formalization for deprecating enum values.
-	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *EnumValueOptions) Reset()         { *m = EnumValueOptions{} }
-func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
-func (*EnumValueOptions) ProtoMessage()    {}
-func (*EnumValueOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{15}
-}
-
-var extRange_EnumValueOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_EnumValueOptions
-}
-
-func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b)
-}
-func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic)
-}
-func (m *EnumValueOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumValueOptions.Merge(m, src)
-}
-func (m *EnumValueOptions) XXX_Size() int {
-	return xxx_messageInfo_EnumValueOptions.Size(m)
-}
-func (m *EnumValueOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_EnumValueOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo
-
-const Default_EnumValueOptions_Deprecated bool = false
-
-func (m *EnumValueOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_EnumValueOptions_Deprecated
-}
-
-func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-type ServiceOptions struct {
-	// Is this service deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the service, or it will be completely ignored; in the very least,
-	// this is a formalization for deprecating services.
-	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *ServiceOptions) Reset()         { *m = ServiceOptions{} }
-func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
-func (*ServiceOptions) ProtoMessage()    {}
-func (*ServiceOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{16}
-}
-
-var extRange_ServiceOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_ServiceOptions
-}
-
-func (m *ServiceOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ServiceOptions.Unmarshal(m, b)
-}
-func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic)
-}
-func (m *ServiceOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServiceOptions.Merge(m, src)
-}
-func (m *ServiceOptions) XXX_Size() int {
-	return xxx_messageInfo_ServiceOptions.Size(m)
-}
-func (m *ServiceOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_ServiceOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo
-
-const Default_ServiceOptions_Deprecated bool = false
-
-func (m *ServiceOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_ServiceOptions_Deprecated
-}
-
-func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-type MethodOptions struct {
-	// Is this method deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the method, or it will be completely ignored; in the very least,
-	// this is a formalization for deprecating methods.
-	Deprecated       *bool                           `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *MethodOptions) Reset()         { *m = MethodOptions{} }
-func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
-func (*MethodOptions) ProtoMessage()    {}
-func (*MethodOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{17}
-}
-
-var extRange_MethodOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_MethodOptions
-}
-
-func (m *MethodOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_MethodOptions.Unmarshal(m, b)
-}
-func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic)
-}
-func (m *MethodOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MethodOptions.Merge(m, src)
-}
-func (m *MethodOptions) XXX_Size() int {
-	return xxx_messageInfo_MethodOptions.Size(m)
-}
-func (m *MethodOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_MethodOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MethodOptions proto.InternalMessageInfo
-
-const Default_MethodOptions_Deprecated bool = false
-const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
-
-func (m *MethodOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_MethodOptions_Deprecated
-}
-
-func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
-	if m != nil && m.IdempotencyLevel != nil {
-		return *m.IdempotencyLevel
-	}
-	return Default_MethodOptions_IdempotencyLevel
-}
-
-func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-// A message representing an option the parser does not recognize. This only
-// appears in options protos created by the compiler::Parser class.
-// DescriptorPool resolves these when building Descriptor objects. Therefore,
-// options protos in descriptor objects (e.g. returned by Descriptor::options(),
-// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
-// in them.
-type UninterpretedOption struct {
-	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
-	// The value of the uninterpreted option, in whatever type the tokenizer
-	// identified it as during parsing. Exactly one of these should be set.
-	IdentifierValue      *string  `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
-	PositiveIntValue     *uint64  `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
-	NegativeIntValue     *int64   `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
-	DoubleValue          *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
-	StringValue          []byte   `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
-	AggregateValue       *string  `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *UninterpretedOption) Reset()         { *m = UninterpretedOption{} }
-func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
-func (*UninterpretedOption) ProtoMessage()    {}
-func (*UninterpretedOption) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{18}
-}
-
-func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b)
-}
-func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic)
-}
-func (m *UninterpretedOption) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UninterpretedOption.Merge(m, src)
-}
-func (m *UninterpretedOption) XXX_Size() int {
-	return xxx_messageInfo_UninterpretedOption.Size(m)
-}
-func (m *UninterpretedOption) XXX_DiscardUnknown() {
-	xxx_messageInfo_UninterpretedOption.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo
-
-func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
-	if m != nil {
-		return m.Name
-	}
-	return nil
-}
-
-func (m *UninterpretedOption) GetIdentifierValue() string {
-	if m != nil && m.IdentifierValue != nil {
-		return *m.IdentifierValue
-	}
-	return ""
-}
-
-func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
-	if m != nil && m.PositiveIntValue != nil {
-		return *m.PositiveIntValue
-	}
-	return 0
-}
-
-func (m *UninterpretedOption) GetNegativeIntValue() int64 {
-	if m != nil && m.NegativeIntValue != nil {
-		return *m.NegativeIntValue
-	}
-	return 0
-}
-
-func (m *UninterpretedOption) GetDoubleValue() float64 {
-	if m != nil && m.DoubleValue != nil {
-		return *m.DoubleValue
-	}
-	return 0
-}
-
-func (m *UninterpretedOption) GetStringValue() []byte {
-	if m != nil {
-		return m.StringValue
-	}
-	return nil
-}
-
-func (m *UninterpretedOption) GetAggregateValue() string {
-	if m != nil && m.AggregateValue != nil {
-		return *m.AggregateValue
-	}
-	return ""
-}
-
-// The name of the uninterpreted option.  Each string represents a segment in
-// a dot-separated name.  is_extension is true iff a segment represents an
-// extension (denoted with parentheses in options specs in .proto files).
-// E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
-// "foo.(bar.baz).qux".
-type UninterpretedOption_NamePart struct {
-	NamePart             *string  `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
-	IsExtension          *bool    `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *UninterpretedOption_NamePart) Reset()         { *m = UninterpretedOption_NamePart{} }
-func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
-func (*UninterpretedOption_NamePart) ProtoMessage()    {}
-func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{18, 0}
-}
-
-func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b)
-}
-func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic)
-}
-func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src)
-}
-func (m *UninterpretedOption_NamePart) XXX_Size() int {
-	return xxx_messageInfo_UninterpretedOption_NamePart.Size(m)
-}
-func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() {
-	xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo
-
-func (m *UninterpretedOption_NamePart) GetNamePart() string {
-	if m != nil && m.NamePart != nil {
-		return *m.NamePart
-	}
-	return ""
-}
-
-func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
-	if m != nil && m.IsExtension != nil {
-		return *m.IsExtension
-	}
-	return false
-}
-
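The NamePart comment above gives the encoding of "foo.(bar.baz).qux". A hedged sketch that reverses it; optionName is an illustrative helper, not part of this package:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// optionName joins NameParts back into the dotted form used in .proto files,
// wrapping extension segments in parentheses as the comment above describes.
func optionName(opt *descriptor.UninterpretedOption) string {
	parts := make([]string, 0, len(opt.GetName()))
	for _, p := range opt.GetName() {
		if p.GetIsExtension() {
			parts = append(parts, "("+p.GetNamePart()+")")
		} else {
			parts = append(parts, p.GetNamePart())
		}
	}
	return strings.Join(parts, ".")
}

func main() {
	opt := &descriptor.UninterpretedOption{
		Name: []*descriptor.UninterpretedOption_NamePart{
			{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
			{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
			{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
		},
	}
	fmt.Println(optionName(opt)) // foo.(bar.baz).qux
}
```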
-// Encapsulates information about the original source file from which a
-// FileDescriptorProto was generated.
-type SourceCodeInfo struct {
-	// A Location identifies a piece of source code in a .proto file which
-	// corresponds to a particular definition.  This information is intended
-	// to be useful to IDEs, code indexers, documentation generators, and similar
-	// tools.
-	//
-	// For example, say we have a file like:
-	//   message Foo {
-	//     optional string foo = 1;
-	//   }
-	// Let's look at just the field definition:
-	//   optional string foo = 1;
-	//   ^       ^^     ^^  ^  ^^^
-	//   a       bc     de  f  ghi
-	// We have the following locations:
-	//   span   path               represents
-	//   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
-	//   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
-	//   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
-	//   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
-	//   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
-	//
-	// Notes:
-	// - A location may refer to a repeated field itself (i.e. not to any
-	//   particular index within it).  This is used whenever a set of elements are
-	//   logically enclosed in a single code segment.  For example, an entire
-	//   extend block (possibly containing multiple extension definitions) will
-	//   have an outer location whose path refers to the "extensions" repeated
-	//   field without an index.
-	// - Multiple locations may have the same path.  This happens when a single
-	//   logical declaration is spread out across multiple places.  The most
-	//   obvious example is the "extend" block again -- there may be multiple
-	//   extend blocks in the same scope, each of which will have the same path.
-	// - A location's span is not always a subset of its parent's span.  For
-	//   example, the "extendee" of an extension declaration appears at the
-	//   beginning of the "extend" block and is shared by all extensions within
-	//   the block.
-	// - Just because a location's span is a subset of some other location's span
-	//   does not mean that it is a descendant.  For example, a "group" defines
-	//   both a type and a field in a single declaration.  Thus, the locations
-	//   corresponding to the type and field and their components will overlap.
-	// - Code which tries to interpret locations should probably be designed to
-	//   ignore those that it doesn't understand, as more types of locations could
-	//   be recorded in the future.
-	Location             []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
-	XXX_unrecognized     []byte                     `json:"-"`
-	XXX_sizecache        int32                      `json:"-"`
-}
-
-func (m *SourceCodeInfo) Reset()         { *m = SourceCodeInfo{} }
-func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
-func (*SourceCodeInfo) ProtoMessage()    {}
-func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{19}
-}
-
-func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b)
-}
-func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic)
-}
-func (m *SourceCodeInfo) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SourceCodeInfo.Merge(m, src)
-}
-func (m *SourceCodeInfo) XXX_Size() int {
-	return xxx_messageInfo_SourceCodeInfo.Size(m)
-}
-func (m *SourceCodeInfo) XXX_DiscardUnknown() {
-	xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo
-
-func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
-	if m != nil {
-		return m.Location
-	}
-	return nil
-}
-
-type SourceCodeInfo_Location struct {
-	// Identifies which part of the FileDescriptorProto was defined at this
-	// location.
-	//
-	// Each element is a field number or an index.  They form a path from
-	// the root FileDescriptorProto to the place where the definition appears.  For
-	// example, this path:
-	//   [ 4, 3, 2, 7, 1 ]
-	// refers to:
-	//   file.message_type(3)  // 4, 3
-	//       .field(7)         // 2, 7
-	//       .name()           // 1
-	// This is because FileDescriptorProto.message_type has field number 4:
-	//   repeated DescriptorProto message_type = 4;
-	// and DescriptorProto.field has field number 2:
-	//   repeated FieldDescriptorProto field = 2;
-	// and FieldDescriptorProto.name has field number 1:
-	//   optional string name = 1;
-	//
-	// Thus, the above path gives the location of a field name.  If we removed
-	// the last element:
-	//   [ 4, 3, 2, 7 ]
-	// this path refers to the whole field declaration (from the beginning
-	// of the label to the terminating semicolon).
-	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
-	// Always has exactly three or four elements: start line, start column,
-	// end line (optional, otherwise assumed same as start line), end column.
-	// These are packed into a single field for efficiency.  Note that line
-	// and column numbers are zero-based -- typically you will want to add
-	// 1 to each before displaying to a user.
-	Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
-	// If this SourceCodeInfo represents a complete declaration, these are any
-	// comments appearing before and after the declaration which appear to be
-	// attached to the declaration.
-	//
-	// A series of line comments appearing on consecutive lines, with no other
-	// tokens appearing on those lines, will be treated as a single comment.
-	//
-	// leading_detached_comments will keep paragraphs of comments that appear
-	// before (but not connected to) the current element. Each paragraph,
-	// separated by empty lines, will be one comment element in the repeated
-	// field.
-	//
-	// Only the comment content is provided; comment markers (e.g. //) are
-	// stripped out.  For block comments, leading whitespace and an asterisk
-	// will be stripped from the beginning of each line other than the first.
-	// Newlines are included in the output.
-	//
-	// Examples:
-	//
-	//   optional int32 foo = 1;  // Comment attached to foo.
-	//   // Comment attached to bar.
-	//   optional int32 bar = 2;
-	//
-	//   optional string baz = 3;
-	//   // Comment attached to baz.
-	//   // Another line attached to baz.
-	//
-	//   // Comment attached to qux.
-	//   //
-	//   // Another line attached to qux.
-	//   optional double qux = 4;
-	//
-	//   // Detached comment for corge. This is not leading or trailing comments
-	//   // to qux or corge because there are blank lines separating it from
-	//   // both.
-	//
-	//   // Detached comment for corge paragraph 2.
-	//
-	//   optional string corge = 5;
-	//   /* Block comment attached
-	//    * to corge.  Leading asterisks
-	//    * will be removed. */
-	//   /* Block comment attached to
-	//    * grault. */
-	//   optional int32 grault = 6;
-	//
-	//   // ignored detached comments.
-	LeadingComments         *string  `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
-	TrailingComments        *string  `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
-	LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
-	XXX_NoUnkeyedLiteral    struct{} `json:"-"`
-	XXX_unrecognized        []byte   `json:"-"`
-	XXX_sizecache           int32    `json:"-"`
-}
-
-func (m *SourceCodeInfo_Location) Reset()         { *m = SourceCodeInfo_Location{} }
-func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
-func (*SourceCodeInfo_Location) ProtoMessage()    {}
-func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{19, 0}
-}
-
-func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b)
-}
-func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic)
-}
-func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src)
-}
-func (m *SourceCodeInfo_Location) XXX_Size() int {
-	return xxx_messageInfo_SourceCodeInfo_Location.Size(m)
-}
-func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() {
-	xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo
-
-func (m *SourceCodeInfo_Location) GetPath() []int32 {
-	if m != nil {
-		return m.Path
-	}
-	return nil
-}
-
-func (m *SourceCodeInfo_Location) GetSpan() []int32 {
-	if m != nil {
-		return m.Span
-	}
-	return nil
-}
-
-func (m *SourceCodeInfo_Location) GetLeadingComments() string {
-	if m != nil && m.LeadingComments != nil {
-		return *m.LeadingComments
-	}
-	return ""
-}
-
-func (m *SourceCodeInfo_Location) GetTrailingComments() string {
-	if m != nil && m.TrailingComments != nil {
-		return *m.TrailingComments
-	}
-	return ""
-}
-
-func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
-	if m != nil {
-		return m.LeadingDetachedComments
-	}
-	return nil
-}
-
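The path/span layout documented on SourceCodeInfo_Location above is easy to get wrong (zero-based positions, optional end line), so the following hedged sketch shows how a tool might look up a location by path and render its span for display. `findLocation` and `spanString` are made-up names for the example, not part of the generated package.

```go
package descutil

import (
	"fmt"

	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// findLocation returns the first location whose path equals want, e.g.
// []int32{4, 3, 2, 7, 1} for the name of field index 7 in message index 3.
func findLocation(info *descriptor.SourceCodeInfo, want []int32) *descriptor.SourceCodeInfo_Location {
	for _, loc := range info.GetLocation() {
		path := loc.GetPath()
		if len(path) != len(want) {
			continue
		}
		match := true
		for i := range path {
			if path[i] != want[i] {
				match = false
				break
			}
		}
		if match {
			return loc
		}
	}
	return nil
}

// spanString converts a zero-based span to the 1-based "line:col-line:col"
// form a user expects; a 3-element span shares the start line.
func spanString(loc *descriptor.SourceCodeInfo_Location) string {
	s := loc.GetSpan()
	switch len(s) {
	case 3:
		return fmt.Sprintf("%d:%d-%d:%d", s[0]+1, s[1]+1, s[0]+1, s[2]+1)
	case 4:
		return fmt.Sprintf("%d:%d-%d:%d", s[0]+1, s[1]+1, s[2]+1, s[3]+1)
	default:
		return "<invalid span>"
	}
}
```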
-// Describes the relationship between generated code and its original source
-// file. A GeneratedCodeInfo message is associated with only one generated
-// source file, but may contain references to different source .proto files.
-type GeneratedCodeInfo struct {
-	// An Annotation connects some span of text in generated code to an element
-	// of its generating .proto file.
-	Annotation           []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
-	XXX_unrecognized     []byte                          `json:"-"`
-	XXX_sizecache        int32                           `json:"-"`
-}
-
-func (m *GeneratedCodeInfo) Reset()         { *m = GeneratedCodeInfo{} }
-func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
-func (*GeneratedCodeInfo) ProtoMessage()    {}
-func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{20}
-}
-
-func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b)
-}
-func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic)
-}
-func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GeneratedCodeInfo.Merge(m, src)
-}
-func (m *GeneratedCodeInfo) XXX_Size() int {
-	return xxx_messageInfo_GeneratedCodeInfo.Size(m)
-}
-func (m *GeneratedCodeInfo) XXX_DiscardUnknown() {
-	xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo
-
-func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
-	if m != nil {
-		return m.Annotation
-	}
-	return nil
-}
-
-type GeneratedCodeInfo_Annotation struct {
-	// Identifies the element in the original source .proto file. This field
-	// is formatted the same as SourceCodeInfo.Location.path.
-	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
-	// Identifies the filesystem path to the original source .proto.
-	SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
-	// Identifies the starting offset in bytes in the generated code
-	// that relates to the identified object.
-	Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
-	// Identifies the ending offset in bytes in the generated code that
-	// relates to the identified offset. The end offset should be one past
-	// the last relevant byte (so the length of the text = end - begin).
-	End                  *int32   `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *GeneratedCodeInfo_Annotation) Reset()         { *m = GeneratedCodeInfo_Annotation{} }
-func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
-func (*GeneratedCodeInfo_Annotation) ProtoMessage()    {}
-func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
-	return fileDescriptor_e5baabe45344a177, []int{20, 0}
-}
-
-func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_Size() int {
-	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() {
-	xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo
-
-func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
-	if m != nil {
-		return m.Path
-	}
-	return nil
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string {
-	if m != nil && m.SourceFile != nil {
-		return *m.SourceFile
-	}
-	return ""
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 {
-	if m != nil && m.Begin != nil {
-		return *m.Begin
-	}
-	return 0
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 {
-	if m != nil && m.End != nil {
-		return *m.End
-	}
-	return 0
-}
-
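Because Begin/End above are byte offsets with End one past the last relevant byte, the annotated region can be cut out of the generated file with a plain slice. A small sketch under that assumption; `annotatedText` is a hypothetical helper, not part of this package.

```go
package descutil

import "github.com/golang/protobuf/protoc-gen-go/descriptor"

// annotatedText returns the bytes of generated source that an annotation
// covers. End is exclusive, so the region length is End - Begin.
func annotatedText(generated []byte, ann *descriptor.GeneratedCodeInfo_Annotation) []byte {
	begin, end := ann.GetBegin(), ann.GetEnd()
	if begin < 0 || begin > end || int(end) > len(generated) {
		return nil // offsets do not fit the file; ignore the annotation
	}
	return generated[begin:end]
}
```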
-func init() {
-	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
-	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
-	proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
-	proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
-	proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
-	proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
-	proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
-	proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
-	proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
-	proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
-	proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
-	proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions")
-	proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
-	proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
-	proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
-	proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange")
-	proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
-	proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
-	proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
-	proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
-	proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
-	proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
-	proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions")
-	proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
-	proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
-	proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
-	proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
-	proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
-	proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
-	proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
-	proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
-	proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
-	proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
-}
-
-func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) }
-
-var fileDescriptor_e5baabe45344a177 = []byte{
-	// 2589 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6,
-	0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca,
-	0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee,
-	0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca,
-	0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80,
-	0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c,
-	0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73,
-	0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04,
-	0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a,
-	0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0,
-	0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52,
-	0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90,
-	0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88,
-	0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd,
-	0x54, 0xce, 0x6e, 0x67, 0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c,
-	0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6,
-	0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf,
-	0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 0xa3, 0x9f, 0x1f, 0x79,
-	0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11,
-	0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53,
-	0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84,
-	0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4,
-	0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e,
-	0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9,
-	0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2,
-	0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02,
-	0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6,
-	0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d,
-	0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33,
-	0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79,
-	0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a,
-	0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a,
-	0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c,
-	0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56,
-	0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 0x92, 0x4d, 0xfc, 0x06,
-	0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1,
-	0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23,
-	0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01,
-	0xbf, 0xbd, 0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f,
-	0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d,
-	0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58,
-	0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32,
-	0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e,
-	0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb,
-	0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11,
-	0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02,
-	0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f,
-	0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31,
-	0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac,
-	0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f,
-	0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d,
-	0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac,
-	0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e,
-	0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72,
-	0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 0xd6, 0x9d, 0x5b,
-	0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2,
-	0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e,
-	0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94,
-	0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a,
-	0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61,
-	0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0,
-	0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5,
-	0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a,
-	0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a,
-	0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8,
-	0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64,
-	0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c,
-	0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8,
-	0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb,
-	0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2,
-	0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33,
-	0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4,
-	0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15,
-	0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39,
-	0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9,
-	0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41,
-	0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40,
-	0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35,
-	0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 0xbd, 0xf0,
-	0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4,
-	0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53,
-	0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e,
-	0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d,
-	0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e,
-	0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f,
-	0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36,
-	0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 0x64, 0x94, 0xe3, 0x60,
-	0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1,
-	0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d,
-	0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45,
-	0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58,
-	0xbe, 0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3,
-	0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c,
-	0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87,
-	0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49,
-	0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26,
-	0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39,
-	0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c,
-	0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77,
-	0x89, 0x4e, 0xe5, 0xb7, 0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83,
-	0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87,
-	0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20,
-	0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6,
-	0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e,
-	0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c,
-	0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0,
-	0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8,
-	0x1f, 0xe4, 0x8a, 0x6f, 0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0,
-	0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3,
-	0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1,
-	0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53,
-	0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42,
-	0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b,
-	0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15,
-	0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae,
-	0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2,
-	0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35,
-	0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0,
-	0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82,
-	0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a,
-	0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c,
-	0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5,
-	0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29,
-	0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca,
-	0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd,
-	0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18,
-	0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb,
-	0x39, 0x37, 0xa5, 0x1f, 0xd2, 0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae,
-	0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6,
-	0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a,
-	0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47,
-	0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8,
-	0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e,
-	0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0,
-	0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70,
-	0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41,
-	0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e,
-	0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c,
-	0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47,
-	0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2,
-	0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66,
-	0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2,
-	0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0,
-	0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9,
-	0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40,
-	0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c,
-	0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe,
-	0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d,
-	0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99,
-	0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69,
-	0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2,
-	0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda,
-	0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86,
-	0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24,
-	0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8,
-	0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25,
-	0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e,
-	0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee,
-	0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39,
-	0x89, 0xaf, 0xa9, 0xf7, 0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f,
-	0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff,
-	0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00,
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File
+	file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil
+	file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil
+	file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil
 }
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
deleted file mode 100644
index ed08fcb..0000000
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
+++ /dev/null
@@ -1,883 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc.  All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Author: kenton@google.com (Kenton Varda)
-//  Based on original Protocol Buffers design by
-//  Sanjay Ghemawat, Jeff Dean, and others.
-//
-// The messages in this file describe the definitions found in .proto files.
-// A valid .proto file can be translated directly to a FileDescriptorProto
-// without any other information (e.g. without reading its imports).
-
-
-syntax = "proto2";
-
-package google.protobuf;
-option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "DescriptorProtos";
-option csharp_namespace = "Google.Protobuf.Reflection";
-option objc_class_prefix = "GPB";
-option cc_enable_arenas = true;
-
-// descriptor.proto must be optimized for speed because reflection-based
-// algorithms don't work during bootstrapping.
-option optimize_for = SPEED;
-
-// The protocol compiler can output a FileDescriptorSet containing the .proto
-// files it parses.
-message FileDescriptorSet {
-  repeated FileDescriptorProto file = 1;
-}
-
-// Describes a complete .proto file.
-message FileDescriptorProto {
-  optional string name = 1;       // file name, relative to root of source tree
-  optional string package = 2;    // e.g. "foo", "foo.bar", etc.
-
-  // Names of files imported by this file.
-  repeated string dependency = 3;
-  // Indexes of the public imported files in the dependency list above.
-  repeated int32 public_dependency = 10;
-  // Indexes of the weak imported files in the dependency list.
-  // For Google-internal migration only. Do not use.
-  repeated int32 weak_dependency = 11;
-
-  // All top-level definitions in this file.
-  repeated DescriptorProto message_type = 4;
-  repeated EnumDescriptorProto enum_type = 5;
-  repeated ServiceDescriptorProto service = 6;
-  repeated FieldDescriptorProto extension = 7;
-
-  optional FileOptions options = 8;
-
-  // This field contains optional information about the original source code.
-  // You may safely remove this entire field without harming runtime
-  // functionality of the descriptors -- the information is needed only by
-  // development tools.
-  optional SourceCodeInfo source_code_info = 9;
-
-  // The syntax of the proto file.
-  // The supported values are "proto2" and "proto3".
-  optional string syntax = 12;
-}
-
-// Describes a message type.
-message DescriptorProto {
-  optional string name = 1;
-
-  repeated FieldDescriptorProto field = 2;
-  repeated FieldDescriptorProto extension = 6;
-
-  repeated DescriptorProto nested_type = 3;
-  repeated EnumDescriptorProto enum_type = 4;
-
-  message ExtensionRange {
-    optional int32 start = 1;
-    optional int32 end = 2;
-
-    optional ExtensionRangeOptions options = 3;
-  }
-  repeated ExtensionRange extension_range = 5;
-
-  repeated OneofDescriptorProto oneof_decl = 8;
-
-  optional MessageOptions options = 7;
-
-  // Range of reserved tag numbers. Reserved tag numbers may not be used by
-  // fields or extension ranges in the same message. Reserved ranges may
-  // not overlap.
-  message ReservedRange {
-    optional int32 start = 1; // Inclusive.
-    optional int32 end = 2;   // Exclusive.
-  }
-  repeated ReservedRange reserved_range = 9;
-  // Reserved field names, which may not be used by fields in the same message.
-  // A given name may only be reserved once.
-  repeated string reserved_name = 10;
-}
-
-message ExtensionRangeOptions {
-  // The parser stores options it doesn't recognize here. See above.
-  repeated UninterpretedOption uninterpreted_option = 999;
-
-  // Clients can define custom options in extensions of this message. See above.
-  extensions 1000 to max;
-}
-
-// Describes a field within a message.
-message FieldDescriptorProto {
-  enum Type {
-    // 0 is reserved for errors.
-    // Order is weird for historical reasons.
-    TYPE_DOUBLE         = 1;
-    TYPE_FLOAT          = 2;
-    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
-    // negative values are likely.
-    TYPE_INT64          = 3;
-    TYPE_UINT64         = 4;
-    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
-    // negative values are likely.
-    TYPE_INT32          = 5;
-    TYPE_FIXED64        = 6;
-    TYPE_FIXED32        = 7;
-    TYPE_BOOL           = 8;
-    TYPE_STRING         = 9;
-    // Tag-delimited aggregate.
-    // Group type is deprecated and not supported in proto3. However, Proto3
-    // implementations should still be able to parse the group wire format and
-    // treat group fields as unknown fields.
-    TYPE_GROUP          = 10;
-    TYPE_MESSAGE        = 11;  // Length-delimited aggregate.
-
-    // New in version 2.
-    TYPE_BYTES          = 12;
-    TYPE_UINT32         = 13;
-    TYPE_ENUM           = 14;
-    TYPE_SFIXED32       = 15;
-    TYPE_SFIXED64       = 16;
-    TYPE_SINT32         = 17;  // Uses ZigZag encoding.
-    TYPE_SINT64         = 18;  // Uses ZigZag encoding.
-  };
-
-  enum Label {
-    // 0 is reserved for errors
-    LABEL_OPTIONAL      = 1;
-    LABEL_REQUIRED      = 2;
-    LABEL_REPEATED      = 3;
-  };
-
-  optional string name = 1;
-  optional int32 number = 3;
-  optional Label label = 4;
-
-  // If type_name is set, this need not be set.  If both this and type_name
-  // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
-  optional Type type = 5;
-
-  // For message and enum types, this is the name of the type.  If the name
-  // starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
-  // rules are used to find the type (i.e. first the nested types within this
-  // message are searched, then within the parent, on up to the root
-  // namespace).
-  optional string type_name = 6;
-
-  // For extensions, this is the name of the type being extended.  It is
-  // resolved in the same manner as type_name.
-  optional string extendee = 2;
-
-  // For numeric types, contains the original text representation of the value.
-  // For booleans, "true" or "false".
-  // For strings, contains the default text contents (not escaped in any way).
-  // For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
-  // TODO(kenton):  Base-64 encode?
-  optional string default_value = 7;
-
-  // If set, gives the index of a oneof in the containing type's oneof_decl
-  // list.  This field is a member of that oneof.
-  optional int32 oneof_index = 9;
-
-  // JSON name of this field. The value is set by protocol compiler. If the
-  // user has set a "json_name" option on this field, that option's value
-  // will be used. Otherwise, it's deduced from the field's name by converting
-  // it to camelCase.
-  optional string json_name = 10;
-
-  optional FieldOptions options = 8;
-}
-
-// Describes a oneof.
-message OneofDescriptorProto {
-  optional string name = 1;
-  optional OneofOptions options = 2;
-}
-
-// Describes an enum type.
-message EnumDescriptorProto {
-  optional string name = 1;
-
-  repeated EnumValueDescriptorProto value = 2;
-
-  optional EnumOptions options = 3;
-
-  // Range of reserved numeric values. Reserved values may not be used by
-  // entries in the same enum. Reserved ranges may not overlap.
-  //
-  // Note that this is distinct from DescriptorProto.ReservedRange in that it
-  // is inclusive such that it can appropriately represent the entire int32
-  // domain.
-  message EnumReservedRange {
-    optional int32 start = 1; // Inclusive.
-    optional int32 end = 2;   // Inclusive.
-  }
-
-  // Range of reserved numeric values. Reserved numeric values may not be used
-  // by enum values in the same enum declaration. Reserved ranges may not
-  // overlap.
-  repeated EnumReservedRange reserved_range = 4;
-
-  // Reserved enum value names, which may not be reused. A given name may only
-  // be reserved once.
-  repeated string reserved_name = 5;
-}
-
-// Describes a value within an enum.
-message EnumValueDescriptorProto {
-  optional string name = 1;
-  optional int32 number = 2;
-
-  optional EnumValueOptions options = 3;
-}
-
-// Describes a service.
-message ServiceDescriptorProto {
-  optional string name = 1;
-  repeated MethodDescriptorProto method = 2;
-
-  optional ServiceOptions options = 3;
-}
-
-// Describes a method of a service.
-message MethodDescriptorProto {
-  optional string name = 1;
-
-  // Input and output type names.  These are resolved in the same way as
-  // FieldDescriptorProto.type_name, but must refer to a message type.
-  optional string input_type = 2;
-  optional string output_type = 3;
-
-  optional MethodOptions options = 4;
-
-  // Identifies if client streams multiple client messages
-  optional bool client_streaming = 5 [default=false];
-  // Identifies if server streams multiple server messages
-  optional bool server_streaming = 6 [default=false];
-}
-
-
-// ===================================================================
-// Options
-
-// Each of the definitions above may have "options" attached.  These are
-// just annotations which may cause code to be generated slightly differently
-// or may contain hints for code that manipulates protocol messages.
-//
-// Clients may define custom options as extensions of the *Options messages.
-// These extensions may not yet be known at parsing time, so the parser cannot
-// store the values in them.  Instead it stores them in a field in the *Options
-// message called uninterpreted_option. This field must have the same name
-// across all *Options messages. We then use this field to populate the
-// extensions when we build a descriptor, at which point all protos have been
-// parsed and so all extensions are known.
-//
-// Extension numbers for custom options may be chosen as follows:
-// * For options which will only be used within a single application or
-//   organization, or for experimental options, use field numbers 50000
-//   through 99999.  It is up to you to ensure that you do not use the
-//   same number for multiple options.
-// * For options which will be published and used publicly by multiple
-//   independent entities, e-mail protobuf-global-extension-registry@google.com
-//   to reserve extension numbers. Simply provide your project name (e.g.
-//   Objective-C plugin) and your project website (if available) -- there's no
-//   need to explain how you intend to use them. Usually you only need one
-//   extension number. You can declare multiple options with only one extension
-//   number by putting them in a sub-message. See the Custom Options section of
-//   the docs for examples:
-//   https://developers.google.com/protocol-buffers/docs/proto#options
-//   If this turns out to be popular, a web service will be set up
-//   to automatically assign option numbers.
-
-
-message FileOptions {
-
-  // Sets the Java package where classes generated from this .proto will be
-  // placed.  By default, the proto package is used, but this is often
-  // inappropriate because proto packages do not normally start with backwards
-  // domain names.
-  optional string java_package = 1;
-
-
-  // If set, all the classes from the .proto file are wrapped in a single
-  // outer class with the given name.  This applies to both Proto1
-  // (equivalent to the old "--one_java_file" option) and Proto2 (where
-  // a .proto always translates to a single class, but you may want to
-  // explicitly choose the class name).
-  optional string java_outer_classname = 8;
-
-  // If set true, then the Java code generator will generate a separate .java
-  // file for each top-level message, enum, and service defined in the .proto
-  // file.  Thus, these types will *not* be nested inside the outer class
-  // named by java_outer_classname.  However, the outer class will still be
-  // generated to contain the file's getDescriptor() method as well as any
-  // top-level extensions defined in the file.
-  optional bool java_multiple_files = 10 [default=false];
-
-  // This option does nothing.
-  optional bool java_generate_equals_and_hash = 20 [deprecated=true];
-
-  // If set true, then the Java2 code generator will generate code that
-  // throws an exception whenever an attempt is made to assign a non-UTF-8
-  // byte sequence to a string field.
-  // Message reflection will do the same.
-  // However, an extension field still accepts non-UTF-8 byte sequences.
-  // This option has no effect when used with the lite runtime.
-  optional bool java_string_check_utf8 = 27 [default=false];
-
-
-  // Generated classes can be optimized for speed or code size.
-  enum OptimizeMode {
-    SPEED = 1;        // Generate complete code for parsing, serialization,
-                      // etc.
-    CODE_SIZE = 2;    // Use ReflectionOps to implement these methods.
-    LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
-  }
-  optional OptimizeMode optimize_for = 9 [default=SPEED];
-
-  // Sets the Go package where structs generated from this .proto will be
-  // placed. If omitted, the Go package will be derived from the following:
-  //   - The basename of the package import path, if provided.
-  //   - Otherwise, the package statement in the .proto file, if present.
-  //   - Otherwise, the basename of the .proto file, without extension.
-  optional string go_package = 11;
-
-
-
-  // Should generic services be generated in each language?  "Generic" services
-  // are not specific to any particular RPC system.  They are generated by the
-  // main code generators in each language (without additional plugins).
-  // Generic services were the only kind of service generation supported by
-  // early versions of google.protobuf.
-  //
-  // Generic services are now considered deprecated in favor of using plugins
-  // that generate code specific to your particular RPC system.  Therefore,
-  // these default to false.  Old code which depends on generic services should
-  // explicitly set them to true.
-  optional bool cc_generic_services = 16 [default=false];
-  optional bool java_generic_services = 17 [default=false];
-  optional bool py_generic_services = 18 [default=false];
-  optional bool php_generic_services = 42 [default=false];
-
-  // Is this file deprecated?
-  // Depending on the target platform, this can emit Deprecated annotations
-  // for everything in the file, or it will be completely ignored; in the very
-  // least, this is a formalization for deprecating files.
-  optional bool deprecated = 23 [default=false];
-
-  // Enables the use of arenas for the proto messages in this file. This applies
-  // only to generated classes for C++.
-  optional bool cc_enable_arenas = 31 [default=false];
-
-
-  // Sets the objective c class prefix which is prepended to all objective c
-  // generated classes from this .proto. There is no default.
-  optional string objc_class_prefix = 36;
-
-  // Namespace for generated classes; defaults to the package.
-  optional string csharp_namespace = 37;
-
-  // By default Swift generators will take the proto package and CamelCase it
-  // replacing '.' with underscore and use that to prefix the types/symbols
-  // defined. When this option is provided, they will use this value instead
-  // to prefix the types/symbols defined.
-  optional string swift_prefix = 39;
-
-  // Sets the php class prefix which is prepended to all php generated classes
-  // from this .proto. Default is empty.
-  optional string php_class_prefix = 40;
-
-  // Use this option to change the namespace of php generated classes. Default
-  // is empty. When this option is empty, the package name will be used for
-  // determining the namespace.
-  optional string php_namespace = 41;
-
-
-  // Use this option to change the namespace of php generated metadata classes.
-  // Default is empty. When this option is empty, the proto file name will be used
-  // for determining the namespace.
-  optional string php_metadata_namespace = 44;
-
-  // Use this option to change the package of ruby generated classes. Default
-  // is empty. When this option is not set, the package name will be used for
-  // determining the ruby package.
-  optional string ruby_package = 45;
-
-  // The parser stores options it doesn't recognize here.
-  // See the documentation for the "Options" section above.
-  repeated UninterpretedOption uninterpreted_option = 999;
-
-  // Clients can define custom options in extensions of this message.
-  // See the documentation for the "Options" section above.
-  extensions 1000 to max;
-
-  reserved 38;
-}
-
-message MessageOptions {
-  // Set true to use the old proto1 MessageSet wire format for extensions.
-  // This is provided for backwards-compatibility with the MessageSet wire
-  // format.  You should not use this for any other reason:  It's less
-  // efficient, has fewer features, and is more complicated.
-  //
-  // The message must be defined exactly as follows:
-  //   message Foo {
-  //     option message_set_wire_format = true;
-  //     extensions 4 to max;
-  //   }
-  // Note that the message cannot have any defined fields; MessageSets only
-  // have extensions.
-  //
-  // All extensions of your type must be singular messages; e.g. they cannot
-  // be int32s, enums, or repeated messages.
-  //
-  // Because this is an option, the above two restrictions are not enforced by
-  // the protocol compiler.
-  optional bool message_set_wire_format = 1 [default=false];
-
-  // Disables the generation of the standard "descriptor()" accessor, which can
-  // conflict with a field of the same name.  This is meant to make migration
-  // from proto1 easier; new code should avoid fields named "descriptor".
-  optional bool no_standard_descriptor_accessor = 2 [default=false];
-
-  // Is this message deprecated?
-  // Depending on the target platform, this can emit Deprecated annotations
-  // for the message, or it will be completely ignored; in the very least,
-  // this is a formalization for deprecating messages.
-  optional bool deprecated = 3 [default=false];
-
-  // Whether the message is an automatically generated map entry type for the
-  // maps field.
-  //
-  // For maps fields:
-  //     map<KeyType, ValueType> map_field = 1;
-  // The parsed descriptor looks like:
-  //     message MapFieldEntry {
-  //         option map_entry = true;
-  //         optional KeyType key = 1;
-  //         optional ValueType value = 2;
-  //     }
-  //     repeated MapFieldEntry map_field = 1;
-  //
-  // Implementations may choose not to generate the map_entry=true message, but
-  // use a native map in the target language to hold the keys and values.
-  // The reflection APIs in such implementations still need to work as
-  // if the field is a repeated message field.
-  //
-  // NOTE: Do not set the option in .proto files. Always use the maps syntax
-  // instead. The option should only be implicitly set by the proto compiler
-  // parser.
-  optional bool map_entry = 7;
-
-  reserved 8;  // javalite_serializable
-  reserved 9;  // javanano_as_lite
-
-  // The parser stores options it doesn't recognize here. See above.
-  repeated UninterpretedOption uninterpreted_option = 999;
-
-  // Clients can define custom options in extensions of this message. See above.
-  extensions 1000 to max;
-}
-
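Since the map_entry note above is a descriptor-level contract, code that walks descriptors typically has to recognise and skip the synthetic MapFieldEntry messages. A hedged Go sketch against the generated descriptor package from this same vendored tree; `isMapEntry` and `realNestedTypes` are illustrative names, not part of the package.

```go
package descutil

import "github.com/golang/protobuf/protoc-gen-go/descriptor"

// isMapEntry reports whether a nested message was synthesized by the
// compiler for a map<K, V> field, i.e. it carries map_entry = true.
func isMapEntry(m *descriptor.DescriptorProto) bool {
	return m.GetOptions().GetMapEntry()
}

// realNestedTypes filters out synthetic map entries so that a generator
// only emits code for messages the user actually declared.
func realNestedTypes(msg *descriptor.DescriptorProto) []*descriptor.DescriptorProto {
	var out []*descriptor.DescriptorProto
	for _, nested := range msg.GetNestedType() {
		if isMapEntry(nested) {
			continue
		}
		out = append(out, nested)
	}
	return out
}
```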
-message FieldOptions {
-  // The ctype option instructs the C++ code generator to use a different
-  // representation of the field than it normally would.  See the specific
-  // options below.  This option is not yet implemented in the open source
-  // release -- sorry, we'll try to include it in a future version!
-  optional CType ctype = 1 [default = STRING];
-  enum CType {
-    // Default mode.
-    STRING = 0;
-
-    CORD = 1;
-
-    STRING_PIECE = 2;
-  }
-  // The packed option can be enabled for repeated primitive fields to enable
-  // a more efficient representation on the wire. Rather than repeatedly
-  // writing the tag and type for each element, the entire array is encoded as
-  // a single length-delimited blob. In proto3, only explicitly setting it to
-  // false will avoid using packed encoding.
-  optional bool packed = 2;
-
-  // The jstype option determines the JavaScript type used for values of the
-  // field.  The option is permitted only for 64 bit integral and fixed types
-  // (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING
-  // is represented as a JavaScript string, which avoids loss of precision that
-  // can happen when a large value is converted to a floating point JavaScript number.
-  // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
-  // use the JavaScript "number" type.  The behavior of the default option
-  // JS_NORMAL is implementation dependent.
-  //
-  // This option is an enum to permit additional types to be added, e.g.
-  // goog.math.Integer.
-  optional JSType jstype = 6 [default = JS_NORMAL];
-  enum JSType {
-    // Use the default type.
-    JS_NORMAL = 0;
-
-    // Use JavaScript strings.
-    JS_STRING = 1;
-
-    // Use JavaScript numbers.
-    JS_NUMBER = 2;
-  }
-
-  // Should this field be parsed lazily?  Lazy applies only to message-type
-  // fields.  It means that when the outer message is initially parsed, the
-  // inner message's contents will not be parsed but instead stored in encoded
-  // form.  The inner message will actually be parsed when it is first accessed.
-  //
-  // This is only a hint.  Implementations are free to choose whether to use
-  // eager or lazy parsing regardless of the value of this option.  However,
-  // setting this option true suggests that the protocol author believes that
-  // using lazy parsing on this field is worth the additional bookkeeping
-  // overhead typically needed to implement it.
-  //
-  // This option does not affect the public interface of any generated code;
-  // all method signatures remain the same.  Furthermore, thread-safety of the
-  // interface is not affected by this option; const methods remain safe to
-  // call from multiple threads concurrently, while non-const methods continue
-  // to require exclusive access.
-  //
-  //
-  // Note that implementations may choose not to check required fields within
-  // a lazy sub-message.  That is, calling IsInitialized() on the outer message
-  // may return true even if the inner message has missing required fields.
-  // This is necessary because otherwise the inner message would have to be
-  // parsed in order to perform the check, defeating the purpose of lazy
-  // parsing.  An implementation which chooses not to check required fields
-  // must be consistent about it.  That is, for any particular sub-message, the
-  // implementation must either *always* check its required fields, or *never*
-  // check its required fields, regardless of whether or not the message has
-  // been parsed.
-  optional bool lazy = 5 [default=false];
-
-  // Is this field deprecated?
-  // Depending on the target platform, this can emit Deprecated annotations
-  // for accessors, or it will be completely ignored; in the very least, this
-  // is a formalization for deprecating fields.
-  optional bool deprecated = 3 [default=false];
-
-  // For Google-internal migration only. Do not use.
-  optional bool weak = 10 [default=false];
-
-
-  // The parser stores options it doesn't recognize here. See above.
-  repeated UninterpretedOption uninterpreted_option = 999;
-
-  // Clients can define custom options in extensions of this message. See above.
-  extensions 1000 to max;
-
-  reserved 4;  // removed jtype
-}
-
-message OneofOptions {
-  // The parser stores options it doesn't recognize here. See above.
-  repeated UninterpretedOption uninterpreted_option = 999;
-
-  // Clients can define custom options in extensions of this message. See above.
-  extensions 1000 to max;
-}
-
-message EnumOptions {
-
-  // Set this option to true to allow mapping different tag names to the same
-  // value.
-  optional bool allow_alias = 2;
-
-  // Is this enum deprecated?
-  // Depending on the target platform, this can emit Deprecated annotations
-  // for the enum, or it will be completely ignored; in the very least, this
-  // is a formalization for deprecating enums.
-  optional bool deprecated = 3 [default=false];
-
-  reserved 5;  // javanano_as_lite
-
-  // The parser stores options it doesn't recognize here. See above.
-  repeated UninterpretedOption uninterpreted_option = 999;
-
-  // Clients can define custom options in extensions of this message. See above.
-  extensions 1000 to max;
-}
-
-message EnumValueOptions {
-  // Is this enum value deprecated?
-  // Depending on the target platform, this can emit Deprecated annotations
-  // for the enum value, or it will be completely ignored; in the very least,
-  // this is a formalization for deprecating enum values.
-  optional bool deprecated = 1 [default=false];
-
-  // The parser stores options it doesn't recognize here. See above.
-  repeated UninterpretedOption uninterpreted_option = 999;
-
-  // Clients can define custom options in extensions of this message. See above.
-  extensions 1000 to max;
-}
-
-message ServiceOptions {
-
-  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
-  //   framework.  We apologize for hoarding these numbers to ourselves, but
-  //   we were already using them long before we decided to release Protocol
-  //   Buffers.
-
-  // Is this service deprecated?
-  // Depending on the target platform, this can emit Deprecated annotations
-  // for the service, or it will be completely ignored; in the very least,
-  // this is a formalization for deprecating services.
-  optional bool deprecated = 33 [default=false];
-
-  // The parser stores options it doesn't recognize here. See above.
-  repeated UninterpretedOption uninterpreted_option = 999;
-
-  // Clients can define custom options in extensions of this message. See above.
-  extensions 1000 to max;
-}
-
-message MethodOptions {
-
-  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
-  //   framework.  We apologize for hoarding these numbers to ourselves, but
-  //   we were already using them long before we decided to release Protocol
-  //   Buffers.
-
-  // Is this method deprecated?
-  // Depending on the target platform, this can emit Deprecated annotations
-  // for the method, or it will be completely ignored; in the very least,
-  // this is a formalization for deprecating methods.
-  optional bool deprecated = 33 [default=false];
-
-  // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
-  // or neither? HTTP based RPC implementation may choose GET verb for safe
-  // methods, and PUT verb for idempotent methods instead of the default POST.
-  enum IdempotencyLevel {
-    IDEMPOTENCY_UNKNOWN = 0;
-    NO_SIDE_EFFECTS     = 1; // implies idempotent
-    IDEMPOTENT          = 2; // idempotent, but may have side effects
-  }
-  optional IdempotencyLevel idempotency_level =
-      34 [default=IDEMPOTENCY_UNKNOWN];
-
-  // The parser stores options it doesn't recognize here. See above.
-  repeated UninterpretedOption uninterpreted_option = 999;
-
-  // Clients can define custom options in extensions of this message. See above.
-  extensions 1000 to max;
-}
-
-
-// A message representing an option the parser does not recognize. This only
-// appears in options protos created by the compiler::Parser class.
-// DescriptorPool resolves these when building Descriptor objects. Therefore,
-// options protos in descriptor objects (e.g. returned by Descriptor::options(),
-// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
-// in them.
-message UninterpretedOption {
-  // The name of the uninterpreted option.  Each string represents a segment in
-  // a dot-separated name.  is_extension is true iff a segment represents an
-  // extension (denoted with parentheses in options specs in .proto files).
-  // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
-  // "foo.(bar.baz).qux".
-  message NamePart {
-    required string name_part = 1;
-    required bool is_extension = 2;
-  }
-  repeated NamePart name = 2;
-
-  // The value of the uninterpreted option, in whatever type the tokenizer
-  // identified it as during parsing. Exactly one of these should be set.
-  optional string identifier_value = 3;
-  optional uint64 positive_int_value = 4;
-  optional int64 negative_int_value = 5;
-  optional double double_value = 6;
-  optional bytes string_value = 7;
-  optional string aggregate_value = 8;
-}
-
-// ===================================================================
-// Optional source code info
-
-// Encapsulates information about the original source file from which a
-// FileDescriptorProto was generated.
-message SourceCodeInfo {
-  // A Location identifies a piece of source code in a .proto file which
-  // corresponds to a particular definition.  This information is intended
-  // to be useful to IDEs, code indexers, documentation generators, and similar
-  // tools.
-  //
-  // For example, say we have a file like:
-  //   message Foo {
-  //     optional string foo = 1;
-  //   }
-  // Let's look at just the field definition:
-  //   optional string foo = 1;
-  //   ^       ^^     ^^  ^  ^^^
-  //   a       bc     de  f  ghi
-  // We have the following locations:
-  //   span   path               represents
-  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
-  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
-  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
-  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
-  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
-  //
-  // Notes:
-  // - A location may refer to a repeated field itself (i.e. not to any
-  //   particular index within it).  This is used whenever a set of elements are
-  //   logically enclosed in a single code segment.  For example, an entire
-  //   extend block (possibly containing multiple extension definitions) will
-  //   have an outer location whose path refers to the "extensions" repeated
-  //   field without an index.
-  // - Multiple locations may have the same path.  This happens when a single
-  //   logical declaration is spread out across multiple places.  The most
-  //   obvious example is the "extend" block again -- there may be multiple
-  //   extend blocks in the same scope, each of which will have the same path.
-  // - A location's span is not always a subset of its parent's span.  For
-  //   example, the "extendee" of an extension declaration appears at the
-  //   beginning of the "extend" block and is shared by all extensions within
-  //   the block.
-  // - Just because a location's span is a subset of some other location's span
-  //   does not mean that it is a descendant.  For example, a "group" defines
-  //   both a type and a field in a single declaration.  Thus, the locations
-  //   corresponding to the type and field and their components will overlap.
-  // - Code which tries to interpret locations should probably be designed to
-  //   ignore those that it doesn't understand, as more types of locations could
-  //   be recorded in the future.
-  repeated Location location = 1;
-  message Location {
-    // Identifies which part of the FileDescriptorProto was defined at this
-    // location.
-    //
-    // Each element is a field number or an index.  They form a path from
-    // the root FileDescriptorProto to the place where the definition occurs.  For
-    // example, this path:
-    //   [ 4, 3, 2, 7, 1 ]
-    // refers to:
-    //   file.message_type(3)  // 4, 3
-    //       .field(7)         // 2, 7
-    //       .name()           // 1
-    // This is because FileDescriptorProto.message_type has field number 4:
-    //   repeated DescriptorProto message_type = 4;
-    // and DescriptorProto.field has field number 2:
-    //   repeated FieldDescriptorProto field = 2;
-    // and FieldDescriptorProto.name has field number 1:
-    //   optional string name = 1;
-    //
-    // Thus, the above path gives the location of a field name.  If we removed
-    // the last element:
-    //   [ 4, 3, 2, 7 ]
-    // this path refers to the whole field declaration (from the beginning
-    // of the label to the terminating semicolon).
-    repeated int32 path = 1 [packed=true];
-
-    // Always has exactly three or four elements: start line, start column,
-    // end line (optional, otherwise assumed same as start line), end column.
-    // These are packed into a single field for efficiency.  Note that line
-    // and column numbers are zero-based -- typically you will want to add
-    // 1 to each before displaying to a user.
-    repeated int32 span = 2 [packed=true];
-
-    // If this SourceCodeInfo represents a complete declaration, these are any
-    // comments appearing before and after the declaration which appear to be
-    // attached to the declaration.
-    //
-    // A series of line comments appearing on consecutive lines, with no other
-    // tokens appearing on those lines, will be treated as a single comment.
-    //
-    // leading_detached_comments will keep paragraphs of comments that appear
-    // before (but not connected to) the current element. Each paragraph,
-    // separated by empty lines, will be one comment element in the repeated
-    // field.
-    //
-    // Only the comment content is provided; comment markers (e.g. //) are
-    // stripped out.  For block comments, leading whitespace and an asterisk
-    // will be stripped from the beginning of each line other than the first.
-    // Newlines are included in the output.
-    //
-    // Examples:
-    //
-    //   optional int32 foo = 1;  // Comment attached to foo.
-    //   // Comment attached to bar.
-    //   optional int32 bar = 2;
-    //
-    //   optional string baz = 3;
-    //   // Comment attached to baz.
-    //   // Another line attached to baz.
-    //
-    //   // Comment attached to qux.
-    //   //
-    //   // Another line attached to qux.
-    //   optional double qux = 4;
-    //
-    //   // Detached comment for corge. This is not leading or trailing comments
-    //   // to qux or corge because there are blank lines separating it from
-    //   // both.
-    //
-    //   // Detached comment for corge paragraph 2.
-    //
-    //   optional string corge = 5;
-    //   /* Block comment attached
-    //    * to corge.  Leading asterisks
-    //    * will be removed. */
-    //   /* Block comment attached to
-    //    * grault. */
-    //   optional int32 grault = 6;
-    //
-    //   // ignored detached comments.
-    optional string leading_comments = 3;
-    optional string trailing_comments = 4;
-    repeated string leading_detached_comments = 6;
-  }
-}
-
-// Describes the relationship between generated code and its original source
-// file. A GeneratedCodeInfo message is associated with only one generated
-// source file, but may contain references to different source .proto files.
-message GeneratedCodeInfo {
-  // An Annotation connects some span of text in generated code to an element
-  // of its generating .proto file.
-  repeated Annotation annotation = 1;
-  message Annotation {
-    // Identifies the element in the original source .proto file. This field
-    // is formatted the same as SourceCodeInfo.Location.path.
-    repeated int32 path = 1 [packed=true];
-
-    // Identifies the filesystem path to the original source .proto.
-    optional string source_file = 2;
-
-    // Identifies the starting offset in bytes in the generated code
-    // that relates to the identified object.
-    optional int32 begin = 3;
-
-    // Identifies the ending offset in bytes in the generated code that
-    // relates to the identified offset. The end offset should be one past
-    // the last relevant byte (so the length of the text = end - begin).
-    optional int32 end = 4;
-  }
-}
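The descriptor.proto definitions removed above (the options messages, the synthesized map-entry type, SourceCodeInfo paths) are now consumed through the canonical google.golang.org/protobuf packages. A minimal sketch of how those descriptor options can be inspected at runtime with protoreflect; structpb.Struct is used only because it happens to declare a `map<string, Value>` field, not because this change touches it:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// structpb.Struct declares `map<string, Value> fields = 1`, so its
	// descriptor carries a synthesized map-entry message with
	// `option map_entry = true`, as described in the comments above.
	md := (&structpb.Struct{}).ProtoReflect().Descriptor()

	fd := md.Fields().ByName("fields")
	fmt.Println(fd.IsMap()) // true

	entry := fd.Message() // the generated MapFieldEntry-style descriptor
	fmt.Println(entry.IsMapEntry())                    // true
	fmt.Println(entry.Fields().ByName("key").Kind())   // string
	fmt.Println(entry.Fields().ByName("value").Kind()) // message

	// The options themselves are ordinary descriptor.proto messages.
	opts := entry.Options().(*descriptorpb.MessageOptions)
	fmt.Println(opts.GetMapEntry(), opts.GetDeprecated()) // true false
}
```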
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
index 70276e8..85f9f57 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -1,141 +1,179 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 package ptypes
 
-// This file implements functions to marshal proto.Message to/from
-// google.protobuf.Any message.
-
 import (
 	"fmt"
-	"reflect"
 	"strings"
 
 	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes/any"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+
+	anypb "github.com/golang/protobuf/ptypes/any"
 )
 
-const googleApis = "type.googleapis.com/"
+const urlPrefix = "type.googleapis.com/"
 
-// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
+// AnyMessageName returns the message name contained in an anypb.Any message.
+// Most type assertions should use the Is function instead.
 //
-// Note that regular type assertions should be done using the Is
-// function. AnyMessageName is provided for less common use cases like filtering a
-// sequence of Any messages based on a set of allowed message type names.
-func AnyMessageName(any *any.Any) (string, error) {
+// Deprecated: Call the any.MessageName method instead.
+func AnyMessageName(any *anypb.Any) (string, error) {
+	name, err := anyMessageName(any)
+	return string(name), err
+}
+func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
 	if any == nil {
 		return "", fmt.Errorf("message is nil")
 	}
-	slash := strings.LastIndex(any.TypeUrl, "/")
-	if slash < 0 {
+	name := protoreflect.FullName(any.TypeUrl)
+	if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
+		name = name[i+len("/"):]
+	}
+	if !name.IsValid() {
 		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
 	}
-	return any.TypeUrl[slash+1:], nil
+	return name, nil
 }
 
-// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
-func MarshalAny(pb proto.Message) (*any.Any, error) {
-	value, err := proto.Marshal(pb)
+// MarshalAny marshals the given message m into an anypb.Any message.
+//
+// Deprecated: Call the anypb.New function instead.
+func MarshalAny(m proto.Message) (*anypb.Any, error) {
+	switch dm := m.(type) {
+	case DynamicAny:
+		m = dm.Message
+	case *DynamicAny:
+		if dm == nil {
+			return nil, proto.ErrNil
+		}
+		m = dm.Message
+	}
+	b, err := proto.Marshal(m)
 	if err != nil {
 		return nil, err
 	}
-	return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
+	return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
 }
 
-// DynamicAny is a value that can be passed to UnmarshalAny to automatically
-// allocate a proto.Message for the type specified in a google.protobuf.Any
-// message. The allocated message is stored in the embedded proto.Message.
+// Empty returns a new message of the type specified in an anypb.Any message.
+// It returns protoregistry.NotFound if the corresponding message type could not
+// be resolved in the global registry.
 //
-// Example:
-//
-//   var x ptypes.DynamicAny
-//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
-//   fmt.Printf("unmarshaled message: %v", x.Message)
-type DynamicAny struct {
-	proto.Message
-}
-
-// Empty returns a new proto.Message of the type specified in a
-// google.protobuf.Any message. It returns an error if corresponding message
-// type isn't linked in.
-func Empty(any *any.Any) (proto.Message, error) {
-	aname, err := AnyMessageName(any)
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
+// to resolve the message name and create a new instance of it.
+func Empty(any *anypb.Any) (proto.Message, error) {
+	name, err := anyMessageName(any)
 	if err != nil {
 		return nil, err
 	}
-
-	t := proto.MessageType(aname)
-	if t == nil {
-		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+	mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
+	if err != nil {
+		return nil, err
 	}
-	return reflect.New(t.Elem()).Interface().(proto.Message), nil
+	return proto.MessageV1(mt.New().Interface()), nil
 }
 
-// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
-// message and places the decoded result in pb. It returns an error if type of
-// contents of Any message does not match type of pb message.
+// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
+// into the provided message m. It returns an error if the target message
+// does not match the type in the Any message or if an unmarshal error occurs.
 //
-// pb can be a proto.Message, or a *DynamicAny.
-func UnmarshalAny(any *any.Any, pb proto.Message) error {
-	if d, ok := pb.(*DynamicAny); ok {
-		if d.Message == nil {
+// The target message m may be a *DynamicAny message. If the underlying message
+// type could not be resolved, then this returns protoregistry.NotFound.
+//
+// Deprecated: Call the any.UnmarshalTo method instead.
+func UnmarshalAny(any *anypb.Any, m proto.Message) error {
+	if dm, ok := m.(*DynamicAny); ok {
+		if dm.Message == nil {
 			var err error
-			d.Message, err = Empty(any)
+			dm.Message, err = Empty(any)
 			if err != nil {
 				return err
 			}
 		}
-		return UnmarshalAny(any, d.Message)
+		m = dm.Message
 	}
 
-	aname, err := AnyMessageName(any)
+	anyName, err := AnyMessageName(any)
 	if err != nil {
 		return err
 	}
-
-	mname := proto.MessageName(pb)
-	if aname != mname {
-		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+	msgName := proto.MessageName(m)
+	if anyName != msgName {
+		return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
 	}
-	return proto.Unmarshal(any.Value, pb)
+	return proto.Unmarshal(any.Value, m)
 }
 
-// Is returns true if any value contains a given message type.
-func Is(any *any.Any, pb proto.Message) bool {
-	// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
-	// but it avoids scanning TypeUrl for the slash.
-	if any == nil {
+// Is reports whether the Any message contains a message of the specified type.
+//
+// Deprecated: Call the any.MessageIs method instead.
+func Is(any *anypb.Any, m proto.Message) bool {
+	if any == nil || m == nil {
 		return false
 	}
-	name := proto.MessageName(pb)
-	prefix := len(any.TypeUrl) - len(name)
-	return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
+	name := proto.MessageName(m)
+	if !strings.HasSuffix(any.TypeUrl, name) {
+		return false
+	}
+	return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in an anypb.Any message.
+// The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+//   var x ptypes.DynamicAny
+//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+//   fmt.Printf("unmarshaled message: %v", x.Message)
+//
+// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
+// the any message contents into a new instance of the underlying message.
+type DynamicAny struct{ proto.Message }
+
+func (m DynamicAny) String() string {
+	if m.Message == nil {
+		return "<nil>"
+	}
+	return m.Message.String()
+}
+func (m DynamicAny) Reset() {
+	if m.Message == nil {
+		return
+	}
+	m.Message.Reset()
+}
+func (m DynamicAny) ProtoMessage() {
+	return
+}
+func (m DynamicAny) ProtoReflect() protoreflect.Message {
+	if m.Message == nil {
+		return nil
+	}
+	return dynamicAny{proto.MessageReflect(m.Message)}
+}
+
+type dynamicAny struct{ protoreflect.Message }
+
+func (m dynamicAny) Type() protoreflect.MessageType {
+	return dynamicAnyType{m.Message.Type()}
+}
+func (m dynamicAny) New() protoreflect.Message {
+	return dynamicAnyType{m.Message.Type()}.New()
+}
+func (m dynamicAny) Interface() protoreflect.ProtoMessage {
+	return DynamicAny{proto.MessageV1(m.Message.Interface())}
+}
+
+type dynamicAnyType struct{ protoreflect.MessageType }
+
+func (t dynamicAnyType) New() protoreflect.Message {
+	return dynamicAny{t.MessageType.New()}
+}
+func (t dynamicAnyType) Zero() protoreflect.Message {
+	return dynamicAny{t.MessageType.Zero()}
 }
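The rewritten ptypes/any.go keeps MarshalAny, UnmarshalAny, Is, Empty, and DynamicAny working, but each now carries a Deprecated note pointing at the anypb API. A minimal migration sketch, assuming a durationpb.Duration as the packed payload purely for illustration:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Pack: anypb.New replaces ptypes.MarshalAny.
	a, err := anypb.New(durationpb.New(90 * time.Second))
	if err != nil {
		log.Fatal(err)
	}

	// Type check: MessageIs replaces ptypes.Is.
	if a.MessageIs(&durationpb.Duration{}) {
		// Unpack into a known type: UnmarshalTo replaces ptypes.UnmarshalAny.
		var d durationpb.Duration
		if err := a.UnmarshalTo(&d); err != nil {
			log.Fatal(err)
		}
		fmt.Println(d.AsDuration()) // 1m30s
	}

	// Unpack into a freshly allocated message of the contained type:
	// anypb.UnmarshalNew replaces the DynamicAny helper.
	m, err := anypb.UnmarshalNew(a, proto.UnmarshalOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(m.ProtoReflect().Descriptor().FullName()) // google.protobuf.Duration
}
```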
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
index 78ee523..0ef27d3 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -1,200 +1,62 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/any.proto
+// source: github.com/golang/protobuf/ptypes/any/any.proto
 
 package any
 
 import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	math "math"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	reflect "reflect"
 )
 
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/any.proto.
 
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+type Any = anypb.Any
 
-// `Any` contains an arbitrary serialized protocol buffer message along with a
-// URL that describes the type of the serialized message.
-//
-// Protobuf library provides support to pack/unpack Any values in the form
-// of utility functions or additional generated methods of the Any type.
-//
-// Example 1: Pack and unpack a message in C++.
-//
-//     Foo foo = ...;
-//     Any any;
-//     any.PackFrom(foo);
-//     ...
-//     if (any.UnpackTo(&foo)) {
-//       ...
-//     }
-//
-// Example 2: Pack and unpack a message in Java.
-//
-//     Foo foo = ...;
-//     Any any = Any.pack(foo);
-//     ...
-//     if (any.is(Foo.class)) {
-//       foo = any.unpack(Foo.class);
-//     }
-//
-//  Example 3: Pack and unpack a message in Python.
-//
-//     foo = Foo(...)
-//     any = Any()
-//     any.Pack(foo)
-//     ...
-//     if any.Is(Foo.DESCRIPTOR):
-//       any.Unpack(foo)
-//       ...
-//
-//  Example 4: Pack and unpack a message in Go
-//
-//      foo := &pb.Foo{...}
-//      any, err := ptypes.MarshalAny(foo)
-//      ...
-//      foo := &pb.Foo{}
-//      if err := ptypes.UnmarshalAny(any, foo); err != nil {
-//        ...
-//      }
-//
-// The pack methods provided by protobuf library will by default use
-// 'type.googleapis.com/full.type.name' as the type URL and the unpack
-// methods only use the fully qualified type name after the last '/'
-// in the type URL, for example "foo.bar.com/x/y.z" will yield type
-// name "y.z".
-//
-//
-// JSON
-// ====
-// The JSON representation of an `Any` value uses the regular
-// representation of the deserialized, embedded message, with an
-// additional field `@type` which contains the type URL. Example:
-//
-//     package google.profile;
-//     message Person {
-//       string first_name = 1;
-//       string last_name = 2;
-//     }
-//
-//     {
-//       "@type": "type.googleapis.com/google.profile.Person",
-//       "firstName": <string>,
-//       "lastName": <string>
-//     }
-//
-// If the embedded message type is well-known and has a custom JSON
-// representation, that representation will be embedded adding a field
-// `value` which holds the custom JSON in addition to the `@type`
-// field. Example (for message [google.protobuf.Duration][]):
-//
-//     {
-//       "@type": "type.googleapis.com/google.protobuf.Duration",
-//       "value": "1.212s"
-//     }
-//
-type Any struct {
-	// A URL/resource name that uniquely identifies the type of the serialized
-	// protocol buffer message. The last segment of the URL's path must represent
-	// the fully qualified name of the type (as in
-	// `path/google.protobuf.Duration`). The name should be in a canonical form
-	// (e.g., leading "." is not accepted).
-	//
-	// In practice, teams usually precompile into the binary all types that they
-	// expect it to use in the context of Any. However, for URLs which use the
-	// scheme `http`, `https`, or no scheme, one can optionally set up a type
-	// server that maps type URLs to message definitions as follows:
-	//
-	// * If no scheme is provided, `https` is assumed.
-	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
-	//   value in binary format, or produce an error.
-	// * Applications are allowed to cache lookup results based on the
-	//   URL, or have them precompiled into a binary to avoid any
-	//   lookup. Therefore, binary compatibility needs to be preserved
-	//   on changes to types. (Use versioned type names to manage
-	//   breaking changes.)
-	//
-	// Note: this functionality is not currently available in the official
-	// protobuf release, and it is not used for type URLs beginning with
-	// type.googleapis.com.
-	//
-	// Schemes other than `http`, `https` (or the empty scheme) might be
-	// used with implementation specific semantics.
-	//
-	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
-	// Must be a valid serialized protocol buffer of the above specified type.
-	Value                []byte   `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
+	0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
+	0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
+	0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
+	0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x33,
 }
 
-func (m *Any) Reset()         { *m = Any{} }
-func (m *Any) String() string { return proto.CompactTextString(m) }
-func (*Any) ProtoMessage()    {}
-func (*Any) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b53526c13ae22eb4, []int{0}
+var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
 }
 
-func (*Any) XXX_WellKnownType() string { return "Any" }
-
-func (m *Any) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Any.Unmarshal(m, b)
-}
-func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Any.Marshal(b, m, deterministic)
-}
-func (m *Any) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Any.Merge(m, src)
-}
-func (m *Any) XXX_Size() int {
-	return xxx_messageInfo_Any.Size(m)
-}
-func (m *Any) XXX_DiscardUnknown() {
-	xxx_messageInfo_Any.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Any proto.InternalMessageInfo
-
-func (m *Any) GetTypeUrl() string {
-	if m != nil {
-		return m.TypeUrl
+func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
+func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
+	if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
+		return
 	}
-	return ""
-}
-
-func (m *Any) GetValue() []byte {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-func init() {
-	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
-}
-
-func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
-
-var fileDescriptor_b53526c13ae22eb4 = []byte{
-	// 185 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
-	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
-	0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
-	0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
-	0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
-	0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
-	0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
-	0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
-	0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
-	0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
-	0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
-	0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
+	file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
+	file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
+	file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
 }
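The regenerated any.pb.go no longer defines its own Any struct; `type Any = anypb.Any` makes the old import path an alias for the canonical type. A small sketch of what that aliasing means for callers (the `legacy` package alias is just a local name for illustration):

```go
package main

import (
	"fmt"

	legacy "github.com/golang/protobuf/ptypes/any"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	// Because legacy.Any is a type alias for anypb.Any, a value created
	// through either import path is usable wherever the other is expected,
	// with no conversion.
	var a *legacy.Any = &anypb.Any{TypeUrl: "type.googleapis.com/google.protobuf.Duration"}
	describe(a)
}

// describe takes the new canonical type directly.
func describe(a *anypb.Any) {
	fmt.Println(a.GetTypeUrl())
}
```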
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
deleted file mode 100644
index 4932942..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto
+++ /dev/null
@@ -1,154 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc.  All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option go_package = "github.com/golang/protobuf/ptypes/any";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "AnyProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// `Any` contains an arbitrary serialized protocol buffer message along with a
-// URL that describes the type of the serialized message.
-//
-// Protobuf library provides support to pack/unpack Any values in the form
-// of utility functions or additional generated methods of the Any type.
-//
-// Example 1: Pack and unpack a message in C++.
-//
-//     Foo foo = ...;
-//     Any any;
-//     any.PackFrom(foo);
-//     ...
-//     if (any.UnpackTo(&foo)) {
-//       ...
-//     }
-//
-// Example 2: Pack and unpack a message in Java.
-//
-//     Foo foo = ...;
-//     Any any = Any.pack(foo);
-//     ...
-//     if (any.is(Foo.class)) {
-//       foo = any.unpack(Foo.class);
-//     }
-//
-//  Example 3: Pack and unpack a message in Python.
-//
-//     foo = Foo(...)
-//     any = Any()
-//     any.Pack(foo)
-//     ...
-//     if any.Is(Foo.DESCRIPTOR):
-//       any.Unpack(foo)
-//       ...
-//
-//  Example 4: Pack and unpack a message in Go
-//
-//      foo := &pb.Foo{...}
-//      any, err := ptypes.MarshalAny(foo)
-//      ...
-//      foo := &pb.Foo{}
-//      if err := ptypes.UnmarshalAny(any, foo); err != nil {
-//        ...
-//      }
-//
-// The pack methods provided by protobuf library will by default use
-// 'type.googleapis.com/full.type.name' as the type URL and the unpack
-// methods only use the fully qualified type name after the last '/'
-// in the type URL, for example "foo.bar.com/x/y.z" will yield type
-// name "y.z".
-//
-//
-// JSON
-// ====
-// The JSON representation of an `Any` value uses the regular
-// representation of the deserialized, embedded message, with an
-// additional field `@type` which contains the type URL. Example:
-//
-//     package google.profile;
-//     message Person {
-//       string first_name = 1;
-//       string last_name = 2;
-//     }
-//
-//     {
-//       "@type": "type.googleapis.com/google.profile.Person",
-//       "firstName": <string>,
-//       "lastName": <string>
-//     }
-//
-// If the embedded message type is well-known and has a custom JSON
-// representation, that representation will be embedded adding a field
-// `value` which holds the custom JSON in addition to the `@type`
-// field. Example (for message [google.protobuf.Duration][]):
-//
-//     {
-//       "@type": "type.googleapis.com/google.protobuf.Duration",
-//       "value": "1.212s"
-//     }
-//
-message Any {
-  // A URL/resource name that uniquely identifies the type of the serialized
-  // protocol buffer message. The last segment of the URL's path must represent
-  // the fully qualified name of the type (as in
-  // `path/google.protobuf.Duration`). The name should be in a canonical form
-  // (e.g., leading "." is not accepted).
-  //
-  // In practice, teams usually precompile into the binary all types that they
-  // expect it to use in the context of Any. However, for URLs which use the
-  // scheme `http`, `https`, or no scheme, one can optionally set up a type
-  // server that maps type URLs to message definitions as follows:
-  //
-  // * If no scheme is provided, `https` is assumed.
-  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
-  //   value in binary format, or produce an error.
-  // * Applications are allowed to cache lookup results based on the
-  //   URL, or have them precompiled into a binary to avoid any
-  //   lookup. Therefore, binary compatibility needs to be preserved
-  //   on changes to types. (Use versioned type names to manage
-  //   breaking changes.)
-  //
-  // Note: this functionality is not currently available in the official
-  // protobuf release, and it is not used for type URLs beginning with
-  // type.googleapis.com.
-  //
-  // Schemes other than `http`, `https` (or the empty scheme) might be
-  // used with implementation specific semantics.
-  //
-  string type_url = 1;
-
-  // Must be a valid serialized protocol buffer of the above specified type.
-  bytes value = 2;
-}
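The deleted any.proto documents the `@type`/`value` JSON mapping for Any; that behaviour is now provided by the canonical google.golang.org/protobuf packages rather than a vendored copy of the schema. A hedged sketch of that JSON form using protojson (protojson does not guarantee stable whitespace, so the exact output may differ slightly):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	a, err := anypb.New(durationpb.New(1212 * time.Millisecond))
	if err != nil {
		log.Fatal(err)
	}

	// Duration has a custom JSON form, so the Any is rendered with both
	// the @type URL and a "value" field, as described in the comments above.
	b, err := protojson.Marshal(a)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
	// e.g. {"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.212s"}
}
```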
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
index c0d595d..d3c3325 100644
--- a/vendor/github.com/golang/protobuf/ptypes/doc.go
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -1,35 +1,10 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
-/*
-Package ptypes contains code for interacting with well-known types.
-*/
+// Package ptypes provides functionality for interacting with well-known types.
+//
+// Deprecated: Well-known types have specialized functionality directly
+// injected into the generated packages for each message type.
+// See the deprecation notice for each function for the suggested alternative.
 package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
index 26d1ca2..b2b55dd 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -1,102 +1,76 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 package ptypes
 
-// This file implements conversions between google.protobuf.Duration
-// and time.Duration.
-
 import (
 	"errors"
 	"fmt"
 	"time"
 
-	durpb "github.com/golang/protobuf/ptypes/duration"
+	durationpb "github.com/golang/protobuf/ptypes/duration"
 )
 
+// Range of google.protobuf.Duration as specified in duration.proto.
+// This is about 10,000 years in seconds.
 const (
-	// Range of a durpb.Duration in seconds, as specified in
-	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
 	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
 	minSeconds = -maxSeconds
 )
 
-// validateDuration determines whether the durpb.Duration is valid according to the
-// definition in google/protobuf/duration.proto. A valid durpb.Duration
-// may still be too large to fit into a time.Duration (the range of durpb.Duration
-// is about 10,000 years, and the range of time.Duration is about 290).
-func validateDuration(d *durpb.Duration) error {
-	if d == nil {
-		return errors.New("duration: nil Duration")
-	}
-	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
-		return fmt.Errorf("duration: %v: seconds out of range", d)
-	}
-	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
-		return fmt.Errorf("duration: %v: nanos out of range", d)
-	}
-	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
-	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
-		return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
-	}
-	return nil
-}
-
-// Duration converts a durpb.Duration to a time.Duration. Duration
-// returns an error if the durpb.Duration is invalid or is too large to be
-// represented in a time.Duration.
-func Duration(p *durpb.Duration) (time.Duration, error) {
-	if err := validateDuration(p); err != nil {
+// Duration converts a durationpb.Duration to a time.Duration.
+// Duration returns an error if dur is invalid or overflows a time.Duration.
+//
+// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
+func Duration(dur *durationpb.Duration) (time.Duration, error) {
+	if err := validateDuration(dur); err != nil {
 		return 0, err
 	}
-	d := time.Duration(p.Seconds) * time.Second
-	if int64(d/time.Second) != p.Seconds {
-		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+	d := time.Duration(dur.Seconds) * time.Second
+	if int64(d/time.Second) != dur.Seconds {
+		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
 	}
-	if p.Nanos != 0 {
-		d += time.Duration(p.Nanos) * time.Nanosecond
-		if (d < 0) != (p.Nanos < 0) {
-			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+	if dur.Nanos != 0 {
+		d += time.Duration(dur.Nanos) * time.Nanosecond
+		if (d < 0) != (dur.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
 		}
 	}
 	return d, nil
 }
 
-// DurationProto converts a time.Duration to a durpb.Duration.
-func DurationProto(d time.Duration) *durpb.Duration {
+// DurationProto converts a time.Duration to a durationpb.Duration.
+//
+// Deprecated: Call the durationpb.New function instead.
+func DurationProto(d time.Duration) *durationpb.Duration {
 	nanos := d.Nanoseconds()
 	secs := nanos / 1e9
 	nanos -= secs * 1e9
-	return &durpb.Duration{
-		Seconds: secs,
+	return &durationpb.Duration{
+		Seconds: int64(secs),
 		Nanos:   int32(nanos),
 	}
 }
+
+// validateDuration determines whether the durationpb.Duration is valid
+// according to the definition in google/protobuf/duration.proto.
+// A valid durationpb.Duration may still be too large to fit into a
+// time.Duration. Note that the range of durationpb.Duration is about
+// 10,000 years, while the range of time.Duration is about 290 years.
+func validateDuration(dur *durationpb.Duration) error {
+	if dur == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %v: seconds out of range", dur)
+	}
+	if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %v: nanos out of range", dur)
+	}
+	// Seconds and Nanos must have the same sign, unless dur.Nanos is zero.
+	if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
+		return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
+	}
+	return nil
+}
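The rewritten duration.go keeps Duration and DurationProto only as deprecated wrappers; their notices point at durationpb.New, CheckValid, and AsDuration. A minimal sketch of the replacement calls:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// durationpb.New replaces ptypes.DurationProto.
	dur := durationpb.New(1500 * time.Millisecond)

	// CheckValid enforces the same constraints as validateDuration above
	// (seconds in range, nanos in range, matching signs).
	if err := dur.CheckValid(); err != nil {
		log.Fatal(err)
	}

	// AsDuration replaces ptypes.Duration; out-of-range values saturate
	// rather than returning an error, which is why CheckValid comes first.
	fmt.Println(dur.AsDuration()) // 1.5s
}
```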
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
index 0d681ee..d0079ee 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -1,161 +1,63 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/duration.proto
+// source: github.com/golang/protobuf/ptypes/duration/duration.proto
 
 package duration
 
 import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	math "math"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+	reflect "reflect"
 )
 
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/duration.proto.
 
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+type Duration = durationpb.Duration
 
-// A Duration represents a signed, fixed-length span of time represented
-// as a count of seconds and fractions of seconds at nanosecond
-// resolution. It is independent of any calendar and concepts like "day"
-// or "month". It is related to Timestamp in that the difference between
-// two Timestamp values is a Duration and it can be added or subtracted
-// from a Timestamp. Range is approximately +-10,000 years.
-//
-// # Examples
-//
-// Example 1: Compute Duration from two Timestamps in pseudo code.
-//
-//     Timestamp start = ...;
-//     Timestamp end = ...;
-//     Duration duration = ...;
-//
-//     duration.seconds = end.seconds - start.seconds;
-//     duration.nanos = end.nanos - start.nanos;
-//
-//     if (duration.seconds < 0 && duration.nanos > 0) {
-//       duration.seconds += 1;
-//       duration.nanos -= 1000000000;
-//     } else if (durations.seconds > 0 && duration.nanos < 0) {
-//       duration.seconds -= 1;
-//       duration.nanos += 1000000000;
-//     }
-//
-// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
-//
-//     Timestamp start = ...;
-//     Duration duration = ...;
-//     Timestamp end = ...;
-//
-//     end.seconds = start.seconds + duration.seconds;
-//     end.nanos = start.nanos + duration.nanos;
-//
-//     if (end.nanos < 0) {
-//       end.seconds -= 1;
-//       end.nanos += 1000000000;
-//     } else if (end.nanos >= 1000000000) {
-//       end.seconds += 1;
-//       end.nanos -= 1000000000;
-//     }
-//
-// Example 3: Compute Duration from datetime.timedelta in Python.
-//
-//     td = datetime.timedelta(days=3, minutes=10)
-//     duration = Duration()
-//     duration.FromTimedelta(td)
-//
-// # JSON Mapping
-//
-// In JSON format, the Duration type is encoded as a string rather than an
-// object, where the string ends in the suffix "s" (indicating seconds) and
-// is preceded by the number of seconds, with nanoseconds expressed as
-// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
-// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
-// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
-// microsecond should be expressed in JSON format as "3.000001s".
-//
-//
-type Duration struct {
-	// Signed seconds of the span of time. Must be from -315,576,000,000
-	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
-	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
-	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
-	// Signed fractions of a second at nanosecond resolution of the span
-	// of time. Durations less than one second are represented with a 0
-	// `seconds` field and a positive or negative `nanos` field. For durations
-	// of one second or more, a non-zero value for the `nanos` field must be
-	// of the same sign as the `seconds` field. Must be from -999,999,999
-	// to +999,999,999 inclusive.
-	Nanos                int32    `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
+	0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
+	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
+	0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }
 
-func (m *Duration) Reset()         { *m = Duration{} }
-func (m *Duration) String() string { return proto.CompactTextString(m) }
-func (*Duration) ProtoMessage()    {}
-func (*Duration) Descriptor() ([]byte, []int) {
-	return fileDescriptor_23597b2ebd7ac6c5, []int{0}
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
 }
 
-func (*Duration) XXX_WellKnownType() string { return "Duration" }
-
-func (m *Duration) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Duration.Unmarshal(m, b)
-}
-func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
-}
-func (m *Duration) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Duration.Merge(m, src)
-}
-func (m *Duration) XXX_Size() int {
-	return xxx_messageInfo_Duration.Size(m)
-}
-func (m *Duration) XXX_DiscardUnknown() {
-	xxx_messageInfo_Duration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Duration proto.InternalMessageInfo
-
-func (m *Duration) GetSeconds() int64 {
-	if m != nil {
-		return m.Seconds
+func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
+func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
+	if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
+		return
 	}
-	return 0
-}
-
-func (m *Duration) GetNanos() int32 {
-	if m != nil {
-		return m.Nanos
-	}
-	return 0
-}
-
-func init() {
-	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
-}
-
-func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
-
-var fileDescriptor_23597b2ebd7ac6c5 = []byte{
-	// 190 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
-	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
-	0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
-	0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
-	0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
-	0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
-	0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
-	0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
-	0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
-	0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
-	0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
-	0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
+	file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
+	file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
+	file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
 }
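
The hunk above replaces the old gzip-descriptor registration with the protoc-gen-go v1.4 forwarding pattern: the file now carries only a raw descriptor that publicly imports google/protobuf/duration.proto and registers it through protoimpl.TypeBuilder, while the Duration Go type itself is expected to come from google.golang.org/protobuf/types/known/durationpb (the alias line sits earlier in the file, outside this hunk). A minimal sketch of working with the forwarded type, assuming the durationpb package brought in by this change is on the module path:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// durationpb.Duration converts directly to and from time.Duration.
	d := durationpb.New(3*time.Second + time.Microsecond)
	if err := d.CheckValid(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.Seconds, d.Nanos) // 3 1000
	fmt.Println(d.AsDuration())     // 3.000001s
}
```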
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
deleted file mode 100644
index 975fce4..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
+++ /dev/null
@@ -1,117 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc.  All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "github.com/golang/protobuf/ptypes/duration";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "DurationProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// A Duration represents a signed, fixed-length span of time represented
-// as a count of seconds and fractions of seconds at nanosecond
-// resolution. It is independent of any calendar and concepts like "day"
-// or "month". It is related to Timestamp in that the difference between
-// two Timestamp values is a Duration and it can be added or subtracted
-// from a Timestamp. Range is approximately +-10,000 years.
-//
-// # Examples
-//
-// Example 1: Compute Duration from two Timestamps in pseudo code.
-//
-//     Timestamp start = ...;
-//     Timestamp end = ...;
-//     Duration duration = ...;
-//
-//     duration.seconds = end.seconds - start.seconds;
-//     duration.nanos = end.nanos - start.nanos;
-//
-//     if (duration.seconds < 0 && duration.nanos > 0) {
-//       duration.seconds += 1;
-//       duration.nanos -= 1000000000;
-//     } else if (duration.seconds > 0 && duration.nanos < 0) {
-//       duration.seconds -= 1;
-//       duration.nanos += 1000000000;
-//     }
-//
-// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
-//
-//     Timestamp start = ...;
-//     Duration duration = ...;
-//     Timestamp end = ...;
-//
-//     end.seconds = start.seconds + duration.seconds;
-//     end.nanos = start.nanos + duration.nanos;
-//
-//     if (end.nanos < 0) {
-//       end.seconds -= 1;
-//       end.nanos += 1000000000;
-//     } else if (end.nanos >= 1000000000) {
-//       end.seconds += 1;
-//       end.nanos -= 1000000000;
-//     }
-//
-// Example 3: Compute Duration from datetime.timedelta in Python.
-//
-//     td = datetime.timedelta(days=3, minutes=10)
-//     duration = Duration()
-//     duration.FromTimedelta(td)
-//
-// # JSON Mapping
-//
-// In JSON format, the Duration type is encoded as a string rather than an
-// object, where the string ends in the suffix "s" (indicating seconds) and
-// is preceded by the number of seconds, with nanoseconds expressed as
-// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
-// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
-// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
-// microsecond should be expressed in JSON format as "3.000001s".
-//
-//
-message Duration {
-
-  // Signed seconds of the span of time. Must be from -315,576,000,000
-  // to +315,576,000,000 inclusive. Note: these bounds are computed from:
-  // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
-  int64 seconds = 1;
-
-  // Signed fractions of a second at nanosecond resolution of the span
-  // of time. Durations less than one second are represented with a 0
-  // `seconds` field and a positive or negative `nanos` field. For durations
-  // of one second or more, a non-zero value for the `nanos` field must be
-  // of the same sign as the `seconds` field. Must be from -999,999,999
-  // to +999,999,999 inclusive.
-  int32 nanos = 2;
-}
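
The borrow/carry arithmetic in Examples 1 and 2 of the removed comment is easy to get wrong; below is a small, self-contained Go sketch of the same normalization step (the function name and the sample values are illustrative only, not part of this repository):

```go
package main

import "fmt"

// normalizeDuration adjusts a (seconds, nanos) pair so that both fields share
// the same sign and nanos stays within (-1e9, 1e9), mirroring the pseudo code
// in the removed Duration comment.
func normalizeDuration(seconds int64, nanos int32) (int64, int32) {
	if seconds < 0 && nanos > 0 {
		seconds++
		nanos -= 1000000000
	} else if seconds > 0 && nanos < 0 {
		seconds--
		nanos += 1000000000
	}
	return seconds, nanos
}

func main() {
	// end - start with end = (5s, 100ns) and start = (2s, 500ns) gives a raw
	// difference of (3s, -400ns), which normalizes to (2s, 999999600ns).
	s, n := normalizeDuration(3, -400)
	fmt.Println(s, n) // 2 999999600
}
```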
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
index b4eb03e..16686a6 100644
--- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -1,83 +1,62 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/empty.proto
+// source: github.com/golang/protobuf/ptypes/empty/empty.proto
 
 package empty
 
 import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	math "math"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+	reflect "reflect"
 )
 
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/empty.proto.
 
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+type Empty = emptypb.Empty
 
-// A generic empty message that you can re-use to avoid defining duplicated
-// empty messages in your APIs. A typical example is to use it as the request
-// or the response type of an API method. For instance:
-//
-//     service Foo {
-//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
-//     }
-//
-// The JSON representation for `Empty` is empty JSON object `{}`.
-type Empty struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{
+	0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+	0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d,
+	0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }
 
-func (m *Empty) Reset()         { *m = Empty{} }
-func (m *Empty) String() string { return proto.CompactTextString(m) }
-func (*Empty) ProtoMessage()    {}
-func (*Empty) Descriptor() ([]byte, []int) {
-	return fileDescriptor_900544acb223d5b8, []int{0}
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
 }
 
-func (*Empty) XXX_WellKnownType() string { return "Empty" }
-
-func (m *Empty) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Empty.Unmarshal(m, b)
-}
-func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
-}
-func (m *Empty) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Empty.Merge(m, src)
-}
-func (m *Empty) XXX_Size() int {
-	return xxx_messageInfo_Empty.Size(m)
-}
-func (m *Empty) XXX_DiscardUnknown() {
-	xxx_messageInfo_Empty.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Empty proto.InternalMessageInfo
-
-func init() {
-	proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
-}
-
-func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) }
-
-var fileDescriptor_900544acb223d5b8 = []byte{
-	// 148 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
-	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
-	0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57,
-	0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36,
-	0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf,
-	0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c,
-	0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10,
-	0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40,
-	0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6,
-	0xb7, 0x00, 0x00, 0x00,
+func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() }
+func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() {
+	if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File
+	file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil
+	file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil
+	file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil
 }
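
Because Empty is now a type alias rather than a separately generated struct, values created through the legacy github.com/golang/protobuf/ptypes/empty path and through google.golang.org/protobuf/types/known/emptypb are the same Go type. A quick sketch:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes/empty"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// empty.Empty is an alias for emptypb.Empty, so a value created through
	// the legacy import path satisfies APIs that expect the new type.
	var legacy *empty.Empty = &emptypb.Empty{}
	var modern *emptypb.Empty = legacy
	fmt.Printf("%T %T\n", legacy, modern) // *emptypb.Empty *emptypb.Empty
}
```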
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
deleted file mode 100644
index 03cacd2..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
+++ /dev/null
@@ -1,52 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc.  All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option go_package = "github.com/golang/protobuf/ptypes/empty";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "EmptyProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-option cc_enable_arenas = true;
-
-// A generic empty message that you can re-use to avoid defining duplicated
-// empty messages in your APIs. A typical example is to use it as the request
-// or the response type of an API method. For instance:
-//
-//     service Foo {
-//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
-//     }
-//
-// The JSON representation for `Empty` is empty JSON object `{}`.
-message Empty {}
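
The `service Foo { rpc Bar(Empty) returns (Empty); }` pattern from the removed comment still applies after the migration; a hedged sketch of what such a handler looks like on the Go side (the service and method names here are hypothetical and not part of this repository):

```go
package server

import (
	"context"

	"google.golang.org/protobuf/types/known/emptypb"
)

type fooServer struct{}

// Bar mirrors `rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty)`:
// there is no request payload to read and no response payload to build.
func (s *fooServer) Bar(ctx context.Context, _ *emptypb.Empty) (*emptypb.Empty, error) {
	return &emptypb.Empty{}, nil
}
```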
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
deleted file mode 100644
index 33daa73..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
+++ /dev/null
@@ -1,336 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/struct.proto
-
-package structpb
-
-import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// `NullValue` is a singleton enumeration to represent the null value for the
-// `Value` type union.
-//
-//  The JSON representation for `NullValue` is JSON `null`.
-type NullValue int32
-
-const (
-	// Null value.
-	NullValue_NULL_VALUE NullValue = 0
-)
-
-var NullValue_name = map[int32]string{
-	0: "NULL_VALUE",
-}
-
-var NullValue_value = map[string]int32{
-	"NULL_VALUE": 0,
-}
-
-func (x NullValue) String() string {
-	return proto.EnumName(NullValue_name, int32(x))
-}
-
-func (NullValue) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_df322afd6c9fb402, []int{0}
-}
-
-func (NullValue) XXX_WellKnownType() string { return "NullValue" }
-
-// `Struct` represents a structured data value, consisting of fields
-// which map to dynamically typed values. In some languages, `Struct`
-// might be supported by a native representation. For example, in
-// scripting languages like JS a struct is represented as an
-// object. The details of that representation are described together
-// with the proto support for the language.
-//
-// The JSON representation for `Struct` is JSON object.
-type Struct struct {
-	// Unordered map of dynamically typed values.
-	Fields               map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
-	XXX_unrecognized     []byte            `json:"-"`
-	XXX_sizecache        int32             `json:"-"`
-}
-
-func (m *Struct) Reset()         { *m = Struct{} }
-func (m *Struct) String() string { return proto.CompactTextString(m) }
-func (*Struct) ProtoMessage()    {}
-func (*Struct) Descriptor() ([]byte, []int) {
-	return fileDescriptor_df322afd6c9fb402, []int{0}
-}
-
-func (*Struct) XXX_WellKnownType() string { return "Struct" }
-
-func (m *Struct) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Struct.Unmarshal(m, b)
-}
-func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
-}
-func (m *Struct) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Struct.Merge(m, src)
-}
-func (m *Struct) XXX_Size() int {
-	return xxx_messageInfo_Struct.Size(m)
-}
-func (m *Struct) XXX_DiscardUnknown() {
-	xxx_messageInfo_Struct.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Struct proto.InternalMessageInfo
-
-func (m *Struct) GetFields() map[string]*Value {
-	if m != nil {
-		return m.Fields
-	}
-	return nil
-}
-
-// `Value` represents a dynamically typed value which can be either
-// null, a number, a string, a boolean, a recursive struct value, or a
-// list of values. A producer of value is expected to set one of these
-// variants; the absence of any variant indicates an error.
-//
-// The JSON representation for `Value` is JSON value.
-type Value struct {
-	// The kind of value.
-	//
-	// Types that are valid to be assigned to Kind:
-	//	*Value_NullValue
-	//	*Value_NumberValue
-	//	*Value_StringValue
-	//	*Value_BoolValue
-	//	*Value_StructValue
-	//	*Value_ListValue
-	Kind                 isValue_Kind `protobuf_oneof:"kind"`
-	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
-	XXX_unrecognized     []byte       `json:"-"`
-	XXX_sizecache        int32        `json:"-"`
-}
-
-func (m *Value) Reset()         { *m = Value{} }
-func (m *Value) String() string { return proto.CompactTextString(m) }
-func (*Value) ProtoMessage()    {}
-func (*Value) Descriptor() ([]byte, []int) {
-	return fileDescriptor_df322afd6c9fb402, []int{1}
-}
-
-func (*Value) XXX_WellKnownType() string { return "Value" }
-
-func (m *Value) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Value.Unmarshal(m, b)
-}
-func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Value.Marshal(b, m, deterministic)
-}
-func (m *Value) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Value.Merge(m, src)
-}
-func (m *Value) XXX_Size() int {
-	return xxx_messageInfo_Value.Size(m)
-}
-func (m *Value) XXX_DiscardUnknown() {
-	xxx_messageInfo_Value.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Value proto.InternalMessageInfo
-
-type isValue_Kind interface {
-	isValue_Kind()
-}
-
-type Value_NullValue struct {
-	NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
-}
-
-type Value_NumberValue struct {
-	NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
-}
-
-type Value_StringValue struct {
-	StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
-}
-
-type Value_BoolValue struct {
-	BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
-}
-
-type Value_StructValue struct {
-	StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
-}
-
-type Value_ListValue struct {
-	ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
-}
-
-func (*Value_NullValue) isValue_Kind() {}
-
-func (*Value_NumberValue) isValue_Kind() {}
-
-func (*Value_StringValue) isValue_Kind() {}
-
-func (*Value_BoolValue) isValue_Kind() {}
-
-func (*Value_StructValue) isValue_Kind() {}
-
-func (*Value_ListValue) isValue_Kind() {}
-
-func (m *Value) GetKind() isValue_Kind {
-	if m != nil {
-		return m.Kind
-	}
-	return nil
-}
-
-func (m *Value) GetNullValue() NullValue {
-	if x, ok := m.GetKind().(*Value_NullValue); ok {
-		return x.NullValue
-	}
-	return NullValue_NULL_VALUE
-}
-
-func (m *Value) GetNumberValue() float64 {
-	if x, ok := m.GetKind().(*Value_NumberValue); ok {
-		return x.NumberValue
-	}
-	return 0
-}
-
-func (m *Value) GetStringValue() string {
-	if x, ok := m.GetKind().(*Value_StringValue); ok {
-		return x.StringValue
-	}
-	return ""
-}
-
-func (m *Value) GetBoolValue() bool {
-	if x, ok := m.GetKind().(*Value_BoolValue); ok {
-		return x.BoolValue
-	}
-	return false
-}
-
-func (m *Value) GetStructValue() *Struct {
-	if x, ok := m.GetKind().(*Value_StructValue); ok {
-		return x.StructValue
-	}
-	return nil
-}
-
-func (m *Value) GetListValue() *ListValue {
-	if x, ok := m.GetKind().(*Value_ListValue); ok {
-		return x.ListValue
-	}
-	return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Value) XXX_OneofWrappers() []interface{} {
-	return []interface{}{
-		(*Value_NullValue)(nil),
-		(*Value_NumberValue)(nil),
-		(*Value_StringValue)(nil),
-		(*Value_BoolValue)(nil),
-		(*Value_StructValue)(nil),
-		(*Value_ListValue)(nil),
-	}
-}
-
-// `ListValue` is a wrapper around a repeated field of values.
-//
-// The JSON representation for `ListValue` is JSON array.
-type ListValue struct {
-	// Repeated field of dynamically typed values.
-	Values               []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *ListValue) Reset()         { *m = ListValue{} }
-func (m *ListValue) String() string { return proto.CompactTextString(m) }
-func (*ListValue) ProtoMessage()    {}
-func (*ListValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_df322afd6c9fb402, []int{2}
-}
-
-func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
-
-func (m *ListValue) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ListValue.Unmarshal(m, b)
-}
-func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
-}
-func (m *ListValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ListValue.Merge(m, src)
-}
-func (m *ListValue) XXX_Size() int {
-	return xxx_messageInfo_ListValue.Size(m)
-}
-func (m *ListValue) XXX_DiscardUnknown() {
-	xxx_messageInfo_ListValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ListValue proto.InternalMessageInfo
-
-func (m *ListValue) GetValues() []*Value {
-	if m != nil {
-		return m.Values
-	}
-	return nil
-}
-
-func init() {
-	proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
-	proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
-	proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
-	proto.RegisterType((*Value)(nil), "google.protobuf.Value")
-	proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
-}
-
-func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) }
-
-var fileDescriptor_df322afd6c9fb402 = []byte{
-	// 417 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
-	0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
-	0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
-	0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
-	0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
-	0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
-	0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
-	0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
-	0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
-	0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
-	0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
-	0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
-	0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
-	0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
-	0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
-	0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
-	0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
-	0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
-	0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
-	0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
-	0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
-	0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
-	0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
-	0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
-	0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
-	0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
-	0x00,
-}
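
struct.pb.go is dropped from the vendor tree here; the equivalent dynamic-value API lives in google.golang.org/protobuf/types/known/structpb in the new protobuf module. A minimal sketch of building and JSON-encoding a Struct with that package, assuming it is available on the module path (the field names and values are illustrative only):

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Build a google.protobuf.Struct from an ordinary Go map; nested maps,
	// slices, numbers, strings, bools and nil map onto Struct/ListValue/Value.
	s, err := structpb.NewStruct(map[string]interface{}{
		"name":    "olt-1",
		"ports":   []interface{}{1, 2, 3},
		"enabled": true,
		"note":    nil,
	})
	if err != nil {
		log.Fatal(err)
	}
	// The JSON representation of Struct is a plain JSON object.
	b, err := protojson.Marshal(s)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}
```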
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
deleted file mode 100644
index 7d7808e..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
+++ /dev/null
@@ -1,96 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc.  All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "StructProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-
-// `Struct` represents a structured data value, consisting of fields
-// which map to dynamically typed values. In some languages, `Struct`
-// might be supported by a native representation. For example, in
-// scripting languages like JS a struct is represented as an
-// object. The details of that representation are described together
-// with the proto support for the language.
-//
-// The JSON representation for `Struct` is JSON object.
-message Struct {
-  // Unordered map of dynamically typed values.
-  map<string, Value> fields = 1;
-}
-
-// `Value` represents a dynamically typed value which can be either
-// null, a number, a string, a boolean, a recursive struct value, or a
-// list of values. A producer of value is expected to set one of these
-// variants; the absence of any variant indicates an error.
-//
-// The JSON representation for `Value` is JSON value.
-message Value {
-  // The kind of value.
-  oneof kind {
-    // Represents a null value.
-    NullValue null_value = 1;
-    // Represents a double value.
-    double number_value = 2;
-    // Represents a string value.
-    string string_value = 3;
-    // Represents a boolean value.
-    bool bool_value = 4;
-    // Represents a structured value.
-    Struct struct_value = 5;
-    // Represents a repeated `Value`.
-    ListValue list_value = 6;
-  }
-}
-
-// `NullValue` is a singleton enumeration to represent the null value for the
-// `Value` type union.
-//
-//  The JSON representation for `NullValue` is JSON `null`.
-enum NullValue {
-  // Null value.
-  NULL_VALUE = 0;
-}
-
-// `ListValue` is a wrapper around a repeated field of values.
-//
-// The JSON representation for `ListValue` is JSON array.
-message ListValue {
-  // Repeated field of dynamically typed values.
-  repeated Value values = 1;
-}
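
The `kind` oneof in the removed Value message maps onto wrapper types in the generated Go code. Below is a short sketch of constructing Values and switching on whichever member is populated, using the structpb constructors (assumed available from the new protobuf module):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

// describe prints which member of the `kind` oneof a Value carries.
func describe(v *structpb.Value) {
	switch k := v.GetKind().(type) {
	case *structpb.Value_NullValue:
		fmt.Println("null")
	case *structpb.Value_NumberValue:
		fmt.Println("number:", k.NumberValue)
	case *structpb.Value_StringValue:
		fmt.Println("string:", k.StringValue)
	case *structpb.Value_BoolValue:
		fmt.Println("bool:", k.BoolValue)
	case *structpb.Value_StructValue:
		fmt.Println("struct with", len(k.StructValue.GetFields()), "fields")
	case *structpb.Value_ListValue:
		fmt.Println("list with", len(k.ListValue.GetValues()), "elements")
	}
}

func main() {
	describe(structpb.NewStringValue("hello"))
	describe(structpb.NewNumberValue(42))
	describe(structpb.NewNullValue())
}
```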
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
index 8da0df0..8368a3f 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -1,46 +1,18 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 package ptypes
 
-// This file implements operations on google.protobuf.Timestamp.
-
 import (
 	"errors"
 	"fmt"
 	"time"
 
-	tspb "github.com/golang/protobuf/ptypes/timestamp"
+	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
 )
 
+// Range of google.protobuf.Timestamp as specified in timestamp.proto.
 const (
 	// Seconds field of the earliest valid Timestamp.
 	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
@@ -50,17 +22,80 @@
 	maxValidSeconds = 253402300800
 )
 
+// Timestamp converts a timestamppb.Timestamp to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return
+// value is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+//
+// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
+func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+//
+// Deprecated: Call the timestamppb.Now function instead.
+func TimestampNow() *timestamppb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+//
+// Deprecated: Call the timestamppb.New function instead.
+func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
+	ts := &timestamppb.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps.
+// For invalid Timestamps, it returns an error message in parentheses.
+//
+// Deprecated: Call the ts.AsTime method instead,
+// followed by a call to the Format method on the time.Time value.
+func TimestampString(ts *timestamppb.Timestamp) string {
+	t, err := Timestamp(ts)
+	if err != nil {
+		return fmt.Sprintf("(%v)", err)
+	}
+	return t.Format(time.RFC3339Nano)
+}
+
 // validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range
-// [0001-01-01, 10000-01-01) and has a Nanos field
-// in the range [0, 1e9).
+// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
+// and has a Nanos field in the range [0, 1e9).
 //
 // If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes
-// the problem.
+// Otherwise, it returns an error that describes the problem.
 //
-// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
-func validateTimestamp(ts *tspb.Timestamp) error {
+// Every valid Timestamp can be represented by a time.Time,
+// but the converse is not true.
+func validateTimestamp(ts *timestamppb.Timestamp) error {
 	if ts == nil {
 		return errors.New("timestamp: nil Timestamp")
 	}
@@ -75,58 +110,3 @@
 	}
 	return nil
 }
-
-// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return value
-// is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
-	// Don't return the zero value on error, because corresponds to a valid
-	// timestamp. Instead return whatever time.Unix gives us.
-	var t time.Time
-	if ts == nil {
-		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
-	} else {
-		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
-	}
-	return t, validateTimestamp(ts)
-}
-
-// TimestampNow returns a google.protobuf.Timestamp for the current time.
-func TimestampNow() *tspb.Timestamp {
-	ts, err := TimestampProto(time.Now())
-	if err != nil {
-		panic("ptypes: time.Now() out of Timestamp range")
-	}
-	return ts
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
-	ts := &tspb.Timestamp{
-		Seconds: t.Unix(),
-		Nanos:   int32(t.Nanosecond()),
-	}
-	if err := validateTimestamp(ts); err != nil {
-		return nil, err
-	}
-	return ts, nil
-}
-
-// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
-// Timestamps, it returns an error message in parentheses.
-func TimestampString(ts *tspb.Timestamp) string {
-	t, err := Timestamp(ts)
-	if err != nil {
-		return fmt.Sprintf("(%v)", err)
-	}
-	return t.Format(time.RFC3339Nano)
-}
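
The rewritten timestamp.go keeps the old ptypes helpers but marks them Deprecated in favour of the timestamppb methods they now wrap. A minimal migration sketch, assuming google.golang.org/protobuf/types/known/timestamppb is on the module path:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Deprecated helper              -> replacement
	// ptypes.TimestampProto(t)       -> timestamppb.New(t)
	// ptypes.Timestamp(ts)           -> ts.AsTime() plus ts.CheckValid()
	// ptypes.TimestampNow()          -> timestamppb.Now()
	t := time.Date(2021, 6, 1, 12, 0, 0, 0, time.UTC)
	ts := timestamppb.New(t)
	if err := ts.CheckValid(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ts.AsTime().Format(time.RFC3339Nano)) // 2021-06-01T12:00:00Z
}
```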
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
index 31cd846..a76f807 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -1,179 +1,64 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/timestamp.proto
+// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
 
 package timestamp
 
 import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	math "math"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
 )
 
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/timestamp.proto.
 
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+type Timestamp = timestamppb.Timestamp
 
-// A Timestamp represents a point in time independent of any time zone
-// or calendar, represented as seconds and fractions of seconds at
-// nanosecond resolution in UTC Epoch time. It is encoded using the
-// Proleptic Gregorian Calendar which extends the Gregorian calendar
-// backwards to year one. It is encoded assuming all minutes are 60
-// seconds long, i.e. leap seconds are "smeared" so that no leap second
-// table is needed for interpretation. Range is from
-// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
-// By restricting to that range, we ensure that we can convert to
-// and from  RFC 3339 date strings.
-// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
-//
-// # Examples
-//
-// Example 1: Compute Timestamp from POSIX `time()`.
-//
-//     Timestamp timestamp;
-//     timestamp.set_seconds(time(NULL));
-//     timestamp.set_nanos(0);
-//
-// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
-//
-//     struct timeval tv;
-//     gettimeofday(&tv, NULL);
-//
-//     Timestamp timestamp;
-//     timestamp.set_seconds(tv.tv_sec);
-//     timestamp.set_nanos(tv.tv_usec * 1000);
-//
-// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
-//
-//     FILETIME ft;
-//     GetSystemTimeAsFileTime(&ft);
-//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-//
-//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
-//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
-//     Timestamp timestamp;
-//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
-//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
-//
-// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
-//
-//     long millis = System.currentTimeMillis();
-//
-//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
-//         .setNanos((int) ((millis % 1000) * 1000000)).build();
-//
-//
-// Example 5: Compute Timestamp from current time in Python.
-//
-//     timestamp = Timestamp()
-//     timestamp.GetCurrentTime()
-//
-// # JSON Mapping
-//
-// In JSON format, the Timestamp type is encoded as a string in the
-// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
-// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
-// where {year} is always expressed using four digits while {month}, {day},
-// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
-// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
-// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required. A proto3 JSON serializer should always use UTC (as indicated by
-// "Z") when printing the Timestamp type and a proto3 JSON parser should be
-// able to accept both UTC and other timezones (as indicated by an offset).
-//
-// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
-// 01:30 UTC on January 15, 2017.
-//
-// In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
-// method. In Python, a standard `datetime.datetime` object can be converted
-// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
-// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
-// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
-// ) to obtain a formatter capable of generating timestamps in this format.
-//
-//
-type Timestamp struct {
-	// Represents seconds of UTC time since Unix epoch
-	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
-	// 9999-12-31T23:59:59Z inclusive.
-	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
-	// Non-negative fractions of a second at nanosecond resolution. Negative
-	// second values with fractions must still have non-negative nanos values
-	// that count forward in time. Must be from 0 to 999,999,999
-	// inclusive.
-	Nanos                int32    `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
+	0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
+	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
+	0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x33,
 }
 
-func (m *Timestamp) Reset()         { *m = Timestamp{} }
-func (m *Timestamp) String() string { return proto.CompactTextString(m) }
-func (*Timestamp) ProtoMessage()    {}
-func (*Timestamp) Descriptor() ([]byte, []int) {
-	return fileDescriptor_292007bbfe81227e, []int{0}
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
 }
 
-func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
-
-func (m *Timestamp) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Timestamp.Unmarshal(m, b)
-}
-func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
-}
-func (m *Timestamp) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Timestamp.Merge(m, src)
-}
-func (m *Timestamp) XXX_Size() int {
-	return xxx_messageInfo_Timestamp.Size(m)
-}
-func (m *Timestamp) XXX_DiscardUnknown() {
-	xxx_messageInfo_Timestamp.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Timestamp proto.InternalMessageInfo
-
-func (m *Timestamp) GetSeconds() int64 {
-	if m != nil {
-		return m.Seconds
+func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
+func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
+	if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
+		return
 	}
-	return 0
-}
-
-func (m *Timestamp) GetNanos() int32 {
-	if m != nil {
-		return m.Nanos
-	}
-	return 0
-}
-
-func init() {
-	proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
-}
-
-func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
-
-var fileDescriptor_292007bbfe81227e = []byte{
-	// 191 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
-	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
-	0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
-	0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
-	0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
-	0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
-	0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
-	0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
-	0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
-	0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
-	0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
-	0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
+	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
+	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
+	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
 }
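
The init above lazily builds and registers the forwarding descriptor the first time the legacy package is imported. A hedged sketch of observing that registration through the global registry, assuming TypeBuilder's default behaviour of registering into protoregistry.GlobalFiles:

```go
package main

import (
	"fmt"
	"log"

	// Importing the legacy package runs the init shown above, which registers
	// the forwarding descriptor under its github.com/... path.
	_ "github.com/golang/protobuf/ptypes/timestamp"
	"google.golang.org/protobuf/reflect/protoregistry"
)

func main() {
	fd, err := protoregistry.GlobalFiles.FindFileByPath(
		"github.com/golang/protobuf/ptypes/timestamp/timestamp.proto")
	if err != nil {
		log.Fatal(err)
	}
	// The forwarding file declares no messages of its own; it only publicly
	// imports google/protobuf/timestamp.proto.
	fmt.Println(fd.Path(), fd.Messages().Len()) // ... 0
}
```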
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
deleted file mode 100644
index eafb3fa..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+++ /dev/null
@@ -1,135 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc.  All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "github.com/golang/protobuf/ptypes/timestamp";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "TimestampProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// A Timestamp represents a point in time independent of any time zone
-// or calendar, represented as seconds and fractions of seconds at
-// nanosecond resolution in UTC Epoch time. It is encoded using the
-// Proleptic Gregorian Calendar which extends the Gregorian calendar
-// backwards to year one. It is encoded assuming all minutes are 60
-// seconds long, i.e. leap seconds are "smeared" so that no leap second
-// table is needed for interpretation. Range is from
-// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
-// By restricting to that range, we ensure that we can convert to
-// and from  RFC 3339 date strings.
-// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
-//
-// # Examples
-//
-// Example 1: Compute Timestamp from POSIX `time()`.
-//
-//     Timestamp timestamp;
-//     timestamp.set_seconds(time(NULL));
-//     timestamp.set_nanos(0);
-//
-// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
-//
-//     struct timeval tv;
-//     gettimeofday(&tv, NULL);
-//
-//     Timestamp timestamp;
-//     timestamp.set_seconds(tv.tv_sec);
-//     timestamp.set_nanos(tv.tv_usec * 1000);
-//
-// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
-//
-//     FILETIME ft;
-//     GetSystemTimeAsFileTime(&ft);
-//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-//
-//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
-//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
-//     Timestamp timestamp;
-//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
-//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
-//
-// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
-//
-//     long millis = System.currentTimeMillis();
-//
-//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
-//         .setNanos((int) ((millis % 1000) * 1000000)).build();
-//
-//
-// Example 5: Compute Timestamp from current time in Python.
-//
-//     timestamp = Timestamp()
-//     timestamp.GetCurrentTime()
-//
-// # JSON Mapping
-//
-// In JSON format, the Timestamp type is encoded as a string in the
-// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
-// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
-// where {year} is always expressed using four digits while {month}, {day},
-// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
-// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
-// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required. A proto3 JSON serializer should always use UTC (as indicated by
-// "Z") when printing the Timestamp type and a proto3 JSON parser should be
-// able to accept both UTC and other timezones (as indicated by an offset).
-//
-// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
-// 01:30 UTC on January 15, 2017.
-//
-// In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
-// method. In Python, a standard `datetime.datetime` object can be converted
-// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
-// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
-// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
-// ) to obtain a formatter capable of generating timestamps in this format.
-//
-//
-message Timestamp {
-
-  // Represents seconds of UTC time since Unix epoch
-  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
-  // 9999-12-31T23:59:59Z inclusive.
-  int64 seconds = 1;
-
-  // Non-negative fractions of a second at nanosecond resolution. Negative
-  // second values with fractions must still have non-negative nanos values
-  // that count forward in time. Must be from 0 to 999,999,999
-  // inclusive.
-  int32 nanos = 2;
-}
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
index bcfa195..203e84e 100644
--- a/vendor/github.com/golang/snappy/AUTHORS
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -8,8 +8,10 @@
 
 # Please keep the list sorted.
 
+Amazon.com, Inc
 Damian Gryski <dgryski@gmail.com>
 Google Inc.
 Jan Mercl <0xjnml@gmail.com>
+Klaus Post <klauspost@gmail.com>
 Rodolfo Carvalho <rhcarvalho@gmail.com>
 Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
index 931ae31..d991473 100644
--- a/vendor/github.com/golang/snappy/CONTRIBUTORS
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -28,7 +28,9 @@
 
 Damian Gryski <dgryski@gmail.com>
 Jan Mercl <0xjnml@gmail.com>
+Jonathan Swinney <jswinney@amazon.com>
 Kai Backman <kaib@golang.org>
+Klaus Post <klauspost@gmail.com>
 Marc-Antoine Ruel <maruel@chromium.org>
 Nigel Tao <nigeltao@golang.org>
 Rob Pike <r@golang.org>
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
index 72efb03..f1e04b1 100644
--- a/vendor/github.com/golang/snappy/decode.go
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -52,6 +52,8 @@
 // Otherwise, a newly allocated slice will be returned.
 //
 // The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Decode handles the Snappy block format, not the Snappy stream format.
 func Decode(dst, src []byte) ([]byte, error) {
 	dLen, s, err := decodedLen(src)
 	if err != nil {
@@ -83,6 +85,8 @@
 }
 
 // Reader is an io.Reader that can read Snappy-compressed bytes.
+//
+// Reader handles the Snappy stream format, not the Snappy block format.
 type Reader struct {
 	r       io.Reader
 	err     error
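
Note: the doc comments added above spell out the block/stream split in golang/snappy: Decode (like Encode) works on one whole block, while Reader consumes the framed stream format. A minimal block-format round trip, assuming the usual `github.com/golang/snappy` import path:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("the block format compresses one []byte at a time")

	// Encode/Decode operate on the Snappy block format; passing nil lets the
	// package allocate a destination slice of the right size.
	block := snappy.Encode(nil, src)
	back, err := snappy.Decode(nil, block)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, round trip ok: %v\n",
		len(src), len(block), string(back) == string(src))
}
```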
diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s
new file mode 100644
index 0000000..7a3ead1
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_arm64.s
@@ -0,0 +1,494 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+//	- R2	scratch
+//	- R3	scratch
+//	- R4	length or x
+//	- R5	offset
+//	- R6	&src[s]
+//	- R7	&dst[d]
+//	+ R8	dst_base
+//	+ R9	dst_len
+//	+ R10	dst_base + dst_len
+//	+ R11	src_base
+//	+ R12	src_len
+//	+ R13	src_base + src_len
+//	- R14	used by doCopy
+//	- R15	used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly R7 - R8,  and len(dst)-d is R10 - R7.
+// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6.
+TEXT ·decode(SB), NOSPLIT, $56-56
+	// Initialize R6, R7 and R8-R13.
+	MOVD dst_base+0(FP), R8
+	MOVD dst_len+8(FP), R9
+	MOVD R8, R7
+	MOVD R8, R10
+	ADD  R9, R10, R10
+	MOVD src_base+24(FP), R11
+	MOVD src_len+32(FP), R12
+	MOVD R11, R6
+	MOVD R11, R13
+	ADD  R12, R13, R13
+
+loop:
+	// for s < len(src)
+	CMP R13, R6
+	BEQ end
+
+	// R4 = uint32(src[s])
+	//
+	// switch src[s] & 0x03
+	MOVBU (R6), R4
+	MOVW  R4, R3
+	ANDW  $3, R3
+	MOVW  $1, R1
+	CMPW  R1, R3
+	BGE   tagCopy
+
+	// ----------------------------------------
+	// The code below handles literal tags.
+
+	// case tagLiteral:
+	// x := uint32(src[s] >> 2)
+	// switch
+	MOVW $60, R1
+	LSRW $2, R4, R4
+	CMPW R4, R1
+	BLS  tagLit60Plus
+
+	// case x < 60:
+	// s++
+	ADD $1, R6, R6
+
+doLit:
+	// This is the end of the inner "switch", when we have a literal tag.
+	//
+	// We assume that R4 == x and x fits in a uint32, where x is the variable
+	// used in the pure Go decode_other.go code.
+
+	// length = int(x) + 1
+	//
+	// Unlike the pure Go code, we don't need to check if length <= 0 because
+	// R4 can hold 64 bits, so the increment cannot overflow.
+	ADD $1, R4, R4
+
+	// Prepare to check if copying length bytes will run past the end of dst or
+	// src.
+	//
+	// R2 = len(dst) - d
+	// R3 = len(src) - s
+	MOVD R10, R2
+	SUB  R7, R2, R2
+	MOVD R13, R3
+	SUB  R6, R3, R3
+
+	// !!! Try a faster technique for short (16 or fewer bytes) copies.
+	//
+	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+	//   goto callMemmove // Fall back on calling runtime·memmove.
+	// }
+	//
+	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+	// against 21 instead of 16, because it cannot assume that all of its input
+	// is contiguous in memory and so it needs to leave enough source bytes to
+	// read the next tag without refilling buffers, but Go's Decode assumes
+	// contiguousness (the src argument is a []byte).
+	CMP $16, R4
+	BGT callMemmove
+	CMP $16, R2
+	BLT callMemmove
+	CMP $16, R3
+	BLT callMemmove
+
+	// !!! Implement the copy from src to dst as a 16-byte load and store.
+	// (Decode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only length bytes, but that's
+	// OK. If the input is a valid Snappy encoding then subsequent iterations
+	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+	// non-nil error), so the overrun will be ignored.
+	//
+	// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	LDP 0(R6), (R14, R15)
+	STP (R14, R15), 0(R7)
+
+	// d += length
+	// s += length
+	ADD R4, R7, R7
+	ADD R4, R6, R6
+	B   loop
+
+callMemmove:
+	// if length > len(dst)-d || length > len(src)-s { etc }
+	CMP R2, R4
+	BGT errCorrupt
+	CMP R3, R4
+	BGT errCorrupt
+
+	// copy(dst[d:], src[s:s+length])
+	//
+	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+	// R7, R6 and R4 as arguments. Coincidentally, we also need to spill those
+	// three registers to the stack, to save local variables across the CALL.
+	MOVD R7, 8(RSP)
+	MOVD R6, 16(RSP)
+	MOVD R4, 24(RSP)
+	MOVD R7, 32(RSP)
+	MOVD R6, 40(RSP)
+	MOVD R4, 48(RSP)
+	CALL runtime·memmove(SB)
+
+	// Restore local variables: unspill registers from the stack and
+	// re-calculate R8-R13.
+	MOVD 32(RSP), R7
+	MOVD 40(RSP), R6
+	MOVD 48(RSP), R4
+	MOVD dst_base+0(FP), R8
+	MOVD dst_len+8(FP), R9
+	MOVD R8, R10
+	ADD  R9, R10, R10
+	MOVD src_base+24(FP), R11
+	MOVD src_len+32(FP), R12
+	MOVD R11, R13
+	ADD  R12, R13, R13
+
+	// d += length
+	// s += length
+	ADD R4, R7, R7
+	ADD R4, R6, R6
+	B   loop
+
+tagLit60Plus:
+	// !!! This fragment does the
+	//
+	// s += x - 58; if uint(s) > uint(len(src)) { etc }
+	//
+	// checks. In the asm version, we code it once instead of once per switch case.
+	ADD  R4, R6, R6
+	SUB  $58, R6, R6
+	MOVD R6, R3
+	SUB  R11, R3, R3
+	CMP  R12, R3
+	BGT  errCorrupt
+
+	// case x == 60:
+	MOVW $61, R1
+	CMPW R1, R4
+	BEQ  tagLit61
+	BGT  tagLit62Plus
+
+	// x = uint32(src[s-1])
+	MOVBU -1(R6), R4
+	B     doLit
+
+tagLit61:
+	// case x == 61:
+	// x = uint32(src[s-2]) | uint32(src[s-1])<<8
+	MOVHU -2(R6), R4
+	B     doLit
+
+tagLit62Plus:
+	CMPW $62, R4
+	BHI  tagLit63
+
+	// case x == 62:
+	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+	MOVHU -3(R6), R4
+	MOVBU -1(R6), R3
+	ORR   R3<<16, R4
+	B     doLit
+
+tagLit63:
+	// case x == 63:
+	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+	MOVWU -4(R6), R4
+	B     doLit
+
+	// The code above handles literal tags.
+	// ----------------------------------------
+	// The code below handles copy tags.
+
+tagCopy4:
+	// case tagCopy4:
+	// s += 5
+	ADD $5, R6, R6
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVD R6, R3
+	SUB  R11, R3, R3
+	CMP  R12, R3
+	BGT  errCorrupt
+
+	// length = 1 + int(src[s-5])>>2
+	MOVD $1, R1
+	ADD  R4>>2, R1, R4
+
+	// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+	MOVWU -4(R6), R5
+	B     doCopy
+
+tagCopy2:
+	// case tagCopy2:
+	// s += 3
+	ADD $3, R6, R6
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVD R6, R3
+	SUB  R11, R3, R3
+	CMP  R12, R3
+	BGT  errCorrupt
+
+	// length = 1 + int(src[s-3])>>2
+	MOVD $1, R1
+	ADD  R4>>2, R1, R4
+
+	// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+	MOVHU -2(R6), R5
+	B     doCopy
+
+tagCopy:
+	// We have a copy tag. We assume that:
+	//	- R3 == src[s] & 0x03
+	//	- R4 == src[s]
+	CMP $2, R3
+	BEQ tagCopy2
+	BGT tagCopy4
+
+	// case tagCopy1:
+	// s += 2
+	ADD $2, R6, R6
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVD R6, R3
+	SUB  R11, R3, R3
+	CMP  R12, R3
+	BGT  errCorrupt
+
+	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+	MOVD  R4, R5
+	AND   $0xe0, R5
+	MOVBU -1(R6), R3
+	ORR   R5<<3, R3, R5
+
+	// length = 4 + int(src[s-2])>>2&0x7
+	MOVD $7, R1
+	AND  R4>>2, R1, R4
+	ADD  $4, R4, R4
+
+doCopy:
+	// This is the end of the outer "switch", when we have a copy tag.
+	//
+	// We assume that:
+	//	- R4 == length && R4 > 0
+	//	- R5 == offset
+
+	// if offset <= 0 { etc }
+	MOVD $0, R1
+	CMP  R1, R5
+	BLE  errCorrupt
+
+	// if d < offset { etc }
+	MOVD R7, R3
+	SUB  R8, R3, R3
+	CMP  R5, R3
+	BLT  errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVD R10, R3
+	SUB  R7, R3, R3
+	CMP  R3, R4
+	BGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVD R10, R14
+	SUB  R7, R14, R14
+	MOVD R7, R15
+	SUB  R5, R15, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMP  $16, R4
+	BGT  slowForwardCopy
+	CMP  $8, R5
+	BLT  slowForwardCopy
+	CMP  $16, R14
+	BLT  slowForwardCopy
+	MOVD 0(R15), R2
+	MOVD R2, 0(R7)
+	MOVD 8(R15), R3
+	MOVD R3, 8(R7)
+	ADD  R4, R7, R7
+	B    loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes.  However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUB $10, R14, R14
+	CMP R14, R4
+	BGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+	//
+	// for offset < 8 {
+	//   copy 8 bytes from dst[d-offset:] to dst[d:]
+	//   length -= offset
+	//   d      += offset
+	//   offset += offset
+	//   // The two previous lines together means that d-offset, and therefore
+	//   // R15, is unchanged.
+	// }
+	CMP  $8, R5
+	BGE  fixUpSlowForwardCopy
+	MOVD (R15), R3
+	MOVD R3, (R7)
+	SUB  R5, R4, R4
+	ADD  R5, R7, R7
+	ADD  R5, R5, R5
+	B    makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+	// !!! Add length (which might be negative now) to d (implied by R7 being
+	// &dst[d]) so that d ends up at the right place when we jump back to the
+	// top of the loop. Before we do that, though, we save R7 to R2 so that, if
+	// length is positive, copying the remaining length bytes will write to the
+	// right place.
+	MOVD R7, R2
+	ADD  R4, R7, R7
+
+finishSlowForwardCopy:
+	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+	// length means that we overrun, but as above, that will be fixed up by
+	// subsequent iterations of the outermost loop.
+	MOVD $0, R1
+	CMP  R1, R4
+	BLE  loop
+	MOVD (R15), R3
+	MOVD R3, (R2)
+	ADD  $8, R15, R15
+	ADD  $8, R2, R2
+	SUB  $8, R4, R4
+	B    finishSlowForwardCopy
+
+verySlowForwardCopy:
+	// verySlowForwardCopy is a simple implementation of forward copy. In C
+	// parlance, this is a do/while loop instead of a while loop, since we know
+	// that length > 0. In Go syntax:
+	//
+	// for {
+	//   dst[d] = dst[d - offset]
+	//   d++
+	//   length--
+	//   if length == 0 {
+	//     break
+	//   }
+	// }
+	MOVB (R15), R3
+	MOVB R3, (R7)
+	ADD  $1, R15, R15
+	ADD  $1, R7, R7
+	SUB  $1, R4, R4
+	CBNZ R4, verySlowForwardCopy
+	B    loop
+
+	// The code above handles copy tags.
+	// ----------------------------------------
+
+end:
+	// This is the end of the "for s < len(src)".
+	//
+	// if d != len(dst) { etc }
+	CMP R10, R7
+	BNE errCorrupt
+
+	// return 0
+	MOVD $0, ret+48(FP)
+	RET
+
+errCorrupt:
+	// return decodeErrCodeCorrupt
+	MOVD $1, R2
+	MOVD R2, ret+48(FP)
+	RET
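
Note: the makeOffsetAtLeast8 / slowForwardCopy comments above describe doubling a short repeating pattern until the copy offset reaches 8 bytes, so wider load/stores become safe. A rough Go illustration of that idea (an illustrative helper, not the vendored assembly, which also tolerates bounded overruns):

```go
package main

import "fmt"

// expandPattern copies length bytes from dst[d-offset:] to dst[d:], running
// forwards so an overlapping source repeats its pattern. While the pattern is
// shorter than 8 bytes, each copy of "offset" bytes doubles the pattern length
// without moving the source start, mirroring the makeOffsetAtLeast8 trick.
func expandPattern(dst []byte, d, offset, length int) {
	for offset < 8 && length > offset {
		copy(dst[d:d+offset], dst[d-offset:d])
		d += offset
		length -= offset
		offset += offset // d-offset stays fixed, as in the assembly comment
	}
	// Once offset >= 8 (or the remainder fits), plain forward chunk copies
	// finish the job.
	for length > 0 {
		n := copy(dst[d:d+length], dst[d-offset:d])
		d += n
		length -= n
	}
}

func main() {
	dst := make([]byte, 12)
	copy(dst, "ab")
	expandPattern(dst, 2, 2, 10)
	fmt.Println(string(dst)) // abababababab
}
```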
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_asm.go
similarity index 93%
rename from vendor/github.com/golang/snappy/decode_amd64.go
rename to vendor/github.com/golang/snappy/decode_asm.go
index fcd192b..7082b34 100644
--- a/vendor/github.com/golang/snappy/decode_amd64.go
+++ b/vendor/github.com/golang/snappy/decode_asm.go
@@ -5,6 +5,7 @@
 // +build !appengine
 // +build gc
 // +build !noasm
+// +build amd64 arm64
 
 package snappy
 
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
index 8c9f204..2f672be 100644
--- a/vendor/github.com/golang/snappy/decode_other.go
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !amd64 appengine !gc noasm
+// +build !amd64,!arm64 appengine !gc noasm
 
 package snappy
 
@@ -85,14 +85,28 @@
 		if offset <= 0 || d < offset || length > len(dst)-d {
 			return decodeErrCodeCorrupt
 		}
-		// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
-		// the built-in copy function, this byte-by-byte copy always runs
+		// Copy from an earlier sub-slice of dst to a later sub-slice.
+		// If no overlap, use the built-in copy:
+		if offset >= length {
+			copy(dst[d:d+length], dst[d-offset:])
+			d += length
+			continue
+		}
+
+		// Unlike the built-in copy function, this byte-by-byte copy always runs
 		// forwards, even if the slices overlap. Conceptually, this is:
 		//
 		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
-		for end := d + length; d != end; d++ {
-			dst[d] = dst[d-offset]
+		//
+		// We align the slices into a and b and show the compiler they are the same size.
+		// This allows the loop to run without bounds checks.
+		a := dst[d : d+length]
+		b := dst[d-offset:]
+		b = b[:len(a)]
+		for i := range a {
+			a[i] = b[i]
 		}
+		d += length
 	}
 	if d != len(dst) {
 		return decodeErrCodeCorrupt
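
Note: the rewritten pure-Go loop above leans on a common idiom: re-slicing the source to exactly len(a) lets the compiler prove both indexes are in range and drop per-iteration bounds checks, while the byte-by-byte loop keeps the forward-copy semantics that overlapping matches need. A standalone sketch of the same pattern (hypothetical helper name):

```go
package main

import "fmt"

// forwardCopy copies len(a) bytes from b into a one byte at a time, always
// running forwards, so an overlapping source repeats its own output. The
// caller must guarantee len(b) >= len(a); the up-front re-slice is the hint
// that lets the compiler elide bounds checks inside the loop.
func forwardCopy(a, b []byte) {
	b = b[:len(a)]
	for i := range a {
		a[i] = b[i]
	}
}

func main() {
	dst := []byte("abc.........")
	// The destination starts 3 bytes past the source, so the 3-byte pattern
	// repeats; the built-in copy (memmove semantics) would not do this.
	forwardCopy(dst[3:], dst)
	fmt.Println(string(dst)) // abcabcabcabc
}
```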
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
index 8d393e9..7f23657 100644
--- a/vendor/github.com/golang/snappy/encode.go
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -15,6 +15,8 @@
 // Otherwise, a newly allocated slice will be returned.
 //
 // The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Encode handles the Snappy block format, not the Snappy stream format.
 func Encode(dst, src []byte) []byte {
 	if n := MaxEncodedLen(len(src)); n < 0 {
 		panic(ErrTooLarge)
@@ -139,6 +141,8 @@
 }
 
 // Writer is an io.Writer that can write Snappy-compressed bytes.
+//
+// Writer handles the Snappy stream format, not the Snappy block format.
 type Writer struct {
 	w   io.Writer
 	err error
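
Note: Encode/Writer mirror Decode/Reader: Encode emits a single block, while Writer frames data in the stream format that Reader consumes, so the two formats are not interchangeable. A short round trip over the stream API, assuming the usual import path:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/golang/snappy"
)

func main() {
	var buf bytes.Buffer

	// Writer emits the Snappy stream format (framed), not the block format.
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write([]byte("stream-framed payload")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // flush the final frame
		panic(err)
	}

	// Reader understands the stream format produced above.
	out, err := io.ReadAll(snappy.NewReader(&buf))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```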
diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s
new file mode 100644
index 0000000..bf83667
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_arm64.s
@@ -0,0 +1,722 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+//	- R3	len(lit)
+//	- R4	n
+//	- R6	return value
+//	- R8	&dst[i]
+//	- R10	&lit[0]
+//
+// The 32 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $32-56
+	MOVD dst_base+0(FP), R8
+	MOVD lit_base+24(FP), R10
+	MOVD lit_len+32(FP), R3
+	MOVD R3, R6
+	MOVW R3, R4
+	SUBW $1, R4, R4
+
+	CMPW $60, R4
+	BLT  oneByte
+	CMPW $256, R4
+	BLT  twoBytes
+
+threeBytes:
+	MOVD $0xf4, R2
+	MOVB R2, 0(R8)
+	MOVW R4, 1(R8)
+	ADD  $3, R8, R8
+	ADD  $3, R6, R6
+	B    memmove
+
+twoBytes:
+	MOVD $0xf0, R2
+	MOVB R2, 0(R8)
+	MOVB R4, 1(R8)
+	ADD  $2, R8, R8
+	ADD  $2, R6, R6
+	B    memmove
+
+oneByte:
+	LSLW $2, R4, R4
+	MOVB R4, 0(R8)
+	ADD  $1, R8, R8
+	ADD  $1, R6, R6
+
+memmove:
+	MOVD R6, ret+48(FP)
+
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// R8, R10 and R3 as arguments.
+	MOVD R8, 8(RSP)
+	MOVD R10, 16(RSP)
+	MOVD R3, 24(RSP)
+	CALL runtime·memmove(SB)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- R3	length
+//	- R7	&dst[0]
+//	- R8	&dst[i]
+//	- R11	offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+	MOVD dst_base+0(FP), R8
+	MOVD R8, R7
+	MOVD offset+24(FP), R11
+	MOVD length+32(FP), R3
+
+loop0:
+	// for length >= 68 { etc }
+	CMPW $68, R3
+	BLT  step1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVD $0xfe, R2
+	MOVB R2, 0(R8)
+	MOVW R11, 1(R8)
+	ADD  $3, R8, R8
+	SUB  $64, R3, R3
+	B    loop0
+
+step1:
+	// if length > 64 { etc }
+	CMP $64, R3
+	BLE step2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVD $0xee, R2
+	MOVB R2, 0(R8)
+	MOVW R11, 1(R8)
+	ADD  $3, R8, R8
+	SUB  $60, R3, R3
+
+step2:
+	// if length >= 12 || offset >= 2048 { goto step3 }
+	CMP  $12, R3
+	BGE  step3
+	CMPW $2048, R11
+	BGE  step3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(R8)
+	LSRW $3, R11, R11
+	AND  $0xe0, R11, R11
+	SUB  $4, R3, R3
+	LSLW $2, R3
+	AND  $0xff, R3, R3
+	ORRW R3, R11, R11
+	ORRW $1, R11, R11
+	MOVB R11, 0(R8)
+	ADD  $2, R8, R8
+
+	// Return the number of bytes written.
+	SUB  R7, R8, R8
+	MOVD R8, ret+40(FP)
+	RET
+
+step3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUB  $1, R3, R3
+	AND  $0xff, R3, R3
+	LSLW $2, R3, R3
+	ORRW $2, R3, R3
+	MOVB R3, 0(R8)
+	MOVW R11, 1(R8)
+	ADD  $3, R8, R8
+
+	// Return the number of bytes written.
+	SUB  R7, R8, R8
+	MOVD R8, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- R6	&src[0]
+//	- R7	&src[j]
+//	- R13	&src[len(src) - 8]
+//	- R14	&src[len(src)]
+//	- R15	&src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+	MOVD src_base+0(FP), R6
+	MOVD src_len+8(FP), R14
+	MOVD i+24(FP), R15
+	MOVD j+32(FP), R7
+	ADD  R6, R14, R14
+	ADD  R6, R15, R15
+	ADD  R6, R7, R7
+	MOVD R14, R13
+	SUB  $8, R13, R13
+
+cmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMP  R13, R7
+	BHI  cmp1
+	MOVD (R15), R3
+	MOVD (R7), R4
+	CMP  R4, R3
+	BNE  bsf
+	ADD  $8, R15, R15
+	ADD  $8, R7, R7
+	B    cmp8
+
+bsf:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs.
+	// RBIT reverses the bit order, then CLZ counts the leading zeros, the
+	// combination of which finds the least significant bit which is set.
+	// The arm64 architecture is little-endian, and the shift by 3 converts
+	// a bit index to a byte index.
+	EOR  R3, R4, R4
+	RBIT R4, R4
+	CLZ  R4, R4
+	ADD  R4>>3, R7, R7
+
+	// Convert from &src[ret] to ret.
+	SUB  R6, R7, R7
+	MOVD R7, ret+40(FP)
+	RET
+
+cmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMP  R7, R14
+	BLS  extendMatchEnd
+	MOVB (R15), R3
+	MOVB (R7), R4
+	CMP  R4, R3
+	BNE  extendMatchEnd
+	ADD  $1, R15, R15
+	ADD  $1, R7, R7
+	B    cmp1
+
+extendMatchEnd:
+	// Convert from &src[ret] to ret.
+	SUB  R6, R7, R7
+	MOVD R7, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+//	- R3	.	.
+//	- R4	.	.
+//	- R5	64	shift
+//	- R6	72	&src[0], tableSize
+//	- R7	80	&src[s]
+//	- R8	88	&dst[d]
+//	- R9	96	sLimit
+//	- R10	.	&src[nextEmit]
+//	- R11	104	prevHash, currHash, nextHash, offset
+//	- R12	112	&src[base], skip
+//	- R13	.	&src[nextS], &src[len(src) - 8]
+//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
+//	- R15	120	candidate
+//	- R16	.	hash constant, 0x1e35a7bd
+//	- R17	.	&table
+//	- .  	128	table
+//
+// The second column (64, 72, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 64 + 64 = 32896.
+TEXT ·encodeBlock(SB), 0, $32896-56
+	MOVD dst_base+0(FP), R8
+	MOVD src_base+24(FP), R7
+	MOVD src_len+32(FP), R14
+
+	// shift, tableSize := uint32(32-8), 1<<8
+	MOVD  $24, R5
+	MOVD  $256, R6
+	MOVW  $0xa7bd, R16
+	MOVKW $(0x1e35<<16), R16
+
+calcShift:
+	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+	//	shift--
+	// }
+	MOVD $16384, R2
+	CMP  R2, R6
+	BGE  varTable
+	CMP  R14, R6
+	BGE  varTable
+	SUB  $1, R5, R5
+	LSL  $1, R6, R6
+	B    calcShift
+
+varTable:
+	// var table [maxTableSize]uint16
+	//
+	// In the asm code, unlike the Go code, we can zero-initialize only the
+	// first tableSize elements. Each uint16 element is 2 bytes and each
+	// iterations writes 64 bytes, so we can do only tableSize/32 writes
+	// instead of the 2048 writes that would zero-initialize all of table's
+	// 32768 bytes. This clear could overrun the first tableSize elements, but
+	// it won't overrun the allocated stack size.
+	ADD  $128, RSP, R17
+	MOVD R17, R4
+
+	// !!! R6 = &src[tableSize]
+	ADD R6<<1, R17, R6
+
+memclr:
+	STP.P (ZR, ZR), 64(R4)
+	STP   (ZR, ZR), -48(R4)
+	STP   (ZR, ZR), -32(R4)
+	STP   (ZR, ZR), -16(R4)
+	CMP   R4, R6
+	BHI   memclr
+
+	// !!! R6 = &src[0]
+	MOVD R7, R6
+
+	// sLimit := len(src) - inputMargin
+	MOVD R14, R9
+	SUB  $15, R9, R9
+
+	// !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't
+	// change for the rest of the function.
+	MOVD R5, 64(RSP)
+	MOVD R6, 72(RSP)
+	MOVD R9, 96(RSP)
+
+	// nextEmit := 0
+	MOVD R6, R10
+
+	// s := 1
+	ADD $1, R7, R7
+
+	// nextHash := hash(load32(src, s), shift)
+	MOVW 0(R7), R11
+	MULW R16, R11, R11
+	LSRW R5, R11, R11
+
+outer:
+	// for { etc }
+
+	// skip := 32
+	MOVD $32, R12
+
+	// nextS := s
+	MOVD R7, R13
+
+	// candidate := 0
+	MOVD $0, R15
+
+inner0:
+	// for { etc }
+
+	// s := nextS
+	MOVD R13, R7
+
+	// bytesBetweenHashLookups := skip >> 5
+	MOVD R12, R14
+	LSR  $5, R14, R14
+
+	// nextS = s + bytesBetweenHashLookups
+	ADD R14, R13, R13
+
+	// skip += bytesBetweenHashLookups
+	ADD R14, R12, R12
+
+	// if nextS > sLimit { goto emitRemainder }
+	MOVD R13, R3
+	SUB  R6, R3, R3
+	CMP  R9, R3
+	BHI  emitRemainder
+
+	// candidate = int(table[nextHash])
+	MOVHU 0(R17)(R11<<1), R15
+
+	// table[nextHash] = uint16(s)
+	MOVD R7, R3
+	SUB  R6, R3, R3
+
+	MOVH R3, 0(R17)(R11<<1)
+
+	// nextHash = hash(load32(src, nextS), shift)
+	MOVW 0(R13), R11
+	MULW R16, R11
+	LSRW R5, R11, R11
+
+	// if load32(src, s) != load32(src, candidate) { continue } break
+	MOVW 0(R7), R3
+	MOVW (R6)(R15*1), R4
+	CMPW R4, R3
+	BNE  inner0
+
+fourByteMatch:
+	// As per the encode_other.go code:
+	//
+	// A 4-byte match has been found. We'll later see etc.
+
+	// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+	// on inputMargin in encode.go.
+	MOVD R7, R3
+	SUB  R10, R3, R3
+	CMP  $16, R3
+	BLE  emitLiteralFastPath
+
+	// ----------------------------------------
+	// Begin inline of the emitLiteral call.
+	//
+	// d += emitLiteral(dst[d:], src[nextEmit:s])
+
+	MOVW R3, R4
+	SUBW $1, R4, R4
+
+	MOVW $60, R2
+	CMPW R2, R4
+	BLT  inlineEmitLiteralOneByte
+	MOVW $256, R2
+	CMPW R2, R4
+	BLT  inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+	MOVD $0xf4, R1
+	MOVB R1, 0(R8)
+	MOVW R4, 1(R8)
+	ADD  $3, R8, R8
+	B    inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+	MOVD $0xf0, R1
+	MOVB R1, 0(R8)
+	MOVB R4, 1(R8)
+	ADD  $2, R8, R8
+	B    inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+	LSLW $2, R4, R4
+	MOVB R4, 0(R8)
+	ADD  $1, R8, R8
+
+inlineEmitLiteralMemmove:
+	// Spill local variables (registers) onto the stack; call; unspill.
+	//
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// R8, R10 and R3 as arguments.
+	MOVD R8, 8(RSP)
+	MOVD R10, 16(RSP)
+	MOVD R3, 24(RSP)
+
+	// Finish the "d +=" part of "d += emitLiteral(etc)".
+	ADD   R3, R8, R8
+	MOVD  R7, 80(RSP)
+	MOVD  R8, 88(RSP)
+	MOVD  R15, 120(RSP)
+	CALL  runtime·memmove(SB)
+	MOVD  64(RSP), R5
+	MOVD  72(RSP), R6
+	MOVD  80(RSP), R7
+	MOVD  88(RSP), R8
+	MOVD  96(RSP), R9
+	MOVD  120(RSP), R15
+	ADD   $128, RSP, R17
+	MOVW  $0xa7bd, R16
+	MOVKW $(0x1e35<<16), R16
+	B     inner1
+
+inlineEmitLiteralEnd:
+	// End inline of the emitLiteral call.
+	// ----------------------------------------
+
+emitLiteralFastPath:
+	// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+	MOVB R3, R4
+	SUBW $1, R4, R4
+	AND  $0xff, R4, R4
+	LSLW $2, R4, R4
+	MOVB R4, (R8)
+	ADD  $1, R8, R8
+
+	// !!! Implement the copy from lit to dst as a 16-byte load and store.
+	// (Encode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only len(lit) bytes, but that's
+	// OK. Subsequent iterations will fix up the overrun.
+	//
+	// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	LDP 0(R10), (R0, R1)
+	STP (R0, R1), 0(R8)
+	ADD R3, R8, R8
+
+inner1:
+	// for { etc }
+
+	// base := s
+	MOVD R7, R12
+
+	// !!! offset := base - candidate
+	MOVD R12, R11
+	SUB  R15, R11, R11
+	SUB  R6, R11, R11
+
+	// ----------------------------------------
+	// Begin inline of the extendMatch call.
+	//
+	// s = extendMatch(src, candidate+4, s+4)
+
+	// !!! R14 = &src[len(src)]
+	MOVD src_len+32(FP), R14
+	ADD  R6, R14, R14
+
+	// !!! R13 = &src[len(src) - 8]
+	MOVD R14, R13
+	SUB  $8, R13, R13
+
+	// !!! R15 = &src[candidate + 4]
+	ADD $4, R15, R15
+	ADD R6, R15, R15
+
+	// !!! s += 4
+	ADD $4, R7, R7
+
+inlineExtendMatchCmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMP  R13, R7
+	BHI  inlineExtendMatchCmp1
+	MOVD (R15), R3
+	MOVD (R7), R4
+	CMP  R4, R3
+	BNE  inlineExtendMatchBSF
+	ADD  $8, R15, R15
+	ADD  $8, R7, R7
+	B    inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs.
+	// RBIT reverses the bit order, then CLZ counts the leading zeros, the
+	// combination of which finds the least significant bit which is set.
+	// The arm64 architecture is little-endian, and the shift by 3 converts
+	// a bit index to a byte index.
+	EOR  R3, R4, R4
+	RBIT R4, R4
+	CLZ  R4, R4
+	ADD  R4>>3, R7, R7
+	B    inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMP  R7, R14
+	BLS  inlineExtendMatchEnd
+	MOVB (R15), R3
+	MOVB (R7), R4
+	CMP  R4, R3
+	BNE  inlineExtendMatchEnd
+	ADD  $1, R15, R15
+	ADD  $1, R7, R7
+	B    inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+	// End inline of the extendMatch call.
+	// ----------------------------------------
+
+	// ----------------------------------------
+	// Begin inline of the emitCopy call.
+	//
+	// d += emitCopy(dst[d:], base-candidate, s-base)
+
+	// !!! length := s - base
+	MOVD R7, R3
+	SUB  R12, R3, R3
+
+inlineEmitCopyLoop0:
+	// for length >= 68 { etc }
+	MOVW $68, R2
+	CMPW R2, R3
+	BLT  inlineEmitCopyStep1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVD $0xfe, R1
+	MOVB R1, 0(R8)
+	MOVW R11, 1(R8)
+	ADD  $3, R8, R8
+	SUBW $64, R3, R3
+	B    inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+	// if length > 64 { etc }
+	MOVW $64, R2
+	CMPW R2, R3
+	BLE  inlineEmitCopyStep2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVD $0xee, R1
+	MOVB R1, 0(R8)
+	MOVW R11, 1(R8)
+	ADD  $3, R8, R8
+	SUBW $60, R3, R3
+
+inlineEmitCopyStep2:
+	// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+	MOVW $12, R2
+	CMPW R2, R3
+	BGE  inlineEmitCopyStep3
+	MOVW $2048, R2
+	CMPW R2, R11
+	BGE  inlineEmitCopyStep3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(R8)
+	LSRW $8, R11, R11
+	LSLW $5, R11, R11
+	SUBW $4, R3, R3
+	AND  $0xff, R3, R3
+	LSLW $2, R3, R3
+	ORRW R3, R11, R11
+	ORRW $1, R11, R11
+	MOVB R11, 0(R8)
+	ADD  $2, R8, R8
+	B    inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBW $1, R3, R3
+	LSLW $2, R3, R3
+	ORRW $2, R3, R3
+	MOVB R3, 0(R8)
+	MOVW R11, 1(R8)
+	ADD  $3, R8, R8
+
+inlineEmitCopyEnd:
+	// End inline of the emitCopy call.
+	// ----------------------------------------
+
+	// nextEmit = s
+	MOVD R7, R10
+
+	// if s >= sLimit { goto emitRemainder }
+	MOVD R7, R3
+	SUB  R6, R3, R3
+	CMP  R3, R9
+	BLS  emitRemainder
+
+	// As per the encode_other.go code:
+	//
+	// We could immediately etc.
+
+	// x := load64(src, s-1)
+	MOVD -1(R7), R14
+
+	// prevHash := hash(uint32(x>>0), shift)
+	MOVW R14, R11
+	MULW R16, R11, R11
+	LSRW R5, R11, R11
+
+	// table[prevHash] = uint16(s-1)
+	MOVD R7, R3
+	SUB  R6, R3, R3
+	SUB  $1, R3, R3
+
+	MOVHU R3, 0(R17)(R11<<1)
+
+	// currHash := hash(uint32(x>>8), shift)
+	LSR  $8, R14, R14
+	MOVW R14, R11
+	MULW R16, R11, R11
+	LSRW R5, R11, R11
+
+	// candidate = int(table[currHash])
+	MOVHU 0(R17)(R11<<1), R15
+
+	// table[currHash] = uint16(s)
+	ADD   $1, R3, R3
+	MOVHU R3, 0(R17)(R11<<1)
+
+	// if uint32(x>>8) == load32(src, candidate) { continue }
+	MOVW (R6)(R15*1), R4
+	CMPW R4, R14
+	BEQ  inner1
+
+	// nextHash = hash(uint32(x>>16), shift)
+	LSR  $8, R14, R14
+	MOVW R14, R11
+	MULW R16, R11, R11
+	LSRW R5, R11, R11
+
+	// s++
+	ADD $1, R7, R7
+
+	// break out of the inner1 for loop, i.e. continue the outer loop.
+	B outer
+
+emitRemainder:
+	// if nextEmit < len(src) { etc }
+	MOVD src_len+32(FP), R3
+	ADD  R6, R3, R3
+	CMP  R3, R10
+	BEQ  encodeBlockEnd
+
+	// d += emitLiteral(dst[d:], src[nextEmit:])
+	//
+	// Push args.
+	MOVD R8, 8(RSP)
+	MOVD $0, 16(RSP)  // Unnecessary, as the callee ignores it, but conservative.
+	MOVD $0, 24(RSP)  // Unnecessary, as the callee ignores it, but conservative.
+	MOVD R10, 32(RSP)
+	SUB  R10, R3, R3
+	MOVD R3, 40(RSP)
+	MOVD R3, 48(RSP)  // Unnecessary, as the callee ignores it, but conservative.
+
+	// Spill local variables (registers) onto the stack; call; unspill.
+	MOVD R8, 88(RSP)
+	CALL ·emitLiteral(SB)
+	MOVD 88(RSP), R8
+
+	// Finish the "d +=" part of "d += emitLiteral(etc)".
+	MOVD 56(RSP), R1
+	ADD  R1, R8, R8
+
+encodeBlockEnd:
+	MOVD dst_base+0(FP), R3
+	SUB  R3, R8, R8
+	MOVD R8, d+48(FP)
+	RET
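
Note: the encodeBlock assembly above references the same hashing scheme as the pure Go encoder: multiply the next four little-endian bytes by 0x1e35a7bd and shift down to index a table of candidate match positions. A rough Go sketch of that lookup (illustrative only, not the vendored implementation):

```go
package main

import "fmt"

// hash mirrors the multiply-and-shift scheme in the assembly comments: the
// constant spreads the 4-byte sample across 32 bits and the shift keeps only
// enough high bits to index the table.
func hash(u, shift uint32) uint32 {
	return (u * 0x1e35a7bd) >> shift
}

// load32 reads 4 little-endian bytes starting at src[i].
func load32(src []byte, i int) uint32 {
	return uint32(src[i]) | uint32(src[i+1])<<8 |
		uint32(src[i+2])<<16 | uint32(src[i+3])<<24
}

func main() {
	src := []byte("abcdabcdabcd")
	const shift = 32 - 8 // table of 1<<8 entries, the smallest tableSize
	var table [1 << 8]uint16

	s := 0
	h := hash(load32(src, s), shift)
	table[h] = uint16(s) // remember where this 4-byte sample was last seen
	fmt.Println("bucket for", string(src[s:s+4]), "=", h)
}
```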
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_asm.go
similarity index 96%
rename from vendor/github.com/golang/snappy/encode_amd64.go
rename to vendor/github.com/golang/snappy/encode_asm.go
index 150d91b..107c1e7 100644
--- a/vendor/github.com/golang/snappy/encode_amd64.go
+++ b/vendor/github.com/golang/snappy/encode_asm.go
@@ -5,6 +5,7 @@
 // +build !appengine
 // +build gc
 // +build !noasm
+// +build amd64 arm64
 
 package snappy
 
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
index dbcae90..296d7f0 100644
--- a/vendor/github.com/golang/snappy/encode_other.go
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !amd64 appengine !gc noasm
+// +build !amd64,!arm64 appengine !gc noasm
 
 package snappy
 
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
index 9d92c11..f765a46 100644
--- a/vendor/github.com/google/uuid/README.md
+++ b/vendor/github.com/google/uuid/README.md
@@ -16,4 +16,4 @@
 
 Full `go doc` style documentation for the package can be viewed online without
 installing this package by using the GoDoc site here: 
-http://godoc.org/github.com/google/uuid
+http://pkg.go.dev/github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
index b174616..b404f4b 100644
--- a/vendor/github.com/google/uuid/hash.go
+++ b/vendor/github.com/google/uuid/hash.go
@@ -26,8 +26,8 @@
 // NewMD5 and NewSHA1.
 func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
 	h.Reset()
-	h.Write(space[:])
-	h.Write(data)
+	h.Write(space[:]) //nolint:errcheck
+	h.Write(data)     //nolint:errcheck
 	s := h.Sum(nil)
 	var uuid UUID
 	copy(uuid[:], s)
diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go
index 7f9e0c6..14bd340 100644
--- a/vendor/github.com/google/uuid/marshal.go
+++ b/vendor/github.com/google/uuid/marshal.go
@@ -16,10 +16,11 @@
 // UnmarshalText implements encoding.TextUnmarshaler.
 func (uuid *UUID) UnmarshalText(data []byte) error {
 	id, err := ParseBytes(data)
-	if err == nil {
-		*uuid = id
+	if err != nil {
+		return err
 	}
-	return err
+	*uuid = id
+	return nil
 }
 
 // MarshalBinary implements encoding.BinaryMarshaler.
diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go
new file mode 100644
index 0000000..d7fcbf2
--- /dev/null
+++ b/vendor/github.com/google/uuid/null.go
@@ -0,0 +1,118 @@
+// Copyright 2021 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"encoding/json"
+	"fmt"
+)
+
+var jsonNull = []byte("null")
+
+// NullUUID represents a UUID that may be null.
+// NullUUID implements the SQL driver.Scanner interface so
+// it can be used as a scan destination:
+//
+//  var u uuid.NullUUID
+//  err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
+//  ...
+//  if u.Valid {
+//     // use u.UUID
+//  } else {
+//     // NULL value
+//  }
+//
+type NullUUID struct {
+	UUID  UUID
+	Valid bool // Valid is true if UUID is not NULL
+}
+
+// Scan implements the SQL driver.Scanner interface.
+func (nu *NullUUID) Scan(value interface{}) error {
+	if value == nil {
+		nu.UUID, nu.Valid = Nil, false
+		return nil
+	}
+
+	err := nu.UUID.Scan(value)
+	if err != nil {
+		nu.Valid = false
+		return err
+	}
+
+	nu.Valid = true
+	return nil
+}
+
+// Value implements the driver Valuer interface.
+func (nu NullUUID) Value() (driver.Value, error) {
+	if !nu.Valid {
+		return nil, nil
+	}
+	// Delegate to UUID Value function
+	return nu.UUID.Value()
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (nu NullUUID) MarshalBinary() ([]byte, error) {
+	if nu.Valid {
+		return nu.UUID[:], nil
+	}
+
+	return []byte(nil), nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (nu *NullUUID) UnmarshalBinary(data []byte) error {
+	if len(data) != 16 {
+		return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+	}
+	copy(nu.UUID[:], data)
+	nu.Valid = true
+	return nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (nu NullUUID) MarshalText() ([]byte, error) {
+	if nu.Valid {
+		return nu.UUID.MarshalText()
+	}
+
+	return jsonNull, nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (nu *NullUUID) UnmarshalText(data []byte) error {
+	id, err := ParseBytes(data)
+	if err != nil {
+		nu.Valid = false
+		return err
+	}
+	nu.UUID = id
+	nu.Valid = true
+	return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (nu NullUUID) MarshalJSON() ([]byte, error) {
+	if nu.Valid {
+		return json.Marshal(nu.UUID)
+	}
+
+	return jsonNull, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (nu *NullUUID) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, jsonNull) {
+		*nu = NullUUID{}
+		return nil // valid null UUID
+	}
+	err := json.Unmarshal(data, &nu.UUID)
+	nu.Valid = err == nil
+	return err
+}
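
Note: the new NullUUID type maps JSON null to Valid == false and a real UUID string to Valid == true. A short sketch of how that plays out with encoding/json, assuming the `github.com/google/uuid` import path:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

type row struct {
	ID uuid.NullUUID `json:"id"`
}

func main() {
	var nullRow row
	if err := json.Unmarshal([]byte(`{"id": null}`), &nullRow); err != nil {
		panic(err)
	}
	fmt.Println(nullRow.ID.Valid) // false: JSON null stays a null UUID

	var setRow row
	if err := json.Unmarshal([]byte(`{"id": "123e4567-e89b-12d3-a456-426614174000"}`), &setRow); err != nil {
		panic(err)
	}
	fmt.Println(setRow.ID.Valid, setRow.ID.UUID) // true plus the parsed UUID
}
```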
diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go
index f326b54..2e02ec0 100644
--- a/vendor/github.com/google/uuid/sql.go
+++ b/vendor/github.com/google/uuid/sql.go
@@ -9,7 +9,7 @@
 	"fmt"
 )
 
-// Scan implements sql.Scanner so UUIDs can be read from databases transparently
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
 // Currently, database types that map to string and []byte are supported. Please
 // consult database-specific driver documentation for matching types.
 func (uuid *UUID) Scan(src interface{}) error {
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
index 524404c..a57207a 100644
--- a/vendor/github.com/google/uuid/uuid.go
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -12,6 +12,7 @@
 	"fmt"
 	"io"
 	"strings"
+	"sync"
 )
 
 // A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
@@ -33,7 +34,27 @@
 	Future                    // Reserved for future definition.
 )
 
-var rander = rand.Reader // random function
+const randPoolSize = 16 * 16
+
+var (
+	rander      = rand.Reader // random function
+	poolEnabled = false
+	poolMu      sync.Mutex
+	poolPos     = randPoolSize     // protected with poolMu
+	pool        [randPoolSize]byte // protected with poolMu
+)
+
+type invalidLengthError struct{ len int }
+
+func (err invalidLengthError) Error() string {
+	return fmt.Sprintf("invalid UUID length: %d", err.len)
+}
+
+// IsInvalidLengthError is a matcher function for the custom error invalidLengthError.
+func IsInvalidLengthError(err error) bool {
+	_, ok := err.(invalidLengthError)
+	return ok
+}
 
 // Parse decodes s into a UUID or returns an error.  Both the standard UUID
 // forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
@@ -68,7 +89,7 @@
 		}
 		return uuid, nil
 	default:
-		return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
+		return uuid, invalidLengthError{len(s)}
 	}
 	// s is now at least 36 bytes long
 	// it must be of the form  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@@ -112,7 +133,7 @@
 		}
 		return uuid, nil
 	default:
-		return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
+		return uuid, invalidLengthError{len(b)}
 	}
 	// s is now at least 36 bytes long
 	// it must be of the form  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@@ -243,3 +264,31 @@
 	}
 	rander = r
 }
+
+// EnableRandPool enables internal randomness pool used for Random
+// (Version 4) UUID generation. The pool contains random bytes read from
+// the random number generator on demand in batches. Enabling the pool
+// may improve the UUID generation throughput significantly.
+//
+// Since the pool is stored on the Go heap, this feature may be a bad fit
+// for security sensitive applications.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func EnableRandPool() {
+	poolEnabled = true
+}
+
+// DisableRandPool disables the randomness pool if it was previously
+// enabled with EnableRandPool.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func DisableRandPool() {
+	poolEnabled = false
+	defer poolMu.Unlock()
+	poolMu.Lock()
+	poolPos = randPoolSize
+}
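
Note: two of the uuid changes above are visible at the call site: Parse now returns a typed length error that IsInvalidLengthError can match, and EnableRandPool opts Version 4 generation into the batched randomness pool. A small sketch combining both, assuming the `github.com/google/uuid` import path:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Parse failures caused purely by length are now distinguishable from
	// other format errors.
	if _, err := uuid.Parse("not-a-uuid"); err != nil {
		fmt.Println(uuid.IsInvalidLengthError(err), err) // true invalid UUID length: 10
	}

	// Opt in to the randomness pool before any concurrent generation starts;
	// EnableRandPool/DisableRandPool themselves are not thread-safe.
	uuid.EnableRandPool()
	defer uuid.DisableRandPool()
	fmt.Println(uuid.New())
}
```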
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
index 199a1ac..4631096 100644
--- a/vendor/github.com/google/uuid/version1.go
+++ b/vendor/github.com/google/uuid/version1.go
@@ -17,12 +17,6 @@
 //
 // In most cases, New should be used.
 func NewUUID() (UUID, error) {
-	nodeMu.Lock()
-	if nodeID == zeroID {
-		setNodeInterface("")
-	}
-	nodeMu.Unlock()
-
 	var uuid UUID
 	now, seq, err := GetTime()
 	if err != nil {
@@ -38,7 +32,13 @@
 	binary.BigEndian.PutUint16(uuid[4:], timeMid)
 	binary.BigEndian.PutUint16(uuid[6:], timeHi)
 	binary.BigEndian.PutUint16(uuid[8:], seq)
+
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
 	copy(uuid[10:], nodeID[:])
+	nodeMu.Unlock()
 
 	return uuid, nil
 }
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
index 84af91c..7697802 100644
--- a/vendor/github.com/google/uuid/version4.go
+++ b/vendor/github.com/google/uuid/version4.go
@@ -14,11 +14,21 @@
 	return Must(NewRandom())
 }
 
+// NewString creates a new random UUID and returns it as a string or panics.
+// NewString is equivalent to the expression
+//
+//    uuid.New().String()
+func NewString() string {
+	return Must(NewRandom()).String()
+}
+
 // NewRandom returns a Random (Version 4) UUID.
 //
 // The strength of the UUIDs is based on the strength of the crypto/rand
 // package.
 //
+// Uses the randomness pool if it was enabled with EnableRandPool.
+//
 // A note about uniqueness derived from the UUID Wikipedia entry:
 //
 //  Randomly generated UUIDs have 122 random bits.  One's annual risk of being
@@ -27,8 +37,16 @@
 //  equivalent to the odds of creating a few tens of trillions of UUIDs in a
 //  year and having one duplicate.
 func NewRandom() (UUID, error) {
+	if !poolEnabled {
+		return NewRandomFromReader(rander)
+	}
+	return newRandomFromPool()
+}
+
+// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
+func NewRandomFromReader(r io.Reader) (UUID, error) {
 	var uuid UUID
-	_, err := io.ReadFull(rander, uuid[:])
+	_, err := io.ReadFull(r, uuid[:])
 	if err != nil {
 		return Nil, err
 	}
@@ -36,3 +54,23 @@
 	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
 	return uuid, nil
 }
+
+func newRandomFromPool() (UUID, error) {
+	var uuid UUID
+	poolMu.Lock()
+	if poolPos == randPoolSize {
+		_, err := io.ReadFull(rander, pool[:])
+		if err != nil {
+			poolMu.Unlock()
+			return Nil, err
+		}
+		poolPos = 0
+	}
+	copy(uuid[:], pool[poolPos:(poolPos+16)])
+	poolPos += 16
+	poolMu.Unlock()
+
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}
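
Note: NewString is shorthand for Must(NewRandom()).String(), and NewRandomFromReader makes the entropy source explicit, which is handy for deterministic tests. A brief sketch; the all-zero reader is an illustrative choice, not anything from this patch:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Equivalent to uuid.Must(uuid.NewRandom()).String().
	fmt.Println(uuid.NewString())

	// Draw the 16 random bytes from a caller-supplied reader; the version
	// and variant bits are still forced to 4 and 10.
	fixed := bytes.NewReader(make([]byte, 16))
	u, err := uuid.NewRandomFromReader(fixed)
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // 00000000-0000-4000-8000-000000000000
}
```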
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore
index c198e6a..826caa3 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore
@@ -200,3 +200,5 @@
 
 #vendor
 vendor/
+
+.envrc
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
index 702fa5b..fc198d8 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
@@ -1,21 +1,16 @@
 sudo: false
 language: go
 go:
-  - 1.8.x
+  - 1.13.x
+  - 1.14.x
+  - 1.15.x
+
 env:
-  - DEP_VERSION="0.3.2"
-
-before_install:
-  # Download the binary to bin folder in $GOPATH
-  - curl -L -s https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 -o $GOPATH/bin/dep
-  # Make the binary executable
-  - chmod +x $GOPATH/bin/dep
-
-install:
-  - dep ensure
+  global:
+    - GO111MODULE=on
 
 script:
  - make test
- 
+
 after_success:
   - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
index 0e64822..6eeb7e2 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
@@ -13,10 +13,30 @@
 - `Security` in case of vulnerabilities.
 
 ## [Unreleased]
-### Added
-- This CHANGELOG file to keep track of changes.
 
-## 1.0.0 - 2018-05-08
+### Added
+
+- [#223](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/223) Add go-kit logging middleware - [adrien-f](https://github.com/adrien-f)
+
+## [v1.1.0] - 2019-09-12
+### Added
+- [#226](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/226) Support for go modules.
+- [#221](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/221) logging/zap add support for gRPC LoggerV2  - [kush-patel-hs](https://github.com/kush-patel-hs)
+- [#181](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/181) Rate Limit support - [ceshihao](https://github.com/ceshihao)
+- [#161](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/161) Retry on server stream call - [lonnblad](https://github.com/lonnblad)
+- [#152](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/152) Exponential backoff functions - [polyfloyd](https://github.com/polyfloyd)
+- [#147](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/147) Jaeger support for ctxtags extraction - [vporoshok](https://github.com/vporoshok)
+- [#184](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/184) ctxTags identifies if the call was sampled
+
+### Deprecated
+- [#201](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/201) `golang.org/x/net/context` - [houz42](https://github.com/houz42)
+- [#183](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/183) Documentation Generation in favour of <godoc.org>.
+
+### Fixed
+- [172](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/172) Passing ctx into retry and recover - [johanbrandhorst](https://github.com/johanbrandhorst)
+- Numerous documentation fixes.
+
+## v1.0.0 - 2018-05-08
 ### Added
 - grpc_auth 
 - grpc_ctxtags
@@ -27,4 +47,5 @@
 - grpc_validator
 - grpc_recovery
 
-[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...HEAD 
+[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.1.0...HEAD 
+[v1.1.0]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...v1.1.0 
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.lock b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.lock
deleted file mode 100644
index ebdcb75..0000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.lock
+++ /dev/null
@@ -1,123 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
-  name = "cloud.google.com/go"
-  packages = ["compute/metadata"]
-  revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
-  version = "v0.16.0"
-
-[[projects]]
-  name = "github.com/davecgh/go-spew"
-  packages = ["spew"]
-  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
-  version = "v1.1.0"
-
-[[projects]]
-  name = "github.com/gogo/protobuf"
-  packages = ["gogoproto","proto","protoc-gen-gogo/descriptor"]
-  revision = "342cbe0a04158f6dcb03ca0079991a51a4248c02"
-  version = "v0.5"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/golang/protobuf"
-  packages = ["jsonpb","proto","ptypes","ptypes/any","ptypes/duration","ptypes/struct","ptypes/timestamp"]
-  revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
-
-[[projects]]
-  name = "github.com/opentracing/opentracing-go"
-  packages = [".","ext","log","mocktracer"]
-  revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
-  version = "v1.0.2"
-
-[[projects]]
-  name = "github.com/pmezard/go-difflib"
-  packages = ["difflib"]
-  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
-  version = "v1.0.0"
-
-[[projects]]
-  name = "github.com/sirupsen/logrus"
-  packages = ["."]
-  revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
-  version = "v1.0.3"
-
-[[projects]]
-  name = "github.com/stretchr/testify"
-  packages = ["assert","require","suite"]
-  revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
-  version = "v1.1.4"
-
-[[projects]]
-  name = "go.uber.org/atomic"
-  packages = ["."]
-  revision = "8474b86a5a6f79c443ce4b2992817ff32cf208b8"
-  version = "v1.3.1"
-
-[[projects]]
-  name = "go.uber.org/multierr"
-  packages = ["."]
-  revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
-  version = "v1.1.0"
-
-[[projects]]
-  name = "go.uber.org/zap"
-  packages = [".","buffer","internal/bufferpool","internal/color","internal/exit","zapcore"]
-  revision = "35aad584952c3e7020db7b839f6b102de6271f89"
-  version = "v1.7.1"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/crypto"
-  packages = ["ssh/terminal"]
-  revision = "94eea52f7b742c7cbe0b03b22f0c4c8631ece122"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/net"
-  packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
-  revision = "a8b9294777976932365dabb6640cf1468d95c70f"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/oauth2"
-  packages = [".","google","internal","jws","jwt"]
-  revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/sys"
-  packages = ["unix","windows"]
-  revision = "13fcbd661c8ececa8807a29b48407d674b1d8ed8"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/text"
-  packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
-  revision = "75cc3cad82b5f47d3fb229ddda8c5167da14f294"
-
-[[projects]]
-  name = "google.golang.org/appengine"
-  packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
-  revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
-  version = "v1.0.0"
-
-[[projects]]
-  branch = "master"
-  name = "google.golang.org/genproto"
-  packages = ["googleapis/rpc/status"]
-  revision = "7f0da29060c682909f650ad8ed4e515bd74fa12a"
-
-[[projects]]
-  name = "google.golang.org/grpc"
-  packages = [".","balancer","balancer/roundrobin","codes","connectivity","credentials","credentials/oauth","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
-  revision = "5a9f7b402fe85096d2e1d0383435ee1876e863d0"
-  version = "v1.8.0"
-
-[solve-meta]
-  analyzer-name = "dep"
-  analyzer-version = 1
-  inputs-digest = "b24c6670412eb0bc44ed1db77fecc52333f8725f3e3272bdc568f5683a63031f"
-  solver-name = "gps-cdcl"
-  solver-version = 1
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.toml b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.toml
deleted file mode 100644
index 0a7d4c1..0000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.toml
+++ /dev/null
@@ -1,35 +0,0 @@
-[[constraint]]
-  name = "github.com/gogo/protobuf"
-  version = "0.5.0"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/golang/protobuf"
-
-[[constraint]]
-  name = "github.com/opentracing/opentracing-go"
-  version = "1.0.2"
-
-[[constraint]]
-  name = "github.com/sirupsen/logrus"
-  version = "1.0.3"
-
-[[constraint]]
-  name = "github.com/stretchr/testify"
-  version = "1.1.4"
-
-[[constraint]]
-  name = "go.uber.org/zap"
-  version = "1.7.1"
-
-[[constraint]]
-  branch = "master"
-  name = "golang.org/x/net"
-
-[[constraint]]
-  branch = "master"
-  name = "golang.org/x/oauth2"
-
-[[constraint]]
-  name = "google.golang.org/grpc"
-  version = "1.8.0"
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
index 224069b..814e155 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
@@ -7,7 +7,7 @@
 [![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware)
 [![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
 [![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status)
-[![Slack](slack.png)](https://join.slack.com/t/improbable-eng/shared_invite/enQtMzQ1ODcyMzQ5MjM4LWY5ZWZmNGM2ODc5MmViNmQ3ZTA3ZTY3NzQwOTBlMTkzZmIxZTIxODk0OWU3YjZhNWVlNDU3MDlkZGViZjhkMjc)
+[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://slack.com/share/IRUQCFC23/9Tm7hxRFVKKNoajQfMOcUiIk/enQtODc4ODI4NTIyMDcxLWM5NDA0ZTE4Njg5YjRjYWZkMTI5MzQwNDY3YzBjMzE1YzdjOGM5ZjI1NDNiM2JmNzI2YjM5ODE5OTRiNTEyOWE)
 
 [gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities.
 
@@ -15,7 +15,7 @@
 
 [gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for
 Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs) 
-that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client either around the user call. It is a perfect way to implement
+that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client around the user call. It is a perfect way to implement
 common patterns: auth, logging, message validation, retries or monitoring.
 
 These are generic building blocks that make it easy to build multiple microservices.
@@ -29,20 +29,20 @@
 
 myServer := grpc.NewServer(
     grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
+        grpc_recovery.StreamServerInterceptor(),
         grpc_ctxtags.StreamServerInterceptor(),
         grpc_opentracing.StreamServerInterceptor(),
         grpc_prometheus.StreamServerInterceptor,
         grpc_zap.StreamServerInterceptor(zapLogger),
         grpc_auth.StreamServerInterceptor(myAuthFunction),
-        grpc_recovery.StreamServerInterceptor(),
     )),
     grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
+        grpc_recovery.UnaryServerInterceptor(),
         grpc_ctxtags.UnaryServerInterceptor(),
         grpc_opentracing.UnaryServerInterceptor(),
         grpc_prometheus.UnaryServerInterceptor,
         grpc_zap.UnaryServerInterceptor(zapLogger),
         grpc_auth.UnaryServerInterceptor(myAuthFunction),
-        grpc_recovery.UnaryServerInterceptor(),
     )),
 )
 ```
@@ -58,7 +58,8 @@
    * [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
    * [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
    * [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
-
+   * [`grpc_kit`](logging/kit/) - integration of [go-kit](https://github.com/go-kit/kit/tree/master/log) logging library into gRPC handlers.
+   * [`grpc_grpc_logsettable`](logging/settable/) - a wrapper around `grpclog.LoggerV2` that allows replacing loggers at runtime (thread-safe).
 
 #### Monitoring
    * [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
@@ -71,6 +72,7 @@
 #### Server
    * [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
    * [`grpc_recovery`](recovery/) - turn panics into gRPC errors
+   * [`ratelimit`](ratelimit/) - gRPC rate limiting using your own limiter
 
 
 ## Status
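
For reference, a minimal client-side sketch (assuming this repository's import paths, an illustrative address, and the retry/opentracing client interceptors) showing the same chaining helper applied to a dialed connection:

```go
package main

import (
	"log"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
	grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
	"google.golang.org/grpc"
)

func main() {
	// Client-side unary interceptors are chained the same way; the first one
	// listed wraps the others and runs outermost around each call.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(
			grpc_opentracing.UnaryClientInterceptor(),
			grpc_retry.UnaryClientInterceptor(),
		)),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```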
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
index 45a2f5f..ea3738b 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
@@ -6,7 +6,8 @@
 package grpc_middleware
 
 import (
-	"golang.org/x/net/context"
+	"context"
+
 	"google.golang.org/grpc"
 )
 
@@ -18,35 +19,19 @@
 func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
 	n := len(interceptors)
 
-	if n > 1 {
-		lastI := n - 1
-		return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-			var (
-				chainHandler grpc.UnaryHandler
-				curI         int
-			)
-
-			chainHandler = func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
-				if curI == lastI {
-					return handler(currentCtx, currentReq)
-				}
-				curI++
-				resp, err := interceptors[curI](currentCtx, currentReq, info, chainHandler)
-				curI--
-				return resp, err
+	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+		chainer := func(currentInter grpc.UnaryServerInterceptor, currentHandler grpc.UnaryHandler) grpc.UnaryHandler {
+			return func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
+				return currentInter(currentCtx, currentReq, info, currentHandler)
 			}
-
-			return interceptors[0](ctx, req, info, chainHandler)
 		}
-	}
 
-	if n == 1 {
-		return interceptors[0]
-	}
+		chainedHandler := handler
+		for i := n - 1; i >= 0; i-- {
+			chainedHandler = chainer(interceptors[i], chainedHandler)
+		}
 
-	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
-	return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-		return handler(ctx, req)
+		return chainedHandler(ctx, req)
 	}
 }
 
@@ -58,35 +43,19 @@
 func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor {
 	n := len(interceptors)
 
-	if n > 1 {
-		lastI := n - 1
-		return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
-			var (
-				chainHandler grpc.StreamHandler
-				curI         int
-			)
-
-			chainHandler = func(currentSrv interface{}, currentStream grpc.ServerStream) error {
-				if curI == lastI {
-					return handler(currentSrv, currentStream)
-				}
-				curI++
-				err := interceptors[curI](currentSrv, currentStream, info, chainHandler)
-				curI--
-				return err
+	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		chainer := func(currentInter grpc.StreamServerInterceptor, currentHandler grpc.StreamHandler) grpc.StreamHandler {
+			return func(currentSrv interface{}, currentStream grpc.ServerStream) error {
+				return currentInter(currentSrv, currentStream, info, currentHandler)
 			}
-
-			return interceptors[0](srv, stream, info, chainHandler)
 		}
-	}
 
-	if n == 1 {
-		return interceptors[0]
-	}
+		chainedHandler := handler
+		for i := n - 1; i >= 0; i-- {
+			chainedHandler = chainer(interceptors[i], chainedHandler)
+		}
 
-	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
-	return func(srv interface{}, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
-		return handler(srv, stream)
+		return chainedHandler(srv, ss)
 	}
 }
 
@@ -97,35 +66,19 @@
 func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor {
 	n := len(interceptors)
 
-	if n > 1 {
-		lastI := n - 1
-		return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
-			var (
-				chainHandler grpc.UnaryInvoker
-				curI         int
-			)
-
-			chainHandler = func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
-				if curI == lastI {
-					return invoker(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentOpts...)
-				}
-				curI++
-				err := interceptors[curI](currentCtx, currentMethod, currentReq, currentRepl, currentConn, chainHandler, currentOpts...)
-				curI--
-				return err
-			}
-
-			return interceptors[0](ctx, method, req, reply, cc, chainHandler, opts...)
-		}
-	}
-
-	if n == 1 {
-		return interceptors[0]
-	}
-
-	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
 	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
-		return invoker(ctx, method, req, reply, cc, opts...)
+		chainer := func(currentInter grpc.UnaryClientInterceptor, currentInvoker grpc.UnaryInvoker) grpc.UnaryInvoker {
+			return func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
+				return currentInter(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentInvoker, currentOpts...)
+			}
+		}
+
+		chainedInvoker := invoker
+		for i := n - 1; i >= 0; i-- {
+			chainedInvoker = chainer(interceptors[i], chainedInvoker)
+		}
+
+		return chainedInvoker(ctx, method, req, reply, cc, opts...)
 	}
 }
 
@@ -136,35 +89,19 @@
 func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor {
 	n := len(interceptors)
 
-	if n > 1 {
-		lastI := n - 1
-		return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
-			var (
-				chainHandler grpc.Streamer
-				curI         int
-			)
-
-			chainHandler = func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
-				if curI == lastI {
-					return streamer(currentCtx, currentDesc, currentConn, currentMethod, currentOpts...)
-				}
-				curI++
-				stream, err := interceptors[curI](currentCtx, currentDesc, currentConn, currentMethod, chainHandler, currentOpts...)
-				curI--
-				return stream, err
-			}
-
-			return interceptors[0](ctx, desc, cc, method, chainHandler, opts...)
-		}
-	}
-
-	if n == 1 {
-		return interceptors[0]
-	}
-
-	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
 	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
-		return streamer(ctx, desc, cc, method, opts...)
+		chainer := func(currentInter grpc.StreamClientInterceptor, currentStreamer grpc.Streamer) grpc.Streamer {
+			return func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
+				return currentInter(currentCtx, currentDesc, currentConn, currentMethod, currentStreamer, currentOpts...)
+			}
+		}
+
+		chainedStreamer := streamer
+		for i := n - 1; i >= 0; i-- {
+			chainedStreamer = chainer(interceptors[i], chainedStreamer)
+		}
+
+		return chainedStreamer(ctx, desc, cc, method, opts...)
 	}
 }
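
A minimal sketch of the ordering the rewritten loop above preserves: interceptors are composed so that the first one passed to ChainUnaryServer runs outermost. The interceptor names and handler here are illustrative only:

```go
package main

import (
	"context"
	"fmt"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	"google.golang.org/grpc"
)

// tag returns a toy interceptor that prints when it enters and exits.
func tag(name string) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		fmt.Println("enter", name)
		resp, err := handler(ctx, req)
		fmt.Println("exit", name)
		return resp, err
	}
}

func main() {
	chained := grpc_middleware.ChainUnaryServer(tag("outer"), tag("inner"))
	handler := func(ctx context.Context, req interface{}) (interface{}, error) { return "ok", nil }

	// Prints: enter outer, enter inner, exit inner, exit outer.
	resp, _ := chained(context.Background(), "req", &grpc.UnaryServerInfo{FullMethod: "/demo/Call"}, handler)
	fmt.Println(resp)
}
```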
 
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go
index 7168950..718e100 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go
@@ -23,7 +23,7 @@
 
 	myServer := grpc.NewServer(
 	    grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(loggingStream, monitoringStream, authStream)),
-	    grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(loggingUnary, monitoringUnary, authUnary),
+	    grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(loggingUnary, monitoringUnary, authUnary)),
 	)
 
 These interceptors will be executed from left to right: logging, monitoring and auth.
@@ -63,7 +63,7 @@
 	func FakeAuthStreamingInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
 	   newStream := grpc_middleware.WrapServerStream(stream)
 	   newStream.WrappedContext = context.WithValue(ctx, "user_id", "john@example.com")
-	   return handler(srv, stream)
+	   return handler(srv, newStream)
 	}
 */
 package grpc_middleware
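
A minimal sketch of the corrected streaming pattern from the doc comment above (the context key type and user value are illustrative); the wrapped stream, not the original, is what the handler must receive:

```go
package main

import (
	"context"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	"google.golang.org/grpc"
)

type ctxKey string

// exampleAuthStreamInterceptor attaches a value to the stream's context and
// hands the *wrapped* stream to the handler, mirroring the fix above.
func exampleAuthStreamInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	newStream := grpc_middleware.WrapServerStream(stream)
	newStream.WrappedContext = context.WithValue(stream.Context(), ctxKey("user_id"), "john@example.com")
	return handler(srv, newStream)
}

func main() {
	_ = grpc.NewServer(grpc.StreamInterceptor(exampleAuthStreamInterceptor))
}
```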
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.mod b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.mod
new file mode 100644
index 0000000..7dc62e5
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.mod
@@ -0,0 +1,22 @@
+module github.com/grpc-ecosystem/go-grpc-middleware
+
+require (
+	github.com/go-kit/kit v0.9.0
+	github.com/go-logfmt/logfmt v0.4.0 // indirect
+	github.com/go-stack/stack v1.8.0 // indirect
+	github.com/gogo/protobuf v1.3.2
+	github.com/golang/protobuf v1.3.3
+	github.com/opentracing/opentracing-go v1.1.0
+	github.com/pkg/errors v0.8.1 // indirect
+	github.com/sirupsen/logrus v1.4.2
+	github.com/stretchr/testify v1.4.0
+	go.uber.org/atomic v1.4.0 // indirect
+	go.uber.org/multierr v1.1.0 // indirect
+	go.uber.org/zap v1.10.0
+	golang.org/x/net v0.0.0-20201021035429-f5854403a974
+	golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
+	google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215 // indirect
+	google.golang.org/grpc v1.29.1
+)
+
+go 1.14
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.sum b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.sum
new file mode 100644
index 0000000..ee522cd
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.sum
@@ -0,0 +1,122 @@
+cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215 h1:0Uz5jLJQioKgVozXa1gzGbzYxbb/rhQEVvSWxzw5oUs=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile
index 51dc5b8..b18d2d2 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile
@@ -8,7 +8,8 @@
 	go fmt $(GOFILES_NOVENDOR)
 
 vet:
-	go vet $(GOFILES_NOVENDOR)
+	# do not check lostcancel; the lost cancels are intentional.
+	go vet -lostcancel=false $(GOFILES_NOVENDOR)
 
 test: vet
 	./scripts/test_all.sh
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go
index 583025c..0da1658 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go
@@ -69,10 +69,10 @@
 	return t
 }
 
-func setInContext(ctx context.Context, tags Tags) context.Context {
+func SetInContext(ctx context.Context, tags Tags) context.Context {
 	return context.WithValue(ctx, ctxMarkerKey, tags)
 }
 
-func newTags() Tags {
+func NewTags() Tags {
 	return &mapTags{values: make(map[string]interface{})}
 }
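
A minimal sketch using the newly exported NewTags and SetInContext to pre-populate tags on a context, for example in a test or a custom interceptor (the tag key and value are illustrative):

```go
package main

import (
	"context"
	"fmt"

	grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
)

func main() {
	// Build a Tags map up front and attach it to the context.
	tags := grpc_ctxtags.NewTags().Set("peer.address", "127.0.0.1:0")
	ctx := grpc_ctxtags.SetInContext(context.Background(), tags)

	// Downstream code reads it back with Extract, as the interceptors do.
	fmt.Println(grpc_ctxtags.Extract(ctx).Values())
}
```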
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go
index 549ff48..a4073ab 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go
@@ -64,10 +64,10 @@
 		field := v.Field(i)
 		kind := field.Kind()
 		// Only recurse down direct pointers, which should only be to nested structs.
-		if kind == reflect.Ptr {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && field.CanInterface() {
 			reflectMessageTags(field.Interface(), existingMap, tagName)
 		}
-		// In case of arrays/splices (repeated fields) go down to the concrete type.
+		// In case of arrays/slices (repeated fields) go down to the concrete type.
 		if kind == reflect.Array || kind == reflect.Slice {
 			if field.Len() == 0 {
 				continue
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go
index 038afd2..a7ced60 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go
@@ -4,10 +4,12 @@
 package grpc_ctxtags
 
 import (
-	"github.com/grpc-ecosystem/go-grpc-middleware"
-	"golang.org/x/net/context"
+	"context"
+
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/peer"
+
+	"github.com/grpc-ecosystem/go-grpc-middleware"
 )
 
 // UnaryServerInterceptor returns a new unary server interceptors that sets the values for request tags.
@@ -66,11 +68,11 @@
 }
 
 func newTagsForCtx(ctx context.Context) context.Context {
-	t := newTags()
+	t := NewTags()
 	if peer, ok := peer.FromContext(ctx); ok {
 		t.Set("peer.address", peer.Addr.String())
 	}
-	return setInContext(ctx, t)
+	return SetInContext(ctx, t)
 }
 
 func setRequestFieldTags(ctx context.Context, f RequestFieldExtractorFunc, fullMethodName string, req interface{}) {
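
A minimal sketch wiring the ctxtags interceptor with a tag-based request field extractor; the `log_fields` struct tag name is an assumption for illustration:

```go
package main

import (
	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
	"google.golang.org/grpc"
)

func main() {
	// Request fields annotated with the given struct tag are copied into the
	// per-request Tag map populated by newTagsForCtx above.
	_ = grpc.NewServer(
		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
			grpc_ctxtags.UnaryServerInterceptor(
				grpc_ctxtags.WithFieldExtractor(grpc_ctxtags.TagBasedRequestFieldExtractor("log_fields")),
			),
		)),
	)
}
```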
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/client_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/client_interceptors.go
index f8fdecf..2e9cafd 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/client_interceptors.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/client_interceptors.go
@@ -4,6 +4,7 @@
 package grpc_opentracing
 
 import (
+	"context"
 	"io"
 	"sync"
 
@@ -11,7 +12,6 @@
 	opentracing "github.com/opentracing/opentracing-go"
 	"github.com/opentracing/opentracing-go/ext"
 	"github.com/opentracing/opentracing-go/log"
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/metadata"
@@ -25,6 +25,9 @@
 			return invoker(parentCtx, method, req, reply, cc, opts...)
 		}
 		newCtx, clientSpan := newClientSpanFromContext(parentCtx, o.tracer, method)
+		if o.unaryRequestHandlerFunc != nil {
+			o.unaryRequestHandlerFunc(clientSpan, req)
+		}
 		err := invoker(newCtx, method, req, reply, cc, opts...)
 		finishClientSpan(clientSpan, err)
 		return err
@@ -76,9 +79,7 @@
 
 func (s *tracedClientStream) CloseSend() error {
 	err := s.ClientStream.CloseSend()
-	if err != nil {
-		s.finishClientSpan(err)
-	}
+	s.finishClientSpan(err)
 	return err
 }
 
@@ -127,7 +128,7 @@
 	// Make sure we add this to the metadata of the call, so it gets propagated:
 	md := metautils.ExtractOutgoing(ctx).Clone()
 	if err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, metadataTextMap(md)); err != nil {
-		grpclog.Printf("grpc_opentracing: failed serializing trace information: %v", err)
+		grpclog.Infof("grpc_opentracing: failed serializing trace information: %v", err)
 	}
 	ctxWithMetadata := md.ToOutgoing(ctx)
 	return opentracing.ContextWithSpan(ctxWithMetadata, clientSpan), clientSpan
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/id_extract.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/id_extract.go
index d19f3c8..bc7302e 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/id_extract.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/id_extract.go
@@ -23,8 +23,11 @@
 // https://github.com/opentracing/basictracer-go/blob/1b32af207119a14b1b231d451df3ed04a72efebf/propagation_ot.go#L26
 // Jaeger from Uber use one-key schema with next format '{trace-id}:{span-id}:{parent-span-id}:{flags}'
 // https://www.jaegertracing.io/docs/client-libraries/#trace-span-identity
-func injectOpentracingIdsToTags(span opentracing.Span, tags grpc_ctxtags.Tags) {
-	if err := span.Tracer().Inject(span.Context(), opentracing.HTTPHeaders, &tagsCarrier{tags}); err != nil {
+// Datadog uses keys ending with 'trace-id' and 'parent-id' (for span) by default:
+// https://github.com/DataDog/dd-trace-go/blob/v1/ddtrace/tracer/textmap.go#L77
+func injectOpentracingIdsToTags(traceHeaderName string, span opentracing.Span, tags grpc_ctxtags.Tags) {
+	if err := span.Tracer().Inject(span.Context(), opentracing.HTTPHeaders,
+		&tagsCarrier{Tags: tags, traceHeaderName: traceHeaderName}); err != nil {
 		grpclog.Infof("grpc_opentracing: failed extracting trace info into ctx %v", err)
 	}
 }
@@ -32,10 +35,28 @@
 // tagsCarrier is a really hacky way of
 type tagsCarrier struct {
 	grpc_ctxtags.Tags
+	traceHeaderName string
 }
 
 func (t *tagsCarrier) Set(key, val string) {
 	key = strings.ToLower(key)
+
+	if key == t.traceHeaderName {
+		parts := strings.Split(val, ":")
+		if len(parts) == 4 {
+			t.Tags.Set(TagTraceId, parts[0])
+			t.Tags.Set(TagSpanId, parts[1])
+
+			if parts[3] != jaegerNotSampledFlag {
+				t.Tags.Set(TagSampled, "true")
+			} else {
+				t.Tags.Set(TagSampled, "false")
+			}
+
+			return
+		}
+	}
+
 	if strings.Contains(key, "traceid") {
 		t.Tags.Set(TagTraceId, val) // this will most likely be base-16 (hex) encoded
 	}
@@ -51,17 +72,11 @@
 		}
 	}
 
-	if key == "uber-trace-id" {
-		parts := strings.Split(val, ":")
-		if len(parts) == 4 {
-			t.Tags.Set(TagTraceId, parts[0])
-			t.Tags.Set(TagSpanId, parts[1])
+	if strings.HasSuffix(key, "trace-id") {
+		t.Tags.Set(TagTraceId, val)
+	}
 
-			if parts[3] != jaegerNotSampledFlag {
-				t.Tags.Set(TagSampled, "true")
-			} else {
-				t.Tags.Set(TagSampled, "false")
-			}
-		}
+	if strings.HasSuffix(key, "parent-id") {
+		t.Tags.Set(TagSpanId, val)
 	}
 }
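
A minimal standalone sketch of the `{trace-id}:{span-id}:{parent-span-id}:{flags}` parsing handled above, assuming "0" is the not-sampled flag as in the vendored constant:

```go
package main

import (
	"fmt"
	"strings"
)

// splitJaegerHeader mirrors the four-part split the carrier performs when the
// configured trace header is seen.
func splitJaegerHeader(val string) (traceID, spanID string, sampled, ok bool) {
	parts := strings.Split(val, ":")
	if len(parts) != 4 {
		return "", "", false, false
	}
	return parts[0], parts[1], parts[3] != "0", true
}

func main() {
	traceID, spanID, sampled, ok := splitJaegerHeader("6e0c63257de34c92:1ed38015486087f7:0:1")
	fmt.Println(traceID, spanID, sampled, ok)
}
```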
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/metadata.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/metadata.go
index 38f251d..3649fb5 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/metadata.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/metadata.go
@@ -7,8 +7,6 @@
 	"encoding/base64"
 	"strings"
 
-	"fmt"
-
 	"google.golang.org/grpc/metadata"
 )
 
@@ -32,12 +30,8 @@
 func (m metadataTextMap) ForeachKey(callback func(key, val string) error) error {
 	for k, vv := range m {
 		for _, v := range vv {
-			if decodedKey, decodedVal, err := metadata.DecodeKeyValue(k, v); err == nil {
-				if err = callback(decodedKey, decodedVal); err != nil {
-					return err
-				}
-			} else {
-				return fmt.Errorf("failed decoding opentracing from gRPC metadata: %v", err)
+			if err := callback(k, v); err != nil {
+				return err
 			}
 		}
 	}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/options.go
index e75102b..430fe56 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/options.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/options.go
@@ -21,9 +21,18 @@
 // If it returns false, the given request will not be traced.
 type FilterFunc func(ctx context.Context, fullMethodName string) bool
 
+// UnaryRequestHandlerFunc is a custom request handler
+type UnaryRequestHandlerFunc func(span opentracing.Span, req interface{})
+
+// OpNameFunc is a func that allows custom operation names instead of the gRPC method.
+type OpNameFunc func(method string) string
+
 type options struct {
-	filterOutFunc FilterFunc
-	tracer        opentracing.Tracer
+	filterOutFunc           FilterFunc
+	tracer                  opentracing.Tracer
+	traceHeaderName         string
+	unaryRequestHandlerFunc UnaryRequestHandlerFunc
+	opNameFunc              OpNameFunc
 }
 
 func evaluateOptions(opts []Option) *options {
@@ -35,6 +44,9 @@
 	if optCopy.tracer == nil {
 		optCopy.tracer = opentracing.GlobalTracer()
 	}
+	if optCopy.traceHeaderName == "" {
+		optCopy.traceHeaderName = "uber-trace-id"
+	}
 	return optCopy
 }
 
@@ -47,9 +59,31 @@
 	}
 }
 
+// WithTraceHeaderName customizes the trace header name in which trace metadata is passed with requests.
+// The default is `uber-trace-id`.
+func WithTraceHeaderName(name string) Option {
+	return func(o *options) {
+		o.traceHeaderName = name
+	}
+}
+
 // WithTracer sets a custom tracer to be used for this middleware, otherwise the opentracing.GlobalTracer is used.
 func WithTracer(tracer opentracing.Tracer) Option {
 	return func(o *options) {
 		o.tracer = tracer
 	}
 }
+
+// WithUnaryRequestHandlerFunc sets a custom handler for the request
+func WithUnaryRequestHandlerFunc(f UnaryRequestHandlerFunc) Option {
+	return func(o *options) {
+		o.unaryRequestHandlerFunc = f
+	}
+}
+
+// WithOpName customizes the trace Operation name
+func WithOpName(f OpNameFunc) Option {
+	return func(o *options) {
+		o.opNameFunc = f
+	}
+}
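
A minimal sketch combining the new options when building the server-side tracing interceptor; the header name, operation-name trimming, and span tag are illustrative values only:

```go
package main

import (
	"fmt"
	"strings"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
	opentracing "github.com/opentracing/opentracing-go"
	"google.golang.org/grpc"
)

func main() {
	_ = grpc.NewServer(
		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
			grpc_opentracing.UnaryServerInterceptor(
				// Read trace context from a non-default header.
				grpc_opentracing.WithTraceHeaderName("x-datadog-trace-id"),
				// Customize the span operation name.
				grpc_opentracing.WithOpName(func(method string) string {
					return strings.TrimPrefix(method, "/")
				}),
				// Tag every span with the concrete request type.
				grpc_opentracing.WithUnaryRequestHandlerFunc(func(span opentracing.Span, req interface{}) {
					span.SetTag("request.type", fmt.Sprintf("%T", req))
				}),
			),
		)),
	)
}
```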
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/server_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/server_interceptors.go
index 53764a0..186b108 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/server_interceptors.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/server_interceptors.go
@@ -4,13 +4,14 @@
 package grpc_opentracing
 
 import (
+	"context"
+
 	"github.com/grpc-ecosystem/go-grpc-middleware"
 	"github.com/grpc-ecosystem/go-grpc-middleware/tags"
 	"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
 	"github.com/opentracing/opentracing-go"
 	"github.com/opentracing/opentracing-go/ext"
 	"github.com/opentracing/opentracing-go/log"
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/grpclog"
 )
@@ -26,7 +27,14 @@
 		if o.filterOutFunc != nil && !o.filterOutFunc(ctx, info.FullMethod) {
 			return handler(ctx, req)
 		}
-		newCtx, serverSpan := newServerSpanFromInbound(ctx, o.tracer, info.FullMethod)
+		opName := info.FullMethod
+		if o.opNameFunc != nil {
+			opName = o.opNameFunc(info.FullMethod)
+		}
+		newCtx, serverSpan := newServerSpanFromInbound(ctx, o.tracer, o.traceHeaderName, opName)
+		if o.unaryRequestHandlerFunc != nil {
+			o.unaryRequestHandlerFunc(serverSpan, req)
+		}
 		resp, err := handler(newCtx, req)
 		finishServerSpan(ctx, serverSpan, err)
 		return resp, err
@@ -40,7 +48,11 @@
 		if o.filterOutFunc != nil && !o.filterOutFunc(stream.Context(), info.FullMethod) {
 			return handler(srv, stream)
 		}
-		newCtx, serverSpan := newServerSpanFromInbound(stream.Context(), o.tracer, info.FullMethod)
+		opName := info.FullMethod
+		if o.opNameFunc != nil {
+			opName = o.opNameFunc(info.FullMethod)
+		}
+		newCtx, serverSpan := newServerSpanFromInbound(stream.Context(), o.tracer, o.traceHeaderName, opName)
 		wrappedStream := grpc_middleware.WrapServerStream(stream)
 		wrappedStream.WrappedContext = newCtx
 		err := handler(srv, wrappedStream)
@@ -49,21 +61,21 @@
 	}
 }
 
-func newServerSpanFromInbound(ctx context.Context, tracer opentracing.Tracer, fullMethodName string) (context.Context, opentracing.Span) {
+func newServerSpanFromInbound(ctx context.Context, tracer opentracing.Tracer, traceHeaderName, opName string) (context.Context, opentracing.Span) {
 	md := metautils.ExtractIncoming(ctx)
 	parentSpanContext, err := tracer.Extract(opentracing.HTTPHeaders, metadataTextMap(md))
 	if err != nil && err != opentracing.ErrSpanContextNotFound {
-		grpclog.Printf("grpc_opentracing: failed parsing trace information: %v", err)
+		grpclog.Infof("grpc_opentracing: failed parsing trace information: %v", err)
 	}
 
 	serverSpan := tracer.StartSpan(
-		fullMethodName,
+		opName,
 		// this is magical, it attaches the new span to the parent parentSpanContext, and creates an unparented one if empty.
 		ext.RPCServerOption(parentSpanContext),
 		grpcTag,
 	)
 
-	injectOpentracingIdsToTags(serverSpan, grpc_ctxtags.Extract(ctx))
+	injectOpentracingIdsToTags(traceHeaderName, serverSpan, grpc_ctxtags.Extract(ctx))
 	return opentracing.ContextWithSpan(ctx, serverSpan), serverSpan
 }
 
@@ -74,7 +86,6 @@
 		// Don't tag errors, log them instead.
 		if vErr, ok := v.(error); ok {
 			serverSpan.LogKV(k, vErr.Error())
-
 		} else {
 			serverSpan.SetTag(k, v)
 		}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
index a277bee..1c60585 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
@@ -4,9 +4,9 @@
 package metautils
 
 import (
+	"context"
 	"strings"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/metadata"
 )
 
@@ -49,7 +49,7 @@
 			found = true
 		} else {
 			for _, allowedKey := range copiedKeys {
-				if strings.ToLower(allowedKey) == strings.ToLower(k) {
+				if strings.EqualFold(allowedKey, k) {
 					found = true
 					break
 				}
@@ -83,7 +83,7 @@
 //
 // The function is binary-key safe.
 func (m NiceMD) Get(key string) string {
-	k, _ := encodeKeyValue(key, "")
+	k := strings.ToLower(key)
 	vv, ok := m[k]
 	if !ok {
 		return ""
@@ -98,7 +98,7 @@
 // The function is binary-key safe.
 
 func (m NiceMD) Del(key string) NiceMD {
-	k, _ := encodeKeyValue(key, "")
+	k := strings.ToLower(key)
 	delete(m, k)
 	return m
 }
@@ -109,8 +109,8 @@
 //
 // The function is binary-key safe.
 func (m NiceMD) Set(key string, value string) NiceMD {
-	k, v := encodeKeyValue(key, value)
-	m[k] = []string{v}
+	k := strings.ToLower(key)
+	m[k] = []string{value}
 	return m
 }
 
@@ -120,7 +120,7 @@
 //
 // The function is binary-key safe.
 func (m NiceMD) Add(key string, value string) NiceMD {
-	k, v := encodeKeyValue(key, value)
-	m[k] = append(m[k], v)
+	k := strings.ToLower(key)
+	m[k] = append(m[k], value)
 	return m
 }
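
A minimal sketch of NiceMD after this change, where keys are simply lower-cased rather than base64 `-bin` encoded; the metadata keys and values are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
)

func main() {
	md := metautils.NiceMD{}
	// Set/Add lower-case the key, so lookups are case-insensitive in practice.
	md.Set("X-Request-Id", "abc-123").Add("X-Request-Id", "def-456")
	fmt.Println(md.Get("x-request-id")) // prints the first value: abc-123

	// Attach the metadata to an outgoing context as usual.
	_ = md.ToOutgoing(context.Background())
}
```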
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go
deleted file mode 100644
index 8a53871..0000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2016 Michal Witkowski. All Rights Reserved.
-// See LICENSE for licensing terms.
-
-package metautils
-
-import (
-	"encoding/base64"
-	"strings"
-)
-
-const (
-	binHdrSuffix = "-bin"
-)
-
-func encodeKeyValue(k, v string) (string, string) {
-	k = strings.ToLower(k)
-	if strings.HasSuffix(k, binHdrSuffix) {
-		val := base64.StdEncoding.EncodeToString([]byte(v))
-		v = string(val)
-	}
-	return k, v
-}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go
index 597b862..05ccfb3 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go
@@ -4,7 +4,8 @@
 package grpc_middleware
 
 import (
-	"golang.org/x/net/context"
+	"context"
+
 	"google.golang.org/grpc"
 )
 
diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go
index 911227f..0c10c4e 100644
--- a/vendor/github.com/hashicorp/go-uuid/uuid.go
+++ b/vendor/github.com/hashicorp/go-uuid/uuid.go
@@ -4,22 +4,40 @@
 	"crypto/rand"
 	"encoding/hex"
 	"fmt"
+	"io"
 )
 
 // GenerateRandomBytes is used to generate random bytes of given size.
 func GenerateRandomBytes(size int) ([]byte, error) {
+	return GenerateRandomBytesWithReader(size, rand.Reader)
+}
+
+// GenerateRandomBytesWithReader is used to generate random bytes of given size read from a given reader.
+func GenerateRandomBytesWithReader(size int, reader io.Reader) ([]byte, error) {
+	if reader == nil {
+		return nil, fmt.Errorf("provided reader is nil")
+	}
 	buf := make([]byte, size)
-	if _, err := rand.Read(buf); err != nil {
+	if _, err := io.ReadFull(reader, buf); err != nil {
 		return nil, fmt.Errorf("failed to read random bytes: %v", err)
 	}
 	return buf, nil
 }
 
+
 const uuidLen = 16
 
 // GenerateUUID is used to generate a random UUID
 func GenerateUUID() (string, error) {
-	buf, err := GenerateRandomBytes(uuidLen)
+	return GenerateUUIDWithReader(rand.Reader)
+}
+
+// GenerateUUIDWithReader is used to generate a random UUID with a given Reader
+func GenerateUUIDWithReader(reader io.Reader) (string, error) {
+	if reader == nil {
+		return "", fmt.Errorf("provided reader is nil")
+	}
+	buf, err := GenerateRandomBytesWithReader(uuidLen, reader)
 	if err != nil {
 		return "", err
 	}
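
A minimal sketch of GenerateUUIDWithReader with a deterministic reader, which makes the result reproducible in tests; the zero-filled reader is illustrative only:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	uuid "github.com/hashicorp/go-uuid"
)

func main() {
	// 16 zero bytes -> the all-zero UUID; production code keeps using
	// GenerateUUID, which reads from crypto/rand.
	reader := bytes.NewReader(make([]byte, 16))
	id, err := uuid.GenerateUUIDWithReader(reader)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id) // 00000000-0000-0000-0000-000000000000
}
```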
diff --git a/vendor/github.com/jcmturner/aescts/v2/LICENSE b/vendor/github.com/jcmturner/aescts/v2/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/vendor/github.com/jcmturner/aescts/v2/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/jcmturner/aescts/v2/aescts.go b/vendor/github.com/jcmturner/aescts/v2/aescts.go
new file mode 100644
index 0000000..fee3b43
--- /dev/null
+++ b/vendor/github.com/jcmturner/aescts/v2/aescts.go
@@ -0,0 +1,186 @@
+// Package aescts provides AES CBC CipherText Stealing encryption and decryption methods
+package aescts
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"errors"
+	"fmt"
+)
+
+// Encrypt the message with the key and the initial vector.
+// Returns: next iv, ciphertext bytes, error
+func Encrypt(key, iv, plaintext []byte) ([]byte, []byte, error) {
+	l := len(plaintext)
+
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return []byte{}, []byte{}, fmt.Errorf("error creating cipher: %v", err)
+	}
+	mode := cipher.NewCBCEncrypter(block, iv)
+
+	m := make([]byte, len(plaintext))
+	copy(m, plaintext)
+
+	/*For consistency, ciphertext stealing is always used for the last two
+	blocks of the data to be encrypted, as in [RC5].  If the data length
+	is a multiple of the block size, this is equivalent to plain CBC mode
+	with the last two ciphertext blocks swapped.*/
+	/*The initial vector carried out from one encryption for use in a
+	subsequent encryption is the next-to-last block of the encryption
+	output; this is the encrypted form of the last plaintext block.*/
+	if l <= aes.BlockSize {
+		m, _ = zeroPad(m, aes.BlockSize)
+		mode.CryptBlocks(m, m)
+		return m, m, nil
+	}
+	if l%aes.BlockSize == 0 {
+		mode.CryptBlocks(m, m)
+		iv = m[len(m)-aes.BlockSize:]
+		rb, _ := swapLastTwoBlocks(m, aes.BlockSize)
+		return iv, rb, nil
+	}
+	m, _ = zeroPad(m, aes.BlockSize)
+	rb, pb, lb, err := tailBlocks(m, aes.BlockSize)
+	if err != nil {
+		return []byte{}, []byte{}, fmt.Errorf("error tailing blocks: %v", err)
+	}
+	var ct []byte
+	if rb != nil {
+		// Encrypt all but the last 2 blocks and update the rolling iv
+		mode.CryptBlocks(rb, rb)
+		iv = rb[len(rb)-aes.BlockSize:]
+		mode = cipher.NewCBCEncrypter(block, iv)
+		ct = append(ct, rb...)
+	}
+	mode.CryptBlocks(pb, pb)
+	mode = cipher.NewCBCEncrypter(block, pb)
+	mode.CryptBlocks(lb, lb)
+	// Cipher Text Stealing (CTS) - Ref: https://en.wikipedia.org/wiki/Ciphertext_stealing#CBC_ciphertext_stealing
+	// Swap the last two cipher blocks
+	// Truncate the ciphertext to the length of the original plaintext
+	ct = append(ct, lb...)
+	ct = append(ct, pb...)
+	return lb, ct[:l], nil
+}
+
+// Decrypt the ciphertext with the key and the initial vector.
+func Decrypt(key, iv, ciphertext []byte) ([]byte, error) {
+	// Copy the ciphertext: Go slices passed to this method still share their backing array with the caller, so working on the argument directly could update the calling code's data.
+	ct := make([]byte, len(ciphertext))
+	copy(ct, ciphertext)
+	if len(ct) < aes.BlockSize {
+		return []byte{}, fmt.Errorf("ciphertext is not large enough. It is less than one block size. Blocksize:%v; Ciphertext:%v", aes.BlockSize, len(ct))
+	}
+	// Configure the CBC
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, fmt.Errorf("error creating cipher: %v", err)
+	}
+	var mode cipher.BlockMode
+
+	// If the ciphertext is a multiple of the block size we just need to swap back the last two blocks and then do CBC.
+	// If the ciphertext is just one block we can't swap so we just decrypt.
+	if len(ct)%aes.BlockSize == 0 {
+		if len(ct) > aes.BlockSize {
+			ct, _ = swapLastTwoBlocks(ct, aes.BlockSize)
+		}
+		mode = cipher.NewCBCDecrypter(block, iv)
+		message := make([]byte, len(ct))
+		mode.CryptBlocks(message, ct)
+		return message[:len(ct)], nil
+	}
+
+	// Cipher Text Stealing (CTS) using CBC interface. Ref: https://en.wikipedia.org/wiki/Ciphertext_stealing#CBC_ciphertext_stealing
+	// Get ciphertext of the 2nd to last (penultimate) block (cpb), the last block (clb) and the rest (crb)
+	crb, cpb, clb, _ := tailBlocks(ct, aes.BlockSize)
+	v := make([]byte, len(iv), len(iv))
+	copy(v, iv)
+	var message []byte
+	if crb != nil {
+		// If there is more than just the last and the penultimate block we decrypt it, and the last block of this becomes the iv for later
+		rb := make([]byte, len(crb))
+		mode = cipher.NewCBCDecrypter(block, v)
+		v = crb[len(crb)-aes.BlockSize:]
+		mode.CryptBlocks(rb, crb)
+		message = append(message, rb...)
+	}
+
+	// We need to modify the ciphertext.
+	// Decrypt the 2nd to last (penultimate) block with the original iv.
+	pb := make([]byte, aes.BlockSize)
+	mode = cipher.NewCBCDecrypter(block, iv)
+	mode.CryptBlocks(pb, cpb)
+	// number of bytes needed to pad
+	npb := aes.BlockSize - len(ct)%aes.BlockSize
+	// Pad the last block using the number of bytes needed from the tail of the plaintext 2nd to last (penultimate) block
+	clb = append(clb, pb[len(pb)-npb:]...)
+
+	// Now decrypt the last block in the penultimate position (iv will be from the crb; if there is no crb it's zeros)
+	// iv for the penultimate block decrypted in the last position becomes the modified last block
+	lb := make([]byte, aes.BlockSize)
+	mode = cipher.NewCBCDecrypter(block, v)
+	v = clb
+	mode.CryptBlocks(lb, clb)
+	message = append(message, lb...)
+
+	// Now decrypt the penultimate block in the last position (iv will be from the modified last block)
+	mode = cipher.NewCBCDecrypter(block, v)
+	mode.CryptBlocks(cpb, cpb)
+	message = append(message, cpb...)
+
+	// Truncate to the size of the original cipher text
+	return message[:len(ct)], nil
+}
+
+func tailBlocks(b []byte, c int) ([]byte, []byte, []byte, error) {
+	if len(b) <= c {
+		return []byte{}, []byte{}, []byte{}, errors.New("bytes slice is not larger than one block so cannot tail")
+	}
+	// Get size of last block
+	var lbs int
+	if l := len(b) % aes.BlockSize; l == 0 {
+		lbs = aes.BlockSize
+	} else {
+		lbs = l
+	}
+	// Get last block
+	lb := b[len(b)-lbs:]
+	// Get 2nd to last (penultimate) block
+	pb := b[len(b)-lbs-c : len(b)-lbs]
+	if len(b) > 2*c {
+		rb := b[:len(b)-lbs-c]
+		return rb, pb, lb, nil
+	}
+	return nil, pb, lb, nil
+}
+
+func swapLastTwoBlocks(b []byte, c int) ([]byte, error) {
+	rb, pb, lb, err := tailBlocks(b, c)
+	if err != nil {
+		return nil, err
+	}
+	var out []byte
+	if rb != nil {
+		out = append(out, rb...)
+	}
+	out = append(out, lb...)
+	out = append(out, pb...)
+	return out, nil
+}
+
+// zeroPad pads bytes with zeros to nearest multiple of message size m.
+func zeroPad(b []byte, m int) ([]byte, error) {
+	if m <= 0 {
+		return nil, errors.New("invalid message block size when padding")
+	}
+	if b == nil || len(b) == 0 {
+		return nil, errors.New("data not valid to pad: Zero size")
+	}
+	if l := len(b) % m; l != 0 {
+		n := m - l
+		z := make([]byte, n)
+		b = append(b, z...)
+	}
+	return b, nil
+}
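For orientation, a minimal round-trip sketch of how this vendored package's Encrypt and Decrypt are typically used; the key, IV and message values below are illustrative placeholders and not part of this change.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/jcmturner/aescts/v2"
)

func main() {
	key := bytes.Repeat([]byte{0x01}, 32) // AES-256 key (illustrative)
	iv := make([]byte, 16)                // zero IV (illustrative)
	msg := []byte("a message that is not a multiple of the AES block size")

	// Encrypt returns the next IV and the ciphertext; with ciphertext
	// stealing the ciphertext has the same length as the plaintext.
	_, ct, err := aescts.Encrypt(key, iv, msg)
	if err != nil {
		log.Fatal(err)
	}
	pt, err := aescts.Decrypt(key, iv, ct)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(pt, msg)) // true
}
```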
diff --git a/vendor/github.com/jcmturner/aescts/v2/go.mod b/vendor/github.com/jcmturner/aescts/v2/go.mod
new file mode 100644
index 0000000..034c3ce
--- /dev/null
+++ b/vendor/github.com/jcmturner/aescts/v2/go.mod
@@ -0,0 +1,5 @@
+module github.com/jcmturner/aescts/v2
+
+go 1.13
+
+require github.com/stretchr/testify v1.4.0
diff --git a/vendor/github.com/jcmturner/aescts/v2/go.sum b/vendor/github.com/jcmturner/aescts/v2/go.sum
new file mode 100644
index 0000000..e863f51
--- /dev/null
+++ b/vendor/github.com/jcmturner/aescts/v2/go.sum
@@ -0,0 +1,10 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/jcmturner/dnsutils/v2/LICENSE b/vendor/github.com/jcmturner/dnsutils/v2/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/jcmturner/dnsutils/v2/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/jcmturner/dnsutils/v2/go.mod b/vendor/github.com/jcmturner/dnsutils/v2/go.mod
new file mode 100644
index 0000000..f75ac6d
--- /dev/null
+++ b/vendor/github.com/jcmturner/dnsutils/v2/go.mod
@@ -0,0 +1,5 @@
+module github.com/jcmturner/dnsutils/v2
+
+go 1.13
+
+require github.com/stretchr/testify v1.4.0
diff --git a/vendor/github.com/jcmturner/dnsutils/v2/go.sum b/vendor/github.com/jcmturner/dnsutils/v2/go.sum
new file mode 100644
index 0000000..e863f51
--- /dev/null
+++ b/vendor/github.com/jcmturner/dnsutils/v2/go.sum
@@ -0,0 +1,10 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/jcmturner/dnsutils/v2/srv.go b/vendor/github.com/jcmturner/dnsutils/v2/srv.go
new file mode 100644
index 0000000..15ea912
--- /dev/null
+++ b/vendor/github.com/jcmturner/dnsutils/v2/srv.go
@@ -0,0 +1,95 @@
+package dnsutils
+
+import (
+	"math/rand"
+	"net"
+	"sort"
+)
+
+// OrderedSRV returns a count of the results and a map keyed on the order in which they should be used.
+// This is based on the records' priority and randomised selection based on their relative weighting.
+// The function's inputs are the same as those for net.LookupSRV.
+// To use in the correct order:
+//
+// count, orderedSRV, err := OrderedSRV(service, proto, name)
+// i := 1
+// for  i <= count {
+//   srv := orderedSRV[i]
+//   // Do something such as dial this SRV. If it fails move on to the next or break if it succeeds.
+//   i += 1
+// }
+func OrderedSRV(service, proto, name string) (int, map[int]*net.SRV, error) {
+	_, addrs, err := net.LookupSRV(service, proto, name)
+	if err != nil {
+		return 0, make(map[int]*net.SRV), err
+	}
+	index, osrv := orderSRV(addrs)
+	return index, osrv, nil
+}
+
+func orderSRV(addrs []*net.SRV) (int, map[int]*net.SRV) {
+	// Initialise the ordered map
+	var o int
+	osrv := make(map[int]*net.SRV)
+
+	prioMap := make(map[int][]*net.SRV, 0)
+	for _, srv := range addrs {
+		prioMap[int(srv.Priority)] = append(prioMap[int(srv.Priority)], srv)
+	}
+
+	priorities := make([]int, 0)
+	for p := range prioMap {
+		priorities = append(priorities, p)
+	}
+
+	var count int
+	sort.Ints(priorities)
+	for _, p := range priorities {
+		tos := weightedOrder(prioMap[p])
+		for i, s := range tos {
+			count += 1
+			osrv[o+i] = s
+		}
+		o += len(tos)
+	}
+	return count, osrv
+}
+
+func weightedOrder(srvs []*net.SRV) map[int]*net.SRV {
+	// Get the total weight
+	var tw int
+	for _, s := range srvs {
+		tw += int(s.Weight)
+	}
+
+	// Initialise the ordered map
+	o := 1
+	osrv := make(map[int]*net.SRV)
+
+	// Whilst there are still entries to be ordered
+	l := len(srvs)
+	for l > 0 {
+		i := rand.Intn(l)
+		s := srvs[i]
+		var rw int
+		if tw > 0 {
+			// The greater the weight the more likely this will be zero or less
+			rw = rand.Intn(tw) - int(s.Weight)
+		}
+		if rw <= 0 {
+			// Put entry in position
+			osrv[o] = s
+			if len(srvs) > 1 {
+				// Remove the entry from the source slice by swapping with the last entry and truncating
+				srvs[len(srvs)-1], srvs[i] = srvs[i], srvs[len(srvs)-1]
+				srvs = srvs[:len(srvs)-1]
+				l = len(srvs)
+			} else {
+				l = 0
+			}
+			o += 1
+			tw = tw - int(s.Weight)
+		}
+	}
+	return osrv
+}
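A short usage sketch of OrderedSRV, following the loop shown in its doc comment; the service, protocol and domain are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/jcmturner/dnsutils/v2"
)

func main() {
	// Look up SRV records for a Kerberos KDC (placeholder domain).
	count, ordered, err := dnsutils.OrderedSRV("kerberos", "udp", "example.com")
	if err != nil {
		log.Fatal(err)
	}
	// Map keys run from 1 to count, in the order targets should be tried.
	for i := 1; i <= count; i++ {
		srv := ordered[i]
		fmt.Printf("%d: %s:%d\n", i, srv.Target, srv.Port)
	}
}
```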
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/LICENSE b/vendor/github.com/jcmturner/gokrb5/v8/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/asn1tools/tools.go b/vendor/github.com/jcmturner/gokrb5/v8/asn1tools/tools.go
new file mode 100644
index 0000000..f27740b
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/asn1tools/tools.go
@@ -0,0 +1,86 @@
+// Package asn1tools provides tools for managing ASN1 marshaled data.
+package asn1tools
+
+import (
+	"github.com/jcmturner/gofork/encoding/asn1"
+)
+
+// MarshalLengthBytes returns the ASN1 encoded bytes for the length 'l'
+//
+// There are two forms: short (for lengths between 0 and 127), and long definite (for lengths between 0 and 2^1008 -1).
+//
+// Short form: One octet. Bit 8 has value "0" and bits 7-1 give the length.
+//
+// Long form: Two to 127 octets. Bit 8 of first octet has value "1" and bits 7-1 give the number of additional length octets. Second and following octets give the length, base 256, most significant digit first.
+func MarshalLengthBytes(l int) []byte {
+	if l <= 127 {
+		return []byte{byte(l)}
+	}
+	var b []byte
+	p := 1
+	for i := 1; i < 127; {
+		b = append([]byte{byte((l % (p * 256)) / p)}, b...)
+		p = p * 256
+		l = l - l%p
+		if l <= 0 {
+			break
+		}
+	}
+	return append([]byte{byte(128 + len(b))}, b...)
+}
+
+// GetLengthFromASN returns the length of a slice of ASN1 encoded bytes from the ASN1 length header it contains.
+func GetLengthFromASN(b []byte) int {
+	if int(b[1]) <= 127 {
+		return int(b[1])
+	}
+	// The bytes that indicate the length
+	lb := b[2 : 2+int(b[1])-128]
+	base := 1
+	l := 0
+	for i := len(lb) - 1; i >= 0; i-- {
+		l += int(lb[i]) * base
+		base = base * 256
+	}
+	return l
+}
+
+// GetNumberBytesInLengthHeader returns the number of bytes in the ASN1 header that indicate the length.
+func GetNumberBytesInLengthHeader(b []byte) int {
+	if int(b[1]) <= 127 {
+		return 1
+	}
+	// The bytes that indicate the length
+	return 1 + int(b[1]) - 128
+}
+
+// AddASNAppTag adds an ASN1 encoding application tag value to the raw bytes provided.
+func AddASNAppTag(b []byte, tag int) []byte {
+	r := asn1.RawValue{
+		Class:      asn1.ClassApplication,
+		IsCompound: true,
+		Tag:        tag,
+		Bytes:      b,
+	}
+	ab, _ := asn1.Marshal(r)
+	return ab
+}
+
+/*
+// The Marshal method of golang's asn1 package does not enable you to define wrapping the output in an application tag.
+// This method adds that wrapping tag.
+func AddASNAppTag(b []byte, tag int) []byte {
+	// The ASN1 wrapping consists of 2 bytes:
+	// 1st byte -> Identifier Octet - Application Tag
+	// 2nd byte -> The length (this will be the size indicated in the input bytes + 2 for the additional bytes we add here).
+	// Application Tag:
+	//| Bit:        | 8                            | 7                          | 6                                         | 5 | 4 | 3 | 2 | 1             |
+	//| Value:      | 0                            | 1                          | 1                                         | From the RFC spec 4120        |
+	//| Explanation | Defined by the ASN1 encoding rules for an application tag | A value of 1 indicates a constructed type | The ASN Application tag value |
+	// Therefore the value of the byte is an integer = ( Application tag value + 96 )
+	//b = append(MarshalLengthBytes(int(b[1])+2), b...)
+	b = append(MarshalLengthBytes(len(b)), b...)
+	b = append([]byte{byte(96 + tag)}, b...)
+	return b
+}
+*/
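A brief sketch of the length-header helpers above, round-tripping a long-form length; the leading 0x30 tag byte is illustrative.

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gokrb5/v8/asn1tools"
)

func main() {
	// Long-form length for 300 encodes as 0x82 0x01 0x2c.
	lb := asn1tools.MarshalLengthBytes(300)
	// Build a minimal header: an (illustrative) tag byte followed by the length bytes.
	hdr := append([]byte{0x30}, lb...)

	fmt.Println(asn1tools.GetLengthFromASN(hdr))             // 300
	fmt.Println(asn1tools.GetNumberBytesInLengthHeader(hdr)) // 3
}
```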
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/ASExchange.go b/vendor/github.com/jcmturner/gokrb5/v8/client/ASExchange.go
new file mode 100644
index 0000000..5becccc
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/ASExchange.go
@@ -0,0 +1,182 @@
+package client
+
+import (
+	"github.com/jcmturner/gokrb5/v8/crypto"
+	"github.com/jcmturner/gokrb5/v8/crypto/etype"
+	"github.com/jcmturner/gokrb5/v8/iana/errorcode"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/iana/patype"
+	"github.com/jcmturner/gokrb5/v8/krberror"
+	"github.com/jcmturner/gokrb5/v8/messages"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// ASExchange performs an AS exchange for the client to retrieve a TGT.
+func (cl *Client) ASExchange(realm string, ASReq messages.ASReq, referral int) (messages.ASRep, error) {
+	if ok, err := cl.IsConfigured(); !ok {
+		return messages.ASRep{}, krberror.Errorf(err, krberror.ConfigError, "AS Exchange cannot be performed")
+	}
+
+	// Set PAData if required
+	err := setPAData(cl, nil, &ASReq)
+	if err != nil {
+		return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: issue with setting PAData on AS_REQ")
+	}
+
+	b, err := ASReq.Marshal()
+	if err != nil {
+		return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed marshaling AS_REQ")
+	}
+	var ASRep messages.ASRep
+
+	rb, err := cl.sendToKDC(b, realm)
+	if err != nil {
+		if e, ok := err.(messages.KRBError); ok {
+			switch e.ErrorCode {
+			case errorcode.KDC_ERR_PREAUTH_REQUIRED, errorcode.KDC_ERR_PREAUTH_FAILED:
+				// From now on assume this client will need to do this pre-auth and set the PAData
+				cl.settings.assumePreAuthentication = true
+				err = setPAData(cl, &e, &ASReq)
+				if err != nil {
+					return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: failed setting AS_REQ PAData for pre-authentication required")
+				}
+				b, err := ASReq.Marshal()
+				if err != nil {
+					return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed marshaling AS_REQ with PAData")
+				}
+				rb, err = cl.sendToKDC(b, realm)
+				if err != nil {
+					if _, ok := err.(messages.KRBError); ok {
+						return messages.ASRep{}, krberror.Errorf(err, krberror.KDCError, "AS Exchange Error: kerberos error response from KDC")
+					}
+					return messages.ASRep{}, krberror.Errorf(err, krberror.NetworkingError, "AS Exchange Error: failed sending AS_REQ to KDC")
+				}
+			case errorcode.KDC_ERR_WRONG_REALM:
+				// Client referral https://tools.ietf.org/html/rfc6806.html#section-7
+				if referral > 5 {
+					return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "maximum number of client referrals exceeded")
+				}
+				referral++
+				return cl.ASExchange(e.CRealm, ASReq, referral)
+			default:
+				return messages.ASRep{}, krberror.Errorf(err, krberror.KDCError, "AS Exchange Error: kerberos error response from KDC")
+			}
+		} else {
+			return messages.ASRep{}, krberror.Errorf(err, krberror.NetworkingError, "AS Exchange Error: failed sending AS_REQ to KDC")
+		}
+	}
+	err = ASRep.Unmarshal(rb)
+	if err != nil {
+		return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed to process the AS_REP")
+	}
+	if ok, err := ASRep.Verify(cl.Config, cl.Credentials, ASReq); !ok {
+		return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: AS_REP is not valid or client password/keytab incorrect")
+	}
+	return ASRep, nil
+}
+
+// setPAData adds pre-authentication data to the AS_REQ.
+func setPAData(cl *Client, krberr *messages.KRBError, ASReq *messages.ASReq) error {
+	if !cl.settings.DisablePAFXFAST() {
+		pa := types.PAData{PADataType: patype.PA_REQ_ENC_PA_REP}
+		ASReq.PAData = append(ASReq.PAData, pa)
+	}
+	if cl.settings.AssumePreAuthentication() {
+		// Identify the etype to use to encrypt the PA Data
+		var et etype.EType
+		var err error
+		var key types.EncryptionKey
+		var kvno int
+		if krberr == nil {
+			// This is not in response to an error from the KDC. It is preemptive or renewal
+			// There is no KRB Error that tells us the etype to use
+			etn := cl.settings.preAuthEType // Use the etype that may have previously been negotiated
+			if etn == 0 {
+				etn = int32(cl.Config.LibDefaults.PreferredPreauthTypes[0]) // Resort to config
+			}
+			et, err = crypto.GetEtype(etn)
+			if err != nil {
+				return krberror.Errorf(err, krberror.EncryptingError, "error getting etype for pre-auth encryption")
+			}
+			key, kvno, err = cl.Key(et, 0, nil)
+			if err != nil {
+				return krberror.Errorf(err, krberror.EncryptingError, "error getting key from credentials")
+			}
+		} else {
+			// Get the etype to use from the PA data in the KRBError e-data
+			et, err = preAuthEType(krberr)
+			if err != nil {
+				return krberror.Errorf(err, krberror.EncryptingError, "error getting etype for pre-auth encryption")
+			}
+			cl.settings.preAuthEType = et.GetETypeID() // Set the etype that has been defined for potential future use
+			key, kvno, err = cl.Key(et, 0, krberr)
+			if err != nil {
+				return krberror.Errorf(err, krberror.EncryptingError, "error getting key from credentials")
+			}
+		}
+		// Generate the PA data
+		paTSb, err := types.GetPAEncTSEncAsnMarshalled()
+		if err != nil {
+			return krberror.Errorf(err, krberror.KRBMsgError, "error creating PAEncTSEnc for Pre-Authentication")
+		}
+		paEncTS, err := crypto.GetEncryptedData(paTSb, key, keyusage.AS_REQ_PA_ENC_TIMESTAMP, kvno)
+		if err != nil {
+			return krberror.Errorf(err, krberror.EncryptingError, "error encrypting pre-authentication timestamp")
+		}
+		pb, err := paEncTS.Marshal()
+		if err != nil {
+			return krberror.Errorf(err, krberror.EncodingError, "error marshaling the PAEncTSEnc encrypted data")
+		}
+		pa := types.PAData{
+			PADataType:  patype.PA_ENC_TIMESTAMP,
+			PADataValue: pb,
+		}
+		// Look for and delete any existing patype.PA_ENC_TIMESTAMP
+		for i, pa := range ASReq.PAData {
+			if pa.PADataType == patype.PA_ENC_TIMESTAMP {
+				ASReq.PAData[i] = ASReq.PAData[len(ASReq.PAData)-1]
+				ASReq.PAData = ASReq.PAData[:len(ASReq.PAData)-1]
+			}
+		}
+		ASReq.PAData = append(ASReq.PAData, pa)
+	}
+	return nil
+}
+
+// preAuthEType establishes what encryption type to use for pre-authentication from the KRBError returned from the KDC.
+func preAuthEType(krberr *messages.KRBError) (etype etype.EType, err error) {
+	//RFC 4120 5.2.7.5 covers the preference order of ETYPE-INFO2 and ETYPE-INFO.
+	var etypeID int32
+	var pas types.PADataSequence
+	e := pas.Unmarshal(krberr.EData)
+	if e != nil {
+		err = krberror.Errorf(e, krberror.EncodingError, "error unmarshalling KRBError data")
+		return
+	}
+Loop:
+	for _, pa := range pas {
+		switch pa.PADataType {
+		case patype.PA_ETYPE_INFO2:
+			info, e := pa.GetETypeInfo2()
+			if e != nil {
+				err = krberror.Errorf(e, krberror.EncodingError, "error unmarshalling ETYPE-INFO2 data")
+				return
+			}
+			etypeID = info[0].EType
+			break Loop
+		case patype.PA_ETYPE_INFO:
+			info, e := pa.GetETypeInfo()
+			if e != nil {
+				err = krberror.Errorf(e, krberror.EncodingError, "error unmarshalling ETYPE-INFO data")
+				return
+			}
+			etypeID = info[0].EType
+		}
+	}
+	etype, e = crypto.GetEtype(etypeID)
+	if e != nil {
+		err = krberror.Errorf(e, krberror.EncryptingError, "error creating etype")
+		return
+	}
+	return etype, nil
+}
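Applications normally reach ASExchange indirectly. A hedged sketch of the usual entry point, assuming config.Load and the client's Login method from elsewhere in this vendored library; the krb5.conf path, principal, realm and password are placeholders.

```go
package main

import (
	"log"

	"github.com/jcmturner/gokrb5/v8/client"
	"github.com/jcmturner/gokrb5/v8/config"
)

func main() {
	// Placeholder krb5.conf path, principal, realm and password.
	cfg, err := config.Load("/etc/krb5.conf")
	if err != nil {
		log.Fatal(err)
	}
	cl := client.NewWithPassword("user", "EXAMPLE.COM", "password", cfg)
	// Login (defined elsewhere in this library) drives the AS exchange
	// above to obtain and cache a TGT for the client's realm.
	if err := cl.Login(); err != nil {
		log.Fatal(err)
	}
}
```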
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go b/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go
new file mode 100644
index 0000000..e4571ce
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go
@@ -0,0 +1,103 @@
+package client
+
+import (
+	"github.com/jcmturner/gokrb5/v8/iana/flags"
+	"github.com/jcmturner/gokrb5/v8/iana/nametype"
+	"github.com/jcmturner/gokrb5/v8/krberror"
+	"github.com/jcmturner/gokrb5/v8/messages"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// TGSREQGenerateAndExchange generates the TGS_REQ and performs a TGS exchange to retrieve a ticket to the specified SPN.
+func (cl *Client) TGSREQGenerateAndExchange(spn types.PrincipalName, kdcRealm string, tgt messages.Ticket, sessionKey types.EncryptionKey, renewal bool) (tgsReq messages.TGSReq, tgsRep messages.TGSRep, err error) {
+	tgsReq, err = messages.NewTGSReq(cl.Credentials.CName(), kdcRealm, cl.Config, tgt, sessionKey, spn, renewal)
+	if err != nil {
+		return tgsReq, tgsRep, krberror.Errorf(err, krberror.KRBMsgError, "TGS Exchange Error: failed to generate a new TGS_REQ")
+	}
+	return cl.TGSExchange(tgsReq, kdcRealm, tgsRep.Ticket, sessionKey, 0)
+}
+
+// TGSExchange exchanges the provided TGS_REQ with the KDC to retrieve a TGS_REP.
+// Referrals are automatically handled.
+// The client's cache is updated with the ticket received.
+func (cl *Client) TGSExchange(tgsReq messages.TGSReq, kdcRealm string, tgt messages.Ticket, sessionKey types.EncryptionKey, referral int) (messages.TGSReq, messages.TGSRep, error) {
+	var tgsRep messages.TGSRep
+	b, err := tgsReq.Marshal()
+	if err != nil {
+		return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to marshal TGS_REQ")
+	}
+	r, err := cl.sendToKDC(b, kdcRealm)
+	if err != nil {
+		if _, ok := err.(messages.KRBError); ok {
+			return tgsReq, tgsRep, krberror.Errorf(err, krberror.KDCError, "TGS Exchange Error: kerberos error response from KDC when requesting for %s", tgsReq.ReqBody.SName.PrincipalNameString())
+		}
+		return tgsReq, tgsRep, krberror.Errorf(err, krberror.NetworkingError, "TGS Exchange Error: issue sending TGS_REQ to KDC")
+	}
+	err = tgsRep.Unmarshal(r)
+	if err != nil {
+		return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to process the TGS_REP")
+	}
+	err = tgsRep.DecryptEncPart(sessionKey)
+	if err != nil {
+		return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to process the TGS_REP")
+	}
+	if ok, err := tgsRep.Verify(cl.Config, tgsReq); !ok {
+		return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: TGS_REP is not valid")
+	}
+
+	if tgsRep.Ticket.SName.NameString[0] == "krbtgt" && !tgsRep.Ticket.SName.Equal(tgsReq.ReqBody.SName) {
+		if referral > 5 {
+			return tgsReq, tgsRep, krberror.Errorf(err, krberror.KRBMsgError, "TGS Exchange Error: maximum number of referrals exceeded")
+		}
+		// Server referral https://tools.ietf.org/html/rfc6806.html#section-8
+		// The TGS Rep contains a TGT for another domain as the service resides in that domain.
+		cl.addSession(tgsRep.Ticket, tgsRep.DecryptedEncPart)
+		realm := tgsRep.Ticket.SName.NameString[len(tgsRep.Ticket.SName.NameString)-1]
+		referral++
+		if types.IsFlagSet(&tgsReq.ReqBody.KDCOptions, flags.EncTktInSkey) && len(tgsReq.ReqBody.AdditionalTickets) > 0 {
+			tgsReq, err = messages.NewUser2UserTGSReq(cl.Credentials.CName(), kdcRealm, cl.Config, tgt, sessionKey, tgsReq.ReqBody.SName, tgsReq.Renewal, tgsReq.ReqBody.AdditionalTickets[0])
+			if err != nil {
+				return tgsReq, tgsRep, err
+			}
+		}
+		tgsReq, err = messages.NewTGSReq(cl.Credentials.CName(), realm, cl.Config, tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, tgsReq.ReqBody.SName, tgsReq.Renewal)
+		if err != nil {
+			return tgsReq, tgsRep, err
+		}
+		return cl.TGSExchange(tgsReq, realm, tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, referral)
+	}
+	cl.cache.addEntry(
+		tgsRep.Ticket,
+		tgsRep.DecryptedEncPart.AuthTime,
+		tgsRep.DecryptedEncPart.StartTime,
+		tgsRep.DecryptedEncPart.EndTime,
+		tgsRep.DecryptedEncPart.RenewTill,
+		tgsRep.DecryptedEncPart.Key,
+	)
+	cl.Log("ticket added to cache for %s (EndTime: %v)", tgsRep.Ticket.SName.PrincipalNameString(), tgsRep.DecryptedEncPart.EndTime)
+	return tgsReq, tgsRep, err
+}
+
+// GetServiceTicket makes a request to get a service ticket for the SPN specified
+// SPN format: <SERVICE>/<FQDN>, e.g. HTTP/www.example.com.
+// The ticket will be added to the client's ticket cache
+func (cl *Client) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) {
+	var tkt messages.Ticket
+	var skey types.EncryptionKey
+	if tkt, skey, ok := cl.GetCachedTicket(spn); ok {
+		// Already a valid ticket in the cache
+		return tkt, skey, nil
+	}
+	princ := types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn)
+	realm := cl.Config.ResolveRealm(princ.NameString[len(princ.NameString)-1])
+
+	tgt, skey, err := cl.sessionTGT(realm)
+	if err != nil {
+		return tkt, skey, err
+	}
+	_, tgsRep, err := cl.TGSREQGenerateAndExchange(princ, realm, tgt, skey, false)
+	if err != nil {
+		return tkt, skey, err
+	}
+	return tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, nil
+}
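A hedged sketch of requesting a service ticket once a TGT is held (for example via Login, as in the sketch after ASExchange.go); the package and function names here are placeholders, not part of this change.

```go
// Package example is an illustrative sketch only; all names are placeholders.
package example

import (
	"fmt"

	"github.com/jcmturner/gokrb5/v8/client"
)

// serviceTicketFor assumes cl already holds a TGT, for example via Login
// as in the earlier sketch. SPN format: <SERVICE>/<FQDN>.
func serviceTicketFor(cl *client.Client, spn string) error {
	tkt, key, err := cl.GetServiceTicket(spn)
	if err != nil {
		return err
	}
	fmt.Printf("got ticket for %s (session key etype %d)\n", tkt.SName.PrincipalNameString(), key.KeyType)
	return nil
}
```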
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/cache.go b/vendor/github.com/jcmturner/gokrb5/v8/client/cache.go
new file mode 100644
index 0000000..552e73e
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/cache.go
@@ -0,0 +1,134 @@
+package client
+
+import (
+	"encoding/json"
+	"errors"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/jcmturner/gokrb5/v8/messages"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// Cache for service tickets held by the client.
+type Cache struct {
+	Entries map[string]CacheEntry
+	mux     sync.RWMutex
+}
+
+// CacheEntry holds details for a cache entry.
+type CacheEntry struct {
+	SPN        string
+	Ticket     messages.Ticket `json:"-"`
+	AuthTime   time.Time
+	StartTime  time.Time
+	EndTime    time.Time
+	RenewTill  time.Time
+	SessionKey types.EncryptionKey `json:"-"`
+}
+
+// NewCache creates a new client ticket cache instance.
+func NewCache() *Cache {
+	return &Cache{
+		Entries: map[string]CacheEntry{},
+	}
+}
+
+// getEntry returns a cache entry that matches the SPN.
+func (c *Cache) getEntry(spn string) (CacheEntry, bool) {
+	c.mux.RLock()
+	defer c.mux.RUnlock()
+	e, ok := (*c).Entries[spn]
+	return e, ok
+}
+
+// JSON returns information about the cached service tickets in a JSON format.
+func (c *Cache) JSON() (string, error) {
+	c.mux.RLock()
+	defer c.mux.RUnlock()
+	var es []CacheEntry
+	keys := make([]string, 0, len(c.Entries))
+	for k := range c.Entries {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		es = append(es, c.Entries[k])
+	}
+	b, err := json.MarshalIndent(&es, "", "  ")
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
+
+// addEntry adds a ticket to the cache.
+func (c *Cache) addEntry(tkt messages.Ticket, authTime, startTime, endTime, renewTill time.Time, sessionKey types.EncryptionKey) CacheEntry {
+	spn := tkt.SName.PrincipalNameString()
+	c.mux.Lock()
+	defer c.mux.Unlock()
+	(*c).Entries[spn] = CacheEntry{
+		SPN:        spn,
+		Ticket:     tkt,
+		AuthTime:   authTime,
+		StartTime:  startTime,
+		EndTime:    endTime,
+		RenewTill:  renewTill,
+		SessionKey: sessionKey,
+	}
+	return c.Entries[spn]
+}
+
+// clear deletes all the cache entries
+func (c *Cache) clear() {
+	c.mux.Lock()
+	defer c.mux.Unlock()
+	for k := range c.Entries {
+		delete(c.Entries, k)
+	}
+}
+
+// RemoveEntry removes the cache entry for the defined SPN.
+func (c *Cache) RemoveEntry(spn string) {
+	c.mux.Lock()
+	defer c.mux.Unlock()
+	delete(c.Entries, spn)
+}
+
+// GetCachedTicket returns a ticket from the cache for the SPN.
+// Only a ticket that is currently valid will be returned.
+func (cl *Client) GetCachedTicket(spn string) (messages.Ticket, types.EncryptionKey, bool) {
+	if e, ok := cl.cache.getEntry(spn); ok {
+		// If within the ticket's validity time window return it
+		if time.Now().UTC().After(e.StartTime) && time.Now().UTC().Before(e.EndTime) {
+			cl.Log("ticket received from cache for %s", spn)
+			return e.Ticket, e.SessionKey, true
+		} else if time.Now().UTC().Before(e.RenewTill) {
+			e, err := cl.renewTicket(e)
+			if err != nil {
+				return e.Ticket, e.SessionKey, false
+			}
+			return e.Ticket, e.SessionKey, true
+		}
+	}
+	var tkt messages.Ticket
+	var key types.EncryptionKey
+	return tkt, key, false
+}
+
+// renewTicket renews a cache entry ticket.
+// To renew from outside the client package use GetCachedTicket
+func (cl *Client) renewTicket(e CacheEntry) (CacheEntry, error) {
+	spn := e.Ticket.SName
+	_, _, err := cl.TGSREQGenerateAndExchange(spn, e.Ticket.Realm, e.Ticket, e.SessionKey, true)
+	if err != nil {
+		return e, err
+	}
+	e, ok := cl.cache.getEntry(e.Ticket.SName.PrincipalNameString())
+	if !ok {
+		return e, errors.New("ticket was not added to cache")
+	}
+	cl.Log("ticket renewed for %s (EndTime: %v)", spn.PrincipalNameString(), e.EndTime)
+	return e, nil
+}
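A small, hedged illustration of the cache semantics above: GetCachedTicket only returns a currently valid ticket, renewing it when the renew-till time allows, and GetServiceTicket already performs this check internally. All names are placeholders.

```go
// Package example is an illustrative sketch only; all names are placeholders.
package example

import (
	"github.com/jcmturner/gokrb5/v8/client"
	"github.com/jcmturner/gokrb5/v8/messages"
	"github.com/jcmturner/gokrb5/v8/types"
)

// cachedOrFresh makes the cache-hit path explicit. GetCachedTicket only
// returns a ticket that is currently valid (renewing it when possible);
// otherwise a fresh TGS exchange is requested via GetServiceTicket,
// which also consults the cache internally.
func cachedOrFresh(cl *client.Client, spn string) (messages.Ticket, types.EncryptionKey, error) {
	if tkt, key, ok := cl.GetCachedTicket(spn); ok {
		return tkt, key, nil
	}
	return cl.GetServiceTicket(spn)
}
```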
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/client.go b/vendor/github.com/jcmturner/gokrb5/v8/client/client.go
new file mode 100644
index 0000000..074e3f1
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/client.go
@@ -0,0 +1,329 @@
+// Package client provides a client library and methods for Kerberos 5 authentication.
+package client
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/jcmturner/gokrb5/v8/config"
+	"github.com/jcmturner/gokrb5/v8/credentials"
+	"github.com/jcmturner/gokrb5/v8/crypto"
+	"github.com/jcmturner/gokrb5/v8/crypto/etype"
+	"github.com/jcmturner/gokrb5/v8/iana/errorcode"
+	"github.com/jcmturner/gokrb5/v8/iana/nametype"
+	"github.com/jcmturner/gokrb5/v8/keytab"
+	"github.com/jcmturner/gokrb5/v8/krberror"
+	"github.com/jcmturner/gokrb5/v8/messages"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// Client side configuration and state.
+type Client struct {
+	Credentials *credentials.Credentials
+	Config      *config.Config
+	settings    *Settings
+	sessions    *sessions
+	cache       *Cache
+}
+
+// NewWithPassword creates a new client from a password credential.
+// Set the realm to empty string to use the default realm from config.
+func NewWithPassword(username, realm, password string, krb5conf *config.Config, settings ...func(*Settings)) *Client {
+	creds := credentials.New(username, realm)
+	return &Client{
+		Credentials: creds.WithPassword(password),
+		Config:      krb5conf,
+		settings:    NewSettings(settings...),
+		sessions: &sessions{
+			Entries: make(map[string]*session),
+		},
+		cache: NewCache(),
+	}
+}
+
+// NewWithKeytab creates a new client from a keytab credential.
+func NewWithKeytab(username, realm string, kt *keytab.Keytab, krb5conf *config.Config, settings ...func(*Settings)) *Client {
+	creds := credentials.New(username, realm)
+	return &Client{
+		Credentials: creds.WithKeytab(kt),
+		Config:      krb5conf,
+		settings:    NewSettings(settings...),
+		sessions: &sessions{
+			Entries: make(map[string]*session),
+		},
+		cache: NewCache(),
+	}
+}
+
+// NewFromCCache creates a client from a populated client credentials cache (CCache).
+//
+// WARNING: A client created from CCache does not automatically renew TGTs and a failure will occur after the TGT expires.
+func NewFromCCache(c *credentials.CCache, krb5conf *config.Config, settings ...func(*Settings)) (*Client, error) {
+	cl := &Client{
+		Credentials: c.GetClientCredentials(),
+		Config:      krb5conf,
+		settings:    NewSettings(settings...),
+		sessions: &sessions{
+			Entries: make(map[string]*session),
+		},
+		cache: NewCache(),
+	}
+	spn := types.PrincipalName{
+		NameType:   nametype.KRB_NT_SRV_INST,
+		NameString: []string{"krbtgt", c.DefaultPrincipal.Realm},
+	}
+	cred, ok := c.GetEntry(spn)
+	if !ok {
+		return cl, errors.New("TGT not found in CCache")
+	}
+	var tgt messages.Ticket
+	err := tgt.Unmarshal(cred.Ticket)
+	if err != nil {
+		return cl, fmt.Errorf("TGT bytes in cache are not valid: %v", err)
+	}
+	cl.sessions.Entries[c.DefaultPrincipal.Realm] = &session{
+		realm:      c.DefaultPrincipal.Realm,
+		authTime:   cred.AuthTime,
+		endTime:    cred.EndTime,
+		renewTill:  cred.RenewTill,
+		tgt:        tgt,
+		sessionKey: cred.Key,
+	}
+	for _, cred := range c.GetEntries() {
+		var tkt messages.Ticket
+		err = tkt.Unmarshal(cred.Ticket)
+		if err != nil {
+			return cl, fmt.Errorf("cache entry ticket bytes are not valid: %v", err)
+		}
+		cl.cache.addEntry(
+			tkt,
+			cred.AuthTime,
+			cred.StartTime,
+			cred.EndTime,
+			cred.RenewTill,
+			cred.Key,
+		)
+	}
+	return cl, nil
+}
+
+// Key returns the client's encryption key for the specified encryption type and its kvno (kvno of zero will find latest).
+// The key can be retrieved either from the keytab or generated from the client's password.
+// If the client has both a keytab and a password defined, the keytab is favoured as the source for the key.
+// A KRBError of type KDC_ERR_PREAUTH_REQUIRED returned by the KDC can be passed in; it is needed to derive
+// the key for pre-authentication from the client's password. If a KRBError is not available, pass nil for this argument.
+func (cl *Client) Key(etype etype.EType, kvno int, krberr *messages.KRBError) (types.EncryptionKey, int, error) {
+	if cl.Credentials.HasKeytab() && etype != nil {
+		return cl.Credentials.Keytab().GetEncryptionKey(cl.Credentials.CName(), cl.Credentials.Domain(), kvno, etype.GetETypeID())
+	} else if cl.Credentials.HasPassword() {
+		if krberr != nil && krberr.ErrorCode == errorcode.KDC_ERR_PREAUTH_REQUIRED {
+			var pas types.PADataSequence
+			err := pas.Unmarshal(krberr.EData)
+			if err != nil {
+				return types.EncryptionKey{}, 0, fmt.Errorf("could not get PAData from KRBError to generate key from password: %v", err)
+			}
+			key, _, err := crypto.GetKeyFromPassword(cl.Credentials.Password(), krberr.CName, krberr.CRealm, etype.GetETypeID(), pas)
+			return key, 0, err
+		}
+		key, _, err := crypto.GetKeyFromPassword(cl.Credentials.Password(), cl.Credentials.CName(), cl.Credentials.Domain(), etype.GetETypeID(), types.PADataSequence{})
+		return key, 0, err
+	}
+	return types.EncryptionKey{}, 0, errors.New("credential has neither a keytab nor a password to generate a key")
+}
+
+// IsConfigured indicates if the client has the required values set.
+func (cl *Client) IsConfigured() (bool, error) {
+	if cl.Credentials.UserName() == "" {
+		return false, errors.New("client does not have a username")
+	}
+	if cl.Credentials.Domain() == "" {
+		return false, errors.New("client does not have a defined realm")
+	}
+	// The client needs either a password, a keytab, or an existing session (e.g. when loaded from a CCache).
+	if !cl.Credentials.HasPassword() && !cl.Credentials.HasKeytab() {
+		authTime, _, _, _, err := cl.sessionTimes(cl.Credentials.Domain())
+		if err != nil || authTime.IsZero() {
+			return false, errors.New("client has neither a keytab nor a password set and no session")
+		}
+	}
+	if !cl.Config.LibDefaults.DNSLookupKDC {
+		for _, r := range cl.Config.Realms {
+			if r.Realm == cl.Credentials.Domain() {
+				if len(r.KDC) > 0 {
+					return true, nil
+				}
+				return false, errors.New("client krb5 config does not have any defined KDCs for the default realm")
+			}
+		}
+	}
+	return true, nil
+}
+
+// Login authenticates the client to the KDC via an AS exchange.
+func (cl *Client) Login() error {
+	if ok, err := cl.IsConfigured(); !ok {
+		return err
+	}
+	if !cl.Credentials.HasPassword() && !cl.Credentials.HasKeytab() {
+		_, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain())
+		if err != nil {
+			return krberror.Errorf(err, krberror.KRBMsgError, "no user credentials available and error getting any existing session")
+		}
+		if time.Now().UTC().After(endTime) {
+			return krberror.New(krberror.KRBMsgError, "cannot login, no user credentials available and no valid existing session")
+		}
+		// no credentials but there is a session with tgt already
+		return nil
+	}
+	ASReq, err := messages.NewASReqForTGT(cl.Credentials.Domain(), cl.Config, cl.Credentials.CName())
+	if err != nil {
+		return krberror.Errorf(err, krberror.KRBMsgError, "error generating new AS_REQ")
+	}
+	ASRep, err := cl.ASExchange(cl.Credentials.Domain(), ASReq, 0)
+	if err != nil {
+		return err
+	}
+	cl.addSession(ASRep.Ticket, ASRep.DecryptedEncPart)
+	return nil
+}
+
+// AffirmLogin will only perform an AS exchange with the KDC if the client does not already have a TGT.
+func (cl *Client) AffirmLogin() error {
+	_, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain())
+	if err != nil || time.Now().UTC().After(endTime) {
+		err := cl.Login()
+		if err != nil {
+			return fmt.Errorf("could not get valid TGT for client's realm: %v", err)
+		}
+	}
+	return nil
+}
+
+// realmLogin obtains or renews a TGT and establishes a session for the realm specified.
+func (cl *Client) realmLogin(realm string) error {
+	if realm == cl.Credentials.Domain() {
+		return cl.Login()
+	}
+	_, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain())
+	if err != nil || time.Now().UTC().After(endTime) {
+		err := cl.Login()
+		if err != nil {
+			return fmt.Errorf("could not get valid TGT for client's realm: %v", err)
+		}
+	}
+	tgt, skey, err := cl.sessionTGT(cl.Credentials.Domain())
+	if err != nil {
+		return err
+	}
+
+	spn := types.PrincipalName{
+		NameType:   nametype.KRB_NT_SRV_INST,
+		NameString: []string{"krbtgt", realm},
+	}
+
+	_, tgsRep, err := cl.TGSREQGenerateAndExchange(spn, cl.Credentials.Domain(), tgt, skey, false)
+	if err != nil {
+		return err
+	}
+	cl.addSession(tgsRep.Ticket, tgsRep.DecryptedEncPart)
+
+	return nil
+}
+
+// Destroy stops the auto-renewal of all sessions and removes the sessions and cache entries from the client.
+func (cl *Client) Destroy() {
+	creds := credentials.New("", "")
+	cl.sessions.destroy()
+	cl.cache.clear()
+	cl.Credentials = creds
+	cl.Log("client destroyed")
+}
+
+// Diagnostics runs a set of checks that the client is properly configured and writes details to the io.Writer provided.
+func (cl *Client) Diagnostics(w io.Writer) error {
+	cl.Print(w)
+	var errs []string
+	if cl.Credentials.HasKeytab() {
+		var loginRealmEncTypes []int32
+		for _, e := range cl.Credentials.Keytab().Entries {
+			if e.Principal.Realm == cl.Credentials.Realm() {
+				loginRealmEncTypes = append(loginRealmEncTypes, e.Key.KeyType)
+			}
+		}
+		for _, et := range cl.Config.LibDefaults.DefaultTktEnctypeIDs {
+			var etInKt bool
+			for _, val := range loginRealmEncTypes {
+				if val == et {
+					etInKt = true
+					break
+				}
+			}
+			if !etInKt {
+				errs = append(errs, fmt.Sprintf("default_tkt_enctypes specifies %d but this enctype is not available in the client's keytab", et))
+			}
+		}
+		for _, et := range cl.Config.LibDefaults.PreferredPreauthTypes {
+			var etInKt bool
+			for _, val := range loginRealmEncTypes {
+				if int(val) == et {
+					etInKt = true
+					break
+				}
+			}
+			if !etInKt {
+				errs = append(errs, fmt.Sprintf("preferred_preauth_types specifies %d but this enctype is not available in the client's keytab", et))
+			}
+		}
+	}
+	udpCnt, udpKDC, err := cl.Config.GetKDCs(cl.Credentials.Realm(), false)
+	if err != nil {
+		errs = append(errs, fmt.Sprintf("error when resolving KDCs for UDP communication: %v", err))
+	}
+	if udpCnt < 1 {
+		errs = append(errs, "no KDCs resolved for communication via UDP.")
+	} else {
+		b, _ := json.MarshalIndent(&udpKDC, "", "  ")
+		fmt.Fprintf(w, "UDP KDCs: %s\n", string(b))
+	}
+	tcpCnt, tcpKDC, err := cl.Config.GetKDCs(cl.Credentials.Realm(), true) // true resolves the KDCs for TCP
+	if err != nil {
+		errs = append(errs, fmt.Sprintf("error when resolving KDCs for TCP communication: %v", err))
+	}
+	if tcpCnt < 1 {
+		errs = append(errs, "no KDCs resolved for communication via TCP.")
+	} else {
+		b, _ := json.MarshalIndent(&tcpKDC, "", "  ")
+		fmt.Fprintf(w, "TCP KDCs: %s\n", string(b))
+	}
+
+	if len(errs) < 1 {
+		return nil
+	}
+	err = errors.New(strings.Join(errs, "\n"))
+	return err
+}
+
+// Print writes the details of the client to the io.Writer provided.
+func (cl *Client) Print(w io.Writer) {
+	c, _ := cl.Credentials.JSON()
+	fmt.Fprintf(w, "Credentials:\n%s\n", c)
+
+	s, _ := cl.sessions.JSON()
+	fmt.Fprintf(w, "TGT Sessions:\n%s\n", s)
+
+	c, _ = cl.cache.JSON()
+	fmt.Fprintf(w, "Service ticket cache:\n%s\n", c)
+
+	s, _ = cl.settings.JSON()
+	fmt.Fprintf(w, "Settings:\n%s\n", s)
+
+	j, _ := cl.Config.JSON()
+	fmt.Fprintf(w, "Krb5 config:\n%s\n", j)
+
+	k, _ := cl.Credentials.Keytab().JSON()
+	fmt.Fprintf(w, "Keytab:\n%s\n", k)
+}
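+
+// Illustrative sketch (not part of the upstream library): creating a client
+// from a password credential and performing the AS exchange. The username,
+// realm, password and cfg (*config.Config) values are placeholders.
+//
+//	cl := client.NewWithPassword("username", "EXAMPLE.COM", "password", cfg, client.DisablePAFXFAST(true))
+//	if err := cl.Login(); err != nil {
+//		// handle the failed AS exchange
+//	}
+//	defer cl.Destroy()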
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/network.go b/vendor/github.com/jcmturner/gokrb5/v8/client/network.go
new file mode 100644
index 0000000..634f015
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/network.go
@@ -0,0 +1,218 @@
+package client
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"time"
+
+	"github.com/jcmturner/gokrb5/v8/iana/errorcode"
+	"github.com/jcmturner/gokrb5/v8/messages"
+)
+
+// sendToKDC performs network actions to send data to the KDC.
+func (cl *Client) sendToKDC(b []byte, realm string) ([]byte, error) {
+	var rb []byte
+	if cl.Config.LibDefaults.UDPPreferenceLimit == 1 {
+		//1 means we should always use TCP
+		rb, errtcp := cl.sendKDCTCP(realm, b)
+		if errtcp != nil {
+			if e, ok := errtcp.(messages.KRBError); ok {
+				return rb, e
+			}
+			return rb, fmt.Errorf("communication error with KDC via TCP: %v", errtcp)
+		}
+		return rb, nil
+	}
+	if len(b) <= cl.Config.LibDefaults.UDPPreferenceLimit {
+		//Try UDP first, TCP second
+		rb, errudp := cl.sendKDCUDP(realm, b)
+		if errudp != nil {
+			if e, ok := errudp.(messages.KRBError); ok && e.ErrorCode != errorcode.KRB_ERR_RESPONSE_TOO_BIG {
+				// Got a KRBError from KDC
+				// If this is not a KRB_ERR_RESPONSE_TOO_BIG we will return immediately otherwise will try TCP.
+				return rb, e
+			}
+			// Try TCP
+			r, errtcp := cl.sendKDCTCP(realm, b)
+			if errtcp != nil {
+				if e, ok := errtcp.(messages.KRBError); ok {
+					// Got a KRBError
+					return r, e
+				}
+				return r, fmt.Errorf("failed to communicate with KDC. Attempts made with UDP (%v) and then TCP (%v)", errudp, errtcp)
+			}
+			rb = r
+		}
+		return rb, nil
+	}
+	//Try TCP first, UDP second
+	rb, errtcp := cl.sendKDCTCP(realm, b)
+	if errtcp != nil {
+		if e, ok := errtcp.(messages.KRBError); ok {
+			// Got a KRBError from KDC so returning and not trying UDP.
+			return rb, e
+		}
+		// Use a separate variable here so a successful UDP response is not
+		// discarded by shadowing the rb declared above.
+		r, errudp := cl.sendKDCUDP(realm, b)
+		if errudp != nil {
+			if e, ok := errudp.(messages.KRBError); ok {
+				// Got a KRBError
+				return rb, e
+			}
+			return rb, fmt.Errorf("failed to communicate with KDC. Attempts made with TCP (%v) and then UDP (%v)", errtcp, errudp)
+		}
+		rb = r
+	}
+	return rb, nil
+}
+
+// sendKDCUDP sends bytes to the KDC via UDP.
+func (cl *Client) sendKDCUDP(realm string, b []byte) ([]byte, error) {
+	var r []byte
+	_, kdcs, err := cl.Config.GetKDCs(realm, false)
+	if err != nil {
+		return r, err
+	}
+	r, err = dialSendUDP(kdcs, b)
+	if err != nil {
+		return r, err
+	}
+	return checkForKRBError(r)
+}
+
+// dialSendUDP establishes a UDP connection to a KDC.
+func dialSendUDP(kdcs map[int]string, b []byte) ([]byte, error) {
+	var errs []string
+	for i := 1; i <= len(kdcs); i++ {
+		udpAddr, err := net.ResolveUDPAddr("udp", kdcs[i])
+		if err != nil {
+			errs = append(errs, fmt.Sprintf("error resolving KDC address: %v", err))
+			continue
+		}
+
+		conn, err := net.DialTimeout("udp", udpAddr.String(), 5*time.Second)
+		if err != nil {
+			errs = append(errs, fmt.Sprintf("error setting dial timeout on connection to %s: %v", kdcs[i], err))
+			continue
+		}
+		if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
+			errs = append(errs, fmt.Sprintf("error setting deadline on connection to %s: %v", kdcs[i], err))
+			continue
+		}
+		// conn is guaranteed to be a UDPConn
+		rb, err := sendUDP(conn.(*net.UDPConn), b)
+		if err != nil {
+			errs = append(errs, fmt.Sprintf("error sneding to %s: %v", kdcs[i], err))
+			continue
+		}
+		return rb, nil
+	}
+	return nil, fmt.Errorf("error sending to a KDC: %s", strings.Join(errs, "; "))
+}
+
+// sendUDP sends bytes to connection over UDP.
+func sendUDP(conn *net.UDPConn, b []byte) ([]byte, error) {
+	var r []byte
+	defer conn.Close()
+	_, err := conn.Write(b)
+	if err != nil {
+		return r, fmt.Errorf("error sending to (%s): %v", conn.RemoteAddr().String(), err)
+	}
+	udpbuf := make([]byte, 4096)
+	n, _, err := conn.ReadFrom(udpbuf)
+	r = udpbuf[:n]
+	if err != nil {
+		return r, fmt.Errorf("sending over UDP failed to %s: %v", conn.RemoteAddr().String(), err)
+	}
+	if len(r) < 1 {
+		return r, fmt.Errorf("no response data from %s", conn.RemoteAddr().String())
+	}
+	return r, nil
+}
+
+// sendKDCTCP sends bytes to the KDC via TCP.
+func (cl *Client) sendKDCTCP(realm string, b []byte) ([]byte, error) {
+	var r []byte
+	_, kdcs, err := cl.Config.GetKDCs(realm, true)
+	if err != nil {
+		return r, err
+	}
+	r, err = dialSendTCP(kdcs, b)
+	if err != nil {
+		return r, err
+	}
+	return checkForKRBError(r)
+}
+
+// dialSendTCP establishes a TCP connection to a KDC and sends the request bytes.
+func dialSendTCP(kdcs map[int]string, b []byte) ([]byte, error) {
+	var errs []string
+	for i := 1; i <= len(kdcs); i++ {
+		tcpAddr, err := net.ResolveTCPAddr("tcp", kdcs[i])
+		if err != nil {
+			errs = append(errs, fmt.Sprintf("error resolving KDC address: %v", err))
+			continue
+		}
+
+		conn, err := net.DialTimeout("tcp", tcpAddr.String(), 5*time.Second)
+		if err != nil {
+			errs = append(errs, fmt.Sprintf("error setting dial timeout on connection to %s: %v", kdcs[i], err))
+			continue
+		}
+		if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
+			errs = append(errs, fmt.Sprintf("error setting deadline on connection to %s: %v", kdcs[i], err))
+			continue
+		}
+		// conn is guaranteed to be a TCPConn
+		rb, err := sendTCP(conn.(*net.TCPConn), b)
+		if err != nil {
+			errs = append(errs, fmt.Sprintf("error sneding to %s: %v", kdcs[i], err))
+			continue
+		}
+		return rb, nil
+	}
+	return nil, errors.New("error in getting a TCP connection to any of the KDCs")
+}
+
+// sendTCP sends bytes to connection over TCP.
+func sendTCP(conn *net.TCPConn, b []byte) ([]byte, error) {
+	defer conn.Close()
+	var r []byte
+	// RFC 4120 7.2.2 specifies the first 4 bytes indicate the length of the message in big endian order.
+	hb := make([]byte, 4, 4)
+	binary.BigEndian.PutUint32(hb, uint32(len(b)))
+	b = append(hb, b...)
+
+	_, err := conn.Write(b)
+	if err != nil {
+		return r, fmt.Errorf("error sending to KDC (%s): %v", conn.RemoteAddr().String(), err)
+	}
+
+	sh := make([]byte, 4, 4)
+	_, err = io.ReadFull(conn, sh) // ensure all 4 length header bytes are read
+	if err != nil {
+		return r, fmt.Errorf("error reading response size header: %v", err)
+	}
+	s := binary.BigEndian.Uint32(sh)
+
+	rb := make([]byte, s, s)
+	_, err = io.ReadFull(conn, rb)
+	if err != nil {
+		return r, fmt.Errorf("error reading response: %v", err)
+	}
+	if len(rb) < 1 {
+		return r, fmt.Errorf("no response data from KDC %s", conn.RemoteAddr().String())
+	}
+	return rb, nil
+}
+
+// checkForKRBError checks if the response bytes from the KDC are a KRBError.
+func checkForKRBError(b []byte) ([]byte, error) {
+	var KRBErr messages.KRBError
+	if err := KRBErr.Unmarshal(b); err == nil {
+		return b, KRBErr
+	}
+	return b, nil
+}
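+
+// Illustrative sketch (not part of the upstream library) of the RFC 4120
+// section 7.2.2 framing applied by sendTCP: the marshalled request is prefixed
+// with its length as a 4 byte big endian unsigned integer. msg is a placeholder
+// for the marshalled KDC request bytes.
+//
+//	hdr := make([]byte, 4)
+//	binary.BigEndian.PutUint32(hdr, uint32(len(msg)))
+//	framed := append(hdr, msg...)
+//	_ = framed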
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/passwd.go b/vendor/github.com/jcmturner/gokrb5/v8/client/passwd.go
new file mode 100644
index 0000000..fe20559
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/passwd.go
@@ -0,0 +1,75 @@
+package client
+
+import (
+	"fmt"
+
+	"github.com/jcmturner/gokrb5/v8/kadmin"
+	"github.com/jcmturner/gokrb5/v8/messages"
+)
+
+// Kpasswd server response codes.
+const (
+	KRB5_KPASSWD_SUCCESS             = 0
+	KRB5_KPASSWD_MALFORMED           = 1
+	KRB5_KPASSWD_HARDERROR           = 2
+	KRB5_KPASSWD_AUTHERROR           = 3
+	KRB5_KPASSWD_SOFTERROR           = 4
+	KRB5_KPASSWD_ACCESSDENIED        = 5
+	KRB5_KPASSWD_BAD_VERSION         = 6
+	KRB5_KPASSWD_INITIAL_FLAG_NEEDED = 7
+)
+
+// ChangePasswd changes the password of the client to the value provided.
+func (cl *Client) ChangePasswd(newPasswd string) (bool, error) {
+	ASReq, err := messages.NewASReqForChgPasswd(cl.Credentials.Domain(), cl.Config, cl.Credentials.CName())
+	if err != nil {
+		return false, err
+	}
+	ASRep, err := cl.ASExchange(cl.Credentials.Domain(), ASReq, 0)
+	if err != nil {
+		return false, err
+	}
+
+	msg, key, err := kadmin.ChangePasswdMsg(cl.Credentials.CName(), cl.Credentials.Domain(), newPasswd, ASRep.Ticket, ASRep.DecryptedEncPart.Key)
+	if err != nil {
+		return false, err
+	}
+	r, err := cl.sendToKPasswd(msg)
+	if err != nil {
+		return false, err
+	}
+	err = r.Decrypt(key)
+	if err != nil {
+		return false, err
+	}
+	if r.ResultCode != KRB5_KPASSWD_SUCCESS {
+		return false, fmt.Errorf("error response from kadmin: code: %d; result: %s; krberror: %v", r.ResultCode, r.Result, r.KRBError)
+	}
+	cl.Credentials.WithPassword(newPasswd)
+	return true, nil
+}
+
+func (cl *Client) sendToKPasswd(msg kadmin.Request) (r kadmin.Reply, err error) {
+	_, kps, err := cl.Config.GetKpasswdServers(cl.Credentials.Domain(), true)
+	if err != nil {
+		return
+	}
+	b, err := msg.Marshal()
+	if err != nil {
+		return
+	}
+	var rb []byte
+	if len(b) <= cl.Config.LibDefaults.UDPPreferenceLimit {
+		rb, err = dialSendUDP(kps, b)
+		if err != nil {
+			return
+		}
+	} else {
+		rb, err = dialSendTCP(kps, b)
+		if err != nil {
+			return
+		}
+	}
+	err = r.Unmarshal(rb)
+	return
+}
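+
+// Illustrative sketch (not part of the upstream library): changing the password
+// of an already constructed client. cl and the new password are placeholders.
+//
+//	ok, err := cl.ChangePasswd("n3w-P@ssw0rd")
+//	if err != nil || !ok {
+//		// the kpasswd server rejected the request or could not be reached
+//	}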
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/session.go b/vendor/github.com/jcmturner/gokrb5/v8/client/session.go
new file mode 100644
index 0000000..f7654d0
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/session.go
@@ -0,0 +1,295 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/jcmturner/gokrb5/v8/iana/nametype"
+	"github.com/jcmturner/gokrb5/v8/krberror"
+	"github.com/jcmturner/gokrb5/v8/messages"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// sessions hold TGTs and are keyed on the realm name
+type sessions struct {
+	Entries map[string]*session
+	mux     sync.RWMutex
+}
+
+// destroy erases all sessions
+func (s *sessions) destroy() {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+	for k, e := range s.Entries {
+		e.destroy()
+		delete(s.Entries, k)
+	}
+}
+
+// update replaces a session with the one provided or adds it as a new one
+func (s *sessions) update(sess *session) {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+	// if a session already exists for this, cancel its auto renew.
+	if i, ok := s.Entries[sess.realm]; ok {
+		if i != sess {
+			// Session in the sessions cache is not the same as one provided.
+			// Cancel the one in the cache and add this one.
+			i.mux.Lock()
+			defer i.mux.Unlock()
+			i.cancel <- true
+			s.Entries[sess.realm] = sess
+			return
+		}
+	}
+	// No session for this realm was found so just add it
+	s.Entries[sess.realm] = sess
+}
+
+// get returns the session for the realm specified
+func (s *sessions) get(realm string) (*session, bool) {
+	s.mux.RLock()
+	defer s.mux.RUnlock()
+	sess, ok := s.Entries[realm]
+	return sess, ok
+}
+
+// session holds the TGT details for a realm
+type session struct {
+	realm                string
+	authTime             time.Time
+	endTime              time.Time
+	renewTill            time.Time
+	tgt                  messages.Ticket
+	sessionKey           types.EncryptionKey
+	sessionKeyExpiration time.Time
+	cancel               chan bool
+	mux                  sync.RWMutex
+}
+
+// jsonSession is used to marshal selected session information to JSON format
+type jsonSession struct {
+	Realm                string
+	AuthTime             time.Time
+	EndTime              time.Time
+	RenewTill            time.Time
+	SessionKeyExpiration time.Time
+}
+
+// addSession adds a session for a realm with a TGT to the client's session cache.
+// A goroutine is started to automatically renew the TGT before expiry.
+func (cl *Client) addSession(tgt messages.Ticket, dep messages.EncKDCRepPart) {
+	if strings.ToLower(tgt.SName.NameString[0]) != "krbtgt" {
+		// Not a TGT
+		return
+	}
+	realm := tgt.SName.NameString[len(tgt.SName.NameString)-1]
+	s := &session{
+		realm:                realm,
+		authTime:             dep.AuthTime,
+		endTime:              dep.EndTime,
+		renewTill:            dep.RenewTill,
+		tgt:                  tgt,
+		sessionKey:           dep.Key,
+		sessionKeyExpiration: dep.KeyExpiration,
+	}
+	cl.sessions.update(s)
+	cl.enableAutoSessionRenewal(s)
+	cl.Log("TGT session added for %s (EndTime: %v)", realm, dep.EndTime)
+}
+
+// update overwrites the session details with those from the TGT and decrypted encPart
+func (s *session) update(tgt messages.Ticket, dep messages.EncKDCRepPart) {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+	s.authTime = dep.AuthTime
+	s.endTime = dep.EndTime
+	s.renewTill = dep.RenewTill
+	s.tgt = tgt
+	s.sessionKey = dep.Key
+	s.sessionKeyExpiration = dep.KeyExpiration
+}
+
+// destroy will cancel any auto renewal of the session and set the expiration times to the current time
+func (s *session) destroy() {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+	if s.cancel != nil {
+		s.cancel <- true
+	}
+	s.endTime = time.Now().UTC()
+	s.renewTill = s.endTime
+	s.sessionKeyExpiration = s.endTime
+}
+
+// valid informs if the TGT is still within the valid time window
+func (s *session) valid() bool {
+	s.mux.RLock()
+	defer s.mux.RUnlock()
+	t := time.Now().UTC()
+	if t.Before(s.endTime) && s.authTime.Before(t) {
+		return true
+	}
+	return false
+}
+
+// tgtDetails is a thread safe way to get the session's realm, TGT and session key values
+func (s *session) tgtDetails() (string, messages.Ticket, types.EncryptionKey) {
+	s.mux.RLock()
+	defer s.mux.RUnlock()
+	return s.realm, s.tgt, s.sessionKey
+}
+
+// timeDetails is a thread safe way to get the session's validity time values
+func (s *session) timeDetails() (string, time.Time, time.Time, time.Time, time.Time) {
+	s.mux.RLock()
+	defer s.mux.RUnlock()
+	return s.realm, s.authTime, s.endTime, s.renewTill, s.sessionKeyExpiration
+}
+
+// JSON returns information about the held sessions in JSON format.
+func (s *sessions) JSON() (string, error) {
+	s.mux.RLock()
+	defer s.mux.RUnlock()
+	var js []jsonSession
+	keys := make([]string, 0, len(s.Entries))
+	for k := range s.Entries {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		r, at, et, rt, kt := s.Entries[k].timeDetails()
+		j := jsonSession{
+			Realm:                r,
+			AuthTime:             at,
+			EndTime:              et,
+			RenewTill:            rt,
+			SessionKeyExpiration: kt,
+		}
+		js = append(js, j)
+	}
+	b, err := json.MarshalIndent(js, "", "  ")
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
+
+// enableAutoSessionRenewal turns on the automatic renewal for the client's TGT session.
+func (cl *Client) enableAutoSessionRenewal(s *session) {
+	var timer *time.Timer
+	s.mux.Lock()
+	s.cancel = make(chan bool, 1)
+	s.mux.Unlock()
+	go func(s *session) {
+		for {
+			s.mux.RLock()
+			w := (s.endTime.Sub(time.Now().UTC()) * 5) / 6
+			s.mux.RUnlock()
+			if w < 0 {
+				return
+			}
+			timer = time.NewTimer(w)
+			select {
+			case <-timer.C:
+				renewal, err := cl.refreshSession(s)
+				if err != nil {
+					cl.Log("error refreshing session: %v", err)
+				}
+				if !renewal && err == nil {
+					// end this goroutine as there will have been a new login and new auto renewal goroutine created.
+					return
+				}
+			case <-s.cancel:
+				// cancel has been called. Stop the timer and exit.
+				timer.Stop()
+				return
+			}
+		}
+	}(s)
+}
+
+// renewTGT renews the client's TGT session.
+func (cl *Client) renewTGT(s *session) error {
+	realm, tgt, skey := s.tgtDetails()
+	spn := types.PrincipalName{
+		NameType:   nametype.KRB_NT_SRV_INST,
+		NameString: []string{"krbtgt", realm},
+	}
+	_, tgsRep, err := cl.TGSREQGenerateAndExchange(spn, cl.Credentials.Domain(), tgt, skey, true)
+	if err != nil {
+		return krberror.Errorf(err, krberror.KRBMsgError, "error renewing TGT for %s", realm)
+	}
+	s.update(tgsRep.Ticket, tgsRep.DecryptedEncPart)
+	cl.sessions.update(s)
+	cl.Log("TGT session renewed for %s (EndTime: %v)", realm, tgsRep.DecryptedEncPart.EndTime)
+	return nil
+}
+
+// refreshSession updates the TGT session either by renewing it or by performing a new login.
+// The boolean indicates if the update was a renewal.
+func (cl *Client) refreshSession(s *session) (bool, error) {
+	s.mux.RLock()
+	realm := s.realm
+	renewTill := s.renewTill
+	s.mux.RUnlock()
+	cl.Log("refreshing TGT session for %s", realm)
+	if time.Now().UTC().Before(renewTill) {
+		err := cl.renewTGT(s)
+		return true, err
+	}
+	err := cl.realmLogin(realm)
+	return false, err
+}
+
+// ensureValidSession makes sure there is a valid session for the realm
+func (cl *Client) ensureValidSession(realm string) error {
+	s, ok := cl.sessions.get(realm)
+	if ok {
+		s.mux.RLock()
+		d := s.endTime.Sub(s.authTime) / 6
+		if s.endTime.Sub(time.Now().UTC()) > d {
+			s.mux.RUnlock()
+			return nil
+		}
+		s.mux.RUnlock()
+		_, err := cl.refreshSession(s)
+		return err
+	}
+	return cl.realmLogin(realm)
+}
+
+// sessionTGT is a thread safe way to get the TGT and session key values for a realm
+func (cl *Client) sessionTGT(realm string) (tgt messages.Ticket, sessionKey types.EncryptionKey, err error) {
+	err = cl.ensureValidSession(realm)
+	if err != nil {
+		return
+	}
+	s, ok := cl.sessions.get(realm)
+	if !ok {
+		err = fmt.Errorf("could not find TGT session for %s", realm)
+		return
+	}
+	_, tgt, sessionKey = s.tgtDetails()
+	return
+}
+
+// sessionTimes provides the timing information with regards to a session for the realm specified.
+func (cl *Client) sessionTimes(realm string) (authTime, endTime, renewTime, sessionExp time.Time, err error) {
+	s, ok := cl.sessions.get(realm)
+	if !ok {
+		err = fmt.Errorf("could not find TGT session for %s", realm)
+		return
+	}
+	_, authTime, endTime, renewTime, sessionExp = s.timeDetails()
+	return
+}
+
+// spnRealm resolves the realm name of a service principal name
+func (cl *Client) spnRealm(spn types.PrincipalName) string {
+	return cl.Config.ResolveRealm(spn.NameString[len(spn.NameString)-1])
+}
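+
+// Illustrative note (not part of the upstream library): enableAutoSessionRenewal
+// waits for 5/6 of the remaining TGT lifetime before attempting a refresh, and
+// refreshSession renews only while renewTill has not passed, otherwise it falls
+// back to a fresh realmLogin. For example, with 12 hours of validity remaining:
+//
+//	remaining := 12 * time.Hour
+//	wait := (remaining * 5) / 6 // 10h before the renewal attempt fires
+//	_ = wait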
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/settings.go b/vendor/github.com/jcmturner/gokrb5/v8/client/settings.go
new file mode 100644
index 0000000..bcd3945
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/settings.go
@@ -0,0 +1,93 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+)
+
+// Settings holds optional client settings.
+type Settings struct {
+	disablePAFXFast         bool
+	assumePreAuthentication bool
+	preAuthEType            int32
+	logger                  *log.Logger
+}
+
+// jsonSettings is used when marshaling the Settings details to JSON format.
+type jsonSettings struct {
+	DisablePAFXFast         bool
+	AssumePreAuthentication bool
+}
+
+// NewSettings creates a new client settings struct.
+func NewSettings(settings ...func(*Settings)) *Settings {
+	s := new(Settings)
+	for _, set := range settings {
+		set(s)
+	}
+	return s
+}
+
+// DisablePAFXFAST is used to configure the client to not use PA_FX_FAST.
+//
+// s := NewSettings(DisablePAFXFAST(true))
+func DisablePAFXFAST(b bool) func(*Settings) {
+	return func(s *Settings) {
+		s.disablePAFXFast = b
+	}
+}
+
+// DisablePAFXFAST indicates if the client should disable the use of PA_FX_FAST.
+func (s *Settings) DisablePAFXFAST() bool {
+	return s.disablePAFXFast
+}
+
+// AssumePreAuthentication is used to configure the client to assume pre-authentication is required.
+//
+// s := NewSettings(AssumePreAuthentication(true))
+func AssumePreAuthentication(b bool) func(*Settings) {
+	return func(s *Settings) {
+		s.assumePreAuthentication = b
+	}
+}
+
+// AssumePreAuthentication indicates if the client should proactively assume using pre-authentication.
+func (s *Settings) AssumePreAuthentication() bool {
+	return s.assumePreAuthentication
+}
+
+// Logger is used to configure the client with a logger.
+//
+// s := NewSettings(Logger(l))
+func Logger(l *log.Logger) func(*Settings) {
+	return func(s *Settings) {
+		s.logger = l
+	}
+}
+
+// Logger returns the client logger instance.
+func (s *Settings) Logger() *log.Logger {
+	return s.logger
+}
+
+// Log will write to the client's logger if it is configured.
+func (cl *Client) Log(format string, v ...interface{}) {
+	if cl.settings.Logger() != nil {
+		cl.settings.Logger().Output(2, fmt.Sprintf(format, v...))
+	}
+}
+
+// JSON returns a JSON representation of the settings.
+func (s *Settings) JSON() (string, error) {
+	js := jsonSettings{
+		DisablePAFXFast:         s.disablePAFXFast,
+		AssumePreAuthentication: s.assumePreAuthentication,
+	}
+	b, err := json.MarshalIndent(js, "", "  ")
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
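+
+// Illustrative sketch (not part of the upstream library): combining option
+// functions. The logger destination is a placeholder and assumes the caller
+// imports the standard library os package.
+//
+//	l := log.New(os.Stderr, "gokrb5 client: ", log.LstdFlags)
+//	s := NewSettings(DisablePAFXFAST(true), AssumePreAuthentication(true), Logger(l))
+//	_ = s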
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/config/error.go b/vendor/github.com/jcmturner/gokrb5/v8/config/error.go
new file mode 100644
index 0000000..1fbda51
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/config/error.go
@@ -0,0 +1,30 @@
+package config
+
+import "fmt"
+
+// UnsupportedDirective error.
+type UnsupportedDirective struct {
+	text string
+}
+
+// Error implements the error interface for unsupported directives.
+func (e UnsupportedDirective) Error() string {
+	return e.text
+}
+
+// Invalid config error.
+type Invalid struct {
+	text string
+}
+
+// Error implements the error interface for invalid config error.
+func (e Invalid) Error() string {
+	return e.text
+}
+
+// InvalidErrorf creates a new Invalid error.
+func InvalidErrorf(format string, a ...interface{}) Invalid {
+	return Invalid{
+		text: fmt.Sprintf("invalid krb5 config "+format, a...),
+	}
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/config/hosts.go b/vendor/github.com/jcmturner/gokrb5/v8/config/hosts.go
new file mode 100644
index 0000000..3f22c70
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/config/hosts.go
@@ -0,0 +1,141 @@
+package config
+
+import (
+	"fmt"
+	"math/rand"
+	"net"
+	"strconv"
+	"strings"
+
+	"github.com/jcmturner/dnsutils/v2"
+)
+
+// GetKDCs returns the count of KDCs available and a map of KDC host names keyed on preference order.
+func (c *Config) GetKDCs(realm string, tcp bool) (int, map[int]string, error) {
+	if realm == "" {
+		realm = c.LibDefaults.DefaultRealm
+	}
+	kdcs := make(map[int]string)
+	var count int
+
+	// Get the KDCs from the krb5.conf.
+	var ks []string
+	for _, r := range c.Realms {
+		if r.Realm != realm {
+			continue
+		}
+		ks = r.KDC
+	}
+	count = len(ks)
+
+	if count > 0 {
+		// Order the kdcs randomly for preference.
+		kdcs = randServOrder(ks)
+		return count, kdcs, nil
+	}
+
+	if !c.LibDefaults.DNSLookupKDC {
+		return count, kdcs, fmt.Errorf("no KDCs defined in configuration for realm %s", realm)
+	}
+
+	// Use DNS to resolve kerberos SRV records.
+	proto := "udp"
+	if tcp {
+		proto = "tcp"
+	}
+	index, addrs, err := dnsutils.OrderedSRV("kerberos", proto, realm)
+	if err != nil {
+		return count, kdcs, err
+	}
+	if len(addrs) < 1 {
+		return count, kdcs, fmt.Errorf("no KDC SRV records found for realm %s", realm)
+	}
+	count = index
+	for k, v := range addrs {
+		kdcs[k] = strings.TrimRight(v.Target, ".") + ":" + strconv.Itoa(int(v.Port))
+	}
+	return count, kdcs, nil
+}
+
+// GetKpasswdServers returns the count of kpasswd servers available and a map of kpasswd host names keyed on preference order.
+// https://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html#realms - see kpasswd_server section
+func (c *Config) GetKpasswdServers(realm string, tcp bool) (int, map[int]string, error) {
+	kdcs := make(map[int]string)
+	var count int
+
+	// Use DNS to resolve kerberos SRV records if configured to do so in krb5.conf.
+	if c.LibDefaults.DNSLookupKDC {
+		proto := "udp"
+		if tcp {
+			proto = "tcp"
+		}
+		c, addrs, err := dnsutils.OrderedSRV("kpasswd", proto, realm)
+		if err != nil {
+			return count, kdcs, err
+		}
+		if c < 1 {
+			c, addrs, err = dnsutils.OrderedSRV("kerberos-adm", proto, realm)
+			if err != nil {
+				return count, kdcs, err
+			}
+		}
+		if len(addrs) < 1 {
+			return count, kdcs, fmt.Errorf("no kpasswd or kadmin SRV records found for realm %s", realm)
+		}
+		count = c
+		for k, v := range addrs {
+			kdcs[k] = strings.TrimRight(v.Target, ".") + ":" + strconv.Itoa(int(v.Port))
+		}
+	} else {
+		// Get the kpasswd or admin servers from the krb5.conf and order them randomly for preference.
+		var ks []string
+		var ka []string
+		for _, r := range c.Realms {
+			if r.Realm == realm {
+				ks = r.KPasswdServer
+				ka = r.AdminServer
+				break
+			}
+		}
+		if len(ks) < 1 {
+			for _, k := range ka {
+				h, _, err := net.SplitHostPort(k)
+				if err != nil {
+					continue
+				}
+				ks = append(ks, h+":464")
+			}
+		}
+		count = len(ks)
+		if count < 1 {
+			return count, kdcs, fmt.Errorf("no kpasswd or kadmin defined in configuration for realm %s", realm)
+		}
+		kdcs = randServOrder(ks)
+	}
+	return count, kdcs, nil
+}
+
+func randServOrder(ks []string) map[int]string {
+	kdcs := make(map[int]string)
+	count := len(ks)
+	i := 1
+	if count > 1 {
+		l := len(ks)
+		for l > 0 {
+			ri := rand.Intn(l)
+			kdcs[i] = ks[ri]
+			if l > 1 {
+				// Remove the entry from the source slice by swapping with the last entry and truncating
+				ks[len(ks)-1], ks[ri] = ks[ri], ks[len(ks)-1]
+				ks = ks[:len(ks)-1]
+				l = len(ks)
+			} else {
+				l = 0
+			}
+			i++
+		}
+	} else {
+		kdcs[i] = ks[0]
+	}
+	return kdcs
+}
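+
+// Illustrative sketch (not part of the upstream library): resolving KDCs for a
+// realm from a parsed configuration. cfg and the realm name are placeholders.
+//
+//	count, kdcs, err := cfg.GetKDCs("EXAMPLE.COM", true) // true selects TCP when SRV lookups are used
+//	if err == nil && count > 0 {
+//		first := kdcs[1] // keyed on preference order starting at 1, value is "host:port"
+//		_ = first
+//	}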
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go b/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go
new file mode 100644
index 0000000..a763843
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go
@@ -0,0 +1,728 @@
+// Package config implements KRB5 client and service configuration as described at https://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html
+package config
+
+import (
+	"bufio"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"os/user"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/iana/etypeID"
+)
+
+// Config represents the KRB5 configuration.
+type Config struct {
+	LibDefaults LibDefaults
+	Realms      []Realm
+	DomainRealm DomainRealm
+	//CaPaths
+	//AppDefaults
+	//Plugins
+}
+
+// WeakETypeList is a list of encryption types that have been deemed weak.
+const WeakETypeList = "des-cbc-crc des-cbc-md4 des-cbc-md5 des-cbc-raw des3-cbc-raw des-hmac-sha1 arcfour-hmac-exp rc4-hmac-exp arcfour-hmac-md5-exp des"
+
+// New creates a new config struct instance.
+func New() *Config {
+	d := make(DomainRealm)
+	return &Config{
+		LibDefaults: newLibDefaults(),
+		DomainRealm: d,
+	}
+}
+
+// LibDefaults represents the [libdefaults] section of the configuration.
+type LibDefaults struct {
+	AllowWeakCrypto bool //default false
+	// ap_req_checksum_type int //unlikely to support this
+	Canonicalize bool          //default false
+	CCacheType   int           //default is 4. unlikely to implement older
+	Clockskew    time.Duration //max allowed skew in seconds, default 300
+	//Default_ccache_name string // default /tmp/krb5cc_%{uid} //Not implementing as will hold in memory
+	DefaultClientKeytabName string //default /usr/local/var/krb5/user/%{euid}/client.keytab
+	DefaultKeytabName       string //default /etc/krb5.keytab
+	DefaultRealm            string
+	DefaultTGSEnctypes      []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4
+	DefaultTktEnctypes      []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4
+	DefaultTGSEnctypeIDs    []int32  //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4
+	DefaultTktEnctypeIDs    []int32  //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4
+	DNSCanonicalizeHostname bool     //default true
+	DNSLookupKDC            bool     //default false
+	DNSLookupRealm          bool
+	ExtraAddresses          []net.IP       //Not implementing yet
+	Forwardable             bool           //default false
+	IgnoreAcceptorHostname  bool           //default false
+	K5LoginAuthoritative    bool           //default false
+	K5LoginDirectory        string         //default user's home directory. Must be owned by the user or root
+	KDCDefaultOptions       asn1.BitString //default 0x00000010 (KDC_OPT_RENEWABLE_OK)
+	KDCTimeSync             int            //default 1
+	//kdc_req_checksum_type int //unlikely to implement as for very old KDCs
+	NoAddresses         bool     //default true
+	PermittedEnctypes   []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4
+	PermittedEnctypeIDs []int32
+	//plugin_base_dir string //not supporting plugins
+	PreferredPreauthTypes []int         //default “17, 16, 15, 14”, which forces libkrb5 to attempt to use PKINIT if it is supported
+	Proxiable             bool          //default false
+	RDNS                  bool          //default true
+	RealmTryDomains       int           //default -1
+	RenewLifetime         time.Duration //default 0
+	SafeChecksumType      int           //default 8
+	TicketLifetime        time.Duration //default 1 day
+	UDPPreferenceLimit    int           // 1 means to always use tcp. MIT krb5 has a default value of 1465, and it prevents user setting more than 32700.
+	VerifyAPReqNofail     bool          //default false
+}
+
+// Create a new LibDefaults struct.
+func newLibDefaults() LibDefaults {
+	uid := "0"
+	var hdir string
+	usr, _ := user.Current()
+	if usr != nil {
+		uid = usr.Uid
+		hdir = usr.HomeDir
+	}
+	opts := asn1.BitString{}
+	opts.Bytes, _ = hex.DecodeString("00000010")
+	opts.BitLength = len(opts.Bytes) * 8
+	return LibDefaults{
+		CCacheType:              4,
+		Clockskew:               time.Duration(300) * time.Second,
+		DefaultClientKeytabName: fmt.Sprintf("/usr/local/var/krb5/user/%s/client.keytab", uid),
+		DefaultKeytabName:       "/etc/krb5.keytab",
+		DefaultTGSEnctypes:      []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"},
+		DefaultTktEnctypes:      []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"},
+		DNSCanonicalizeHostname: true,
+		K5LoginDirectory:        hdir,
+		KDCDefaultOptions:       opts,
+		KDCTimeSync:             1,
+		NoAddresses:             true,
+		PermittedEnctypes:       []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"},
+		RDNS:                    true,
+		RealmTryDomains:         -1,
+		SafeChecksumType:        8,
+		TicketLifetime:          time.Duration(24) * time.Hour,
+		UDPPreferenceLimit:      1465,
+		PreferredPreauthTypes:   []int{17, 16, 15, 14},
+	}
+}
+
+// Parse the lines of the [libdefaults] section of the configuration into the LibDefaults struct.
+func (l *LibDefaults) parseLines(lines []string) error {
+	for _, line := range lines {
+		//Remove comments after the values
+		if idx := strings.IndexAny(line, "#;"); idx != -1 {
+			line = line[:idx]
+		}
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+		if !strings.Contains(line, "=") {
+			return InvalidErrorf("libdefaults section line (%s)", line)
+		}
+
+		p := strings.Split(line, "=")
+		key := strings.TrimSpace(strings.ToLower(p[0]))
+		switch key {
+		case "allow_weak_crypto":
+			v, err := parseBoolean(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.AllowWeakCrypto = v
+		case "canonicalize":
+			v, err := parseBoolean(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.Canonicalize = v
+		case "ccache_type":
+			p[1] = strings.TrimSpace(p[1])
+			v, err := strconv.ParseUint(p[1], 10, 32)
+			if err != nil || v < 0 || v > 4 {
+				return InvalidErrorf("libdefaults section line (%s)", line)
+			}
+			l.CCacheType = int(v)
+		case "clockskew":
+			d, err := parseDuration(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.Clockskew = d
+		case "default_client_keytab_name":
+			l.DefaultClientKeytabName = strings.TrimSpace(p[1])
+		case "default_keytab_name":
+			l.DefaultKeytabName = strings.TrimSpace(p[1])
+		case "default_realm":
+			l.DefaultRealm = strings.TrimSpace(p[1])
+		case "default_tgs_enctypes":
+			l.DefaultTGSEnctypes = strings.Fields(p[1])
+		case "default_tkt_enctypes":
+			l.DefaultTktEnctypes = strings.Fields(p[1])
+		case "dns_canonicalize_hostname":
+			v, err := parseBoolean(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.DNSCanonicalizeHostname = v
+		case "dns_lookup_kdc":
+			v, err := parseBoolean(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.DNSLookupKDC = v
+		case "dns_lookup_realm":
+			v, err := parseBoolean(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.DNSLookupRealm = v
+		case "extra_addresses":
+			ipStr := strings.TrimSpace(p[1])
+			for _, ip := range strings.Split(ipStr, ",") {
+				if eip := net.ParseIP(ip); eip != nil {
+					l.ExtraAddresses = append(l.ExtraAddresses, eip)
+				}
+			}
+		case "forwardable":
+			v, err := parseBoolean(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.Forwardable = v
+		case "ignore_acceptor_hostname":
+			v, err := parseBoolean(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.IgnoreAcceptorHostname = v
+		case "k5login_authoritative":
+			v, err := parseBoolean(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.K5LoginAuthoritative = v
+		case "k5login_directory":
+			l.K5LoginDirectory = strings.TrimSpace(p[1])
+		case "kdc_default_options":
+			v := strings.TrimSpace(p[1])
+			v = strings.Replace(v, "0x", "", -1)
+			b, err := hex.DecodeString(v)
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.KDCDefaultOptions.Bytes = b
+			l.KDCDefaultOptions.BitLength = len(b) * 8
+		case "kdc_timesync":
+			p[1] = strings.TrimSpace(p[1])
+			v, err := strconv.ParseInt(p[1], 10, 32)
+			if err != nil || v < 0 {
+				return InvalidErrorf("libdefaults section line (%s)", line)
+			}
+			l.KDCTimeSync = int(v)
+		case "noaddresses":
+			v, err := parseBoolean(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.NoAddresses = v
+		case "permitted_enctypes":
+			l.PermittedEnctypes = strings.Fields(p[1])
+		case "preferred_preauth_types":
+			p[1] = strings.TrimSpace(p[1])
+			t := strings.Split(p[1], ",")
+			var v []int
+			for _, s := range t {
+				i, err := strconv.ParseInt(s, 10, 32)
+				if err != nil {
+					return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+				}
+				v = append(v, int(i))
+			}
+			l.PreferredPreauthTypes = v
+		case "proxiable":
+			v, err := parseBoolean(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.Proxiable = v
+		case "rdns":
+			v, err := parseBoolean(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.RDNS = v
+		case "realm_try_domains":
+			p[1] = strings.TrimSpace(p[1])
+			v, err := strconv.ParseInt(p[1], 10, 32)
+			if err != nil || v < -1 {
+				return InvalidErrorf("libdefaults section line (%s)", line)
+			}
+			l.RealmTryDomains = int(v)
+		case "renew_lifetime":
+			d, err := parseDuration(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.RenewLifetime = d
+		case "safe_checksum_type":
+			p[1] = strings.TrimSpace(p[1])
+			v, err := strconv.ParseInt(p[1], 10, 32)
+			if err != nil || v < 0 {
+				return InvalidErrorf("libdefaults section line (%s)", line)
+			}
+			l.SafeChecksumType = int(v)
+		case "ticket_lifetime":
+			d, err := parseDuration(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.TicketLifetime = d
+		case "udp_preference_limit":
+			p[1] = strings.TrimSpace(p[1])
+			v, err := strconv.ParseUint(p[1], 10, 32)
+			if err != nil || v > 32700 {
+				return InvalidErrorf("libdefaults section line (%s)", line)
+			}
+			l.UDPPreferenceLimit = int(v)
+		case "verify_ap_req_nofail":
+			v, err := parseBoolean(p[1])
+			if err != nil {
+				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
+			}
+			l.VerifyAPReqNofail = v
+		}
+	}
+	l.DefaultTGSEnctypeIDs = parseETypes(l.DefaultTGSEnctypes, l.AllowWeakCrypto)
+	l.DefaultTktEnctypeIDs = parseETypes(l.DefaultTktEnctypes, l.AllowWeakCrypto)
+	l.PermittedEnctypeIDs = parseETypes(l.PermittedEnctypes, l.AllowWeakCrypto)
+	return nil
+}
+
+// Realm represents an entry in the [realms] section of the configuration.
+type Realm struct {
+	Realm       string
+	AdminServer []string
+	//auth_to_local //Not implementing for now
+	//auth_to_local_names //Not implementing for now
+	DefaultDomain string
+	KDC           []string
+	KPasswdServer []string //default admin_server:464
+	MasterKDC     []string
+}
+
+// Parse the lines of a [realms] entry into the Realm struct.
+func (r *Realm) parseLines(name string, lines []string) (err error) {
+	r.Realm = name
+	var adminServerFinal bool
+	var KDCFinal bool
+	var kpasswdServerFinal bool
+	var masterKDCFinal bool
+	var ignore bool
+	var c int // counts the depth of blocks within brackets { }
+	for _, line := range lines {
+		if ignore && c > 0 && !strings.Contains(line, "{") && !strings.Contains(line, "}") {
+			continue
+		}
+		//Remove comments after the values
+		if idx := strings.IndexAny(line, "#;"); idx != -1 {
+			line = line[:idx]
+		}
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+		if !strings.Contains(line, "=") && !strings.Contains(line, "}") {
+			return InvalidErrorf("realms section line (%s)", line)
+		}
+		if strings.Contains(line, "v4_") {
+			ignore = true
+			err = UnsupportedDirective{"v4 configurations are not supported"}
+		}
+		if strings.Contains(line, "{") {
+			c++
+			if ignore {
+				continue
+			}
+		}
+		if strings.Contains(line, "}") {
+			c--
+			if c < 0 {
+				return InvalidErrorf("unpaired curly brackets")
+			}
+			if ignore {
+				if c < 1 {
+					c = 0
+					ignore = false
+				}
+				continue
+			}
+		}
+
+		p := strings.Split(line, "=")
+		key := strings.TrimSpace(strings.ToLower(p[0]))
+		v := strings.TrimSpace(p[1])
+		switch key {
+		case "admin_server":
+			appendUntilFinal(&r.AdminServer, v, &adminServerFinal)
+		case "default_domain":
+			r.DefaultDomain = v
+		case "kdc":
+			if !strings.Contains(v, ":") {
+				// No port number specified default to 88
+				if strings.HasSuffix(v, `*`) {
+					v = strings.TrimSpace(strings.TrimSuffix(v, `*`)) + ":88*"
+				} else {
+					v = strings.TrimSpace(v) + ":88"
+				}
+			}
+			appendUntilFinal(&r.KDC, v, &KDCFinal)
+		case "kpasswd_server":
+			appendUntilFinal(&r.KPasswdServer, v, &kpasswdServerFinal)
+		case "master_kdc":
+			appendUntilFinal(&r.MasterKDC, v, &masterKDCFinal)
+		}
+	}
+	//default for Kpasswd_server = admin_server:464
+	if len(r.KPasswdServer) < 1 {
+		for _, a := range r.AdminServer {
+			s := strings.Split(a, ":")
+			r.KPasswdServer = append(r.KPasswdServer, s[0]+":464")
+		}
+	}
+	return
+}
+
+// Parse the lines of the [realms] section of the configuration into a slice of Realm structs.
+func parseRealms(lines []string) (realms []Realm, err error) {
+	var name string
+	var start int
+	var c int
+	for i, l := range lines {
+		//Remove comments after the values
+		if idx := strings.IndexAny(l, "#;"); idx != -1 {
+			l = l[:idx]
+		}
+		l = strings.TrimSpace(l)
+		if l == "" {
+			continue
+		}
+		//if strings.Contains(l, "v4_") {
+		//	return nil, errors.New("v4 configurations are not supported in Realms section")
+		//}
+		if strings.Contains(l, "{") {
+			c++
+			if !strings.Contains(l, "=") {
+				return nil, fmt.Errorf("realm configuration line invalid: %s", l)
+			}
+			if c == 1 {
+				start = i
+				p := strings.Split(l, "=")
+				name = strings.TrimSpace(p[0])
+			}
+		}
+		if strings.Contains(l, "}") {
+			if c < 1 {
+				// A closing brace was found without a block having been opened.
+				return nil, errors.New("invalid Realms section in configuration")
+			}
+			c--
+			if c == 0 {
+				var r Realm
+				e := r.parseLines(name, lines[start+1:i])
+				if e != nil {
+					if _, ok := e.(UnsupportedDirective); !ok {
+						err = e
+						return
+					}
+					err = e
+				}
+				realms = append(realms, r)
+			}
+		}
+	}
+	return
+}
+
+// DomainRealm maps the domains to realms representing the [domain_realm] section of the configuration.
+type DomainRealm map[string]string
+
+// Parse the lines of the [domain_realm] section of the configuration and add to the mapping.
+func (d *DomainRealm) parseLines(lines []string) error {
+	for _, line := range lines {
+		//Remove comments after the values
+		if idx := strings.IndexAny(line, "#;"); idx != -1 {
+			line = line[:idx]
+		}
+		if strings.TrimSpace(line) == "" {
+			continue
+		}
+		if !strings.Contains(line, "=") {
+			return InvalidErrorf("realm line (%s)", line)
+		}
+		p := strings.Split(line, "=")
+		domain := strings.TrimSpace(strings.ToLower(p[0]))
+		realm := strings.TrimSpace(p[1])
+		d.addMapping(domain, realm)
+	}
+	return nil
+}
+
+// Add a domain to realm mapping.
+func (d *DomainRealm) addMapping(domain, realm string) {
+	(*d)[domain] = realm
+}
+
+// Delete a domain to realm mapping.
+func (d *DomainRealm) deleteMapping(domain, realm string) {
+	delete(*d, domain)
+}
+
+// ResolveRealm resolves the kerberos realm for the specified domain name from the domain to realm mapping.
+// The most specific mapping is returned.
+func (c *Config) ResolveRealm(domainName string) string {
+	domainName = strings.TrimSuffix(domainName, ".")
+
+	// Try to match the entire hostname first
+	if r, ok := c.DomainRealm[domainName]; ok {
+		return r
+	}
+
+	// Try to match all DNS domain parts
+	periods := strings.Count(domainName, ".") + 1
+	for i := 2; i <= periods; i++ {
+		z := strings.SplitN(domainName, ".", i)
+		if r, ok := c.DomainRealm["."+z[len(z)-1]]; ok {
+			return r
+		}
+	}
+	return c.LibDefaults.DefaultRealm
+}
+
+// Load the KRB5 configuration from the specified file path.
+func Load(cfgPath string) (*Config, error) {
+	fh, err := os.Open(cfgPath)
+	if err != nil {
+		return nil, errors.New("configuration file could not be opened: " + cfgPath + " " + err.Error())
+	}
+	defer fh.Close()
+	scanner := bufio.NewScanner(fh)
+	return NewFromScanner(scanner)
+}
+
+// NewFromString creates a new Config struct from a string.
+func NewFromString(s string) (*Config, error) {
+	reader := strings.NewReader(s)
+	return NewFromReader(reader)
+}
+
+// NewFromReader creates a new Config struct from an io.Reader.
+func NewFromReader(r io.Reader) (*Config, error) {
+	scanner := bufio.NewScanner(r)
+	return NewFromScanner(scanner)
+}
+
+// NewFromScanner creates a new Config struct from a bufio.Scanner.
+func NewFromScanner(scanner *bufio.Scanner) (*Config, error) {
+	c := New()
+	var e error
+	sections := make(map[int]string)
+	var sectionLineNum []int
+	var lines []string
+	for scanner.Scan() {
+		// Skip comments and blank lines
+		if matched, _ := regexp.MatchString(`^\s*(#|;|\n)`, scanner.Text()); matched {
+			continue
+		}
+		if matched, _ := regexp.MatchString(`^\s*\[libdefaults\]\s*`, scanner.Text()); matched {
+			sections[len(lines)] = "libdefaults"
+			sectionLineNum = append(sectionLineNum, len(lines))
+			continue
+		}
+		if matched, _ := regexp.MatchString(`^\s*\[realms\]\s*`, scanner.Text()); matched {
+			sections[len(lines)] = "realms"
+			sectionLineNum = append(sectionLineNum, len(lines))
+			continue
+		}
+		if matched, _ := regexp.MatchString(`^\s*\[domain_realm\]\s*`, scanner.Text()); matched {
+			sections[len(lines)] = "domain_realm"
+			sectionLineNum = append(sectionLineNum, len(lines))
+			continue
+		}
+		if matched, _ := regexp.MatchString(`^\s*\[.*\]\s*`, scanner.Text()); matched {
+			sections[len(lines)] = "unknown_section"
+			sectionLineNum = append(sectionLineNum, len(lines))
+			continue
+		}
+		lines = append(lines, scanner.Text())
+	}
+	for i, start := range sectionLineNum {
+		var end int
+		if i+1 >= len(sectionLineNum) {
+			end = len(lines)
+		} else {
+			end = sectionLineNum[i+1]
+		}
+		switch section := sections[start]; section {
+		case "libdefaults":
+			err := c.LibDefaults.parseLines(lines[start:end])
+			if err != nil {
+				if _, ok := err.(UnsupportedDirective); !ok {
+					return nil, fmt.Errorf("error processing libdefaults section: %v", err)
+				}
+				e = err
+			}
+		case "realms":
+			realms, err := parseRealms(lines[start:end])
+			if err != nil {
+				if _, ok := err.(UnsupportedDirective); !ok {
+					return nil, fmt.Errorf("error processing realms section: %v", err)
+				}
+				e = err
+			}
+			c.Realms = realms
+		case "domain_realm":
+			err := c.DomainRealm.parseLines(lines[start:end])
+			if err != nil {
+				if _, ok := err.(UnsupportedDirective); !ok {
+					return nil, fmt.Errorf("error processing domain_realm section: %v", err)
+				}
+				e = err
+			}
+		}
+	}
+	return c, e
+}
+
+// Parse a space-delimited list of ETypes into a list of EType numbers, optionally filtering out weak ETypes.
+func parseETypes(s []string, w bool) []int32 {
+	var eti []int32
+	for _, et := range s {
+		if !w {
+			var weak bool
+			for _, wet := range strings.Fields(WeakETypeList) {
+				if et == wet {
+					weak = true
+					break
+				}
+			}
+			if weak {
+				continue
+			}
+		}
+		i := etypeID.EtypeSupported(et)
+		if i != 0 {
+			eti = append(eti, i)
+		}
+	}
+	return eti
+}
+
+// Parse a time duration string in the configuration to a golang time.Duration.
+func parseDuration(s string) (time.Duration, error) {
+	s = strings.Replace(strings.TrimSpace(s), " ", "", -1)
+
+	// handle Nd[NmNs]
+	if strings.Contains(s, "d") {
+		ds := strings.SplitN(s, "d", 2)
+		dn, err := strconv.ParseUint(ds[0], 10, 32)
+		if err != nil {
+			return time.Duration(0), errors.New("invalid time duration")
+		}
+		d := time.Duration(dn*24) * time.Hour
+		if ds[1] != "" {
+			dp, err := time.ParseDuration(ds[1])
+			if err != nil {
+				return time.Duration(0), errors.New("invalid time duration")
+			}
+			d = d + dp
+		}
+		return d, nil
+	}
+
+	// handle Nm[Ns]
+	d, err := time.ParseDuration(s)
+	if err == nil {
+		return d, nil
+	}
+
+	// handle N
+	v, err := strconv.ParseUint(s, 10, 32)
+	if err == nil && v > 0 {
+		return time.Duration(v) * time.Second, nil
+	}
+
+	// handle h:m[:s]
+	if strings.Contains(s, ":") {
+		t := strings.Split(s, ":")
+		if 2 > len(t) || len(t) > 3 {
+			return time.Duration(0), errors.New("invalid time duration value")
+		}
+		var i []int
+		for _, n := range t {
+			j, err := strconv.ParseInt(n, 10, 16)
+			if err != nil {
+				return time.Duration(0), errors.New("invalid time duration value")
+			}
+			i = append(i, int(j))
+		}
+		d := time.Duration(i[0])*time.Hour + time.Duration(i[1])*time.Minute
+		if len(i) == 3 {
+			d = d + time.Duration(i[2])*time.Second
+		}
+		return d, nil
+	}
+	return time.Duration(0), errors.New("invalid time duration value")
+}
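+
+// Illustrative sketch (not part of upstream gokrb5) of the duration formats
+// accepted above; all values are hypothetical:
+//
+//	parseDuration("1d")       // 24h0m0s
+//	parseDuration("1d12h30m") // 36h30m0s
+//	parseDuration("36000")    // 10h0m0s (a bare integer is seconds)
+//	parseDuration("8:30")     // 8h30m0s (h:m form)
+//	parseDuration("8:30:15")  // 8h30m15s (h:m:s form)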
+
+// Parse possible boolean values to golang bool.
+func parseBoolean(s string) (bool, error) {
+	s = strings.TrimSpace(s)
+	v, err := strconv.ParseBool(s)
+	if err == nil {
+		return v, nil
+	}
+	switch strings.ToLower(s) {
+	case "yes":
+		return true, nil
+	case "y":
+		return true, nil
+	case "no":
+		return false, nil
+	case "n":
+		return false, nil
+	}
+	return false, errors.New("invalid boolean value")
+}
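+
+// Illustrative sketch (not part of upstream gokrb5): besides the values that
+// strconv.ParseBool understands ("true", "1", "false", "0", ...), the krb5-style
+// yes/no forms are accepted:
+//
+//	parseBoolean("yes")   // true, nil
+//	parseBoolean(" n ")   // false, nil
+//	parseBoolean("maybe") // false, error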
+
+// Parse array of strings but stop if an asterisk is placed at the end of a line.
+func appendUntilFinal(s *[]string, value string, final *bool) {
+	if *final {
+		return
+	}
+	if last := len(value) - 1; last >= 0 && value[last] == '*' {
+		*final = true
+		value = value[:len(value)-1]
+	}
+	*s = append(*s, value)
+}
+
+// JSON returns details of the config in JSON format.
+func (c *Config) JSON() (string, error) {
+	b, err := json.MarshalIndent(c, "", "  ")
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
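+
+// Illustrative usage sketch (not part of upstream gokrb5), assuming this package
+// is imported as "config"; the krb5.conf content shown is hypothetical:
+//
+//	cfg, err := config.NewFromString(`
+//	[libdefaults]
+//	 default_realm = EXAMPLE.COM
+//	[domain_realm]
+//	 .example.com = EXAMPLE.COM
+//	`)
+//	if err != nil {
+//		// handle parse error
+//	}
+//	realm := cfg.ResolveRealm("host.example.com") // "EXAMPLE.COM"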
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go b/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go
new file mode 100644
index 0000000..c3b35c7
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go
@@ -0,0 +1,333 @@
+package credentials
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"io/ioutil"
+	"strings"
+	"time"
+	"unsafe"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+const (
+	headerFieldTagKDCOffset = 1
+)
+
+// CCache is the file credentials cache as defined here: https://web.mit.edu/kerberos/krb5-latest/doc/formats/ccache_file_format.html
+type CCache struct {
+	Version          uint8
+	Header           header
+	DefaultPrincipal principal
+	Credentials      []*Credential
+	Path             string
+}
+
+type header struct {
+	length uint16
+	fields []headerField
+}
+
+type headerField struct {
+	tag    uint16
+	length uint16
+	value  []byte
+}
+
+// Credential cache entry principal struct.
+type principal struct {
+	Realm         string
+	PrincipalName types.PrincipalName
+}
+
+// Credential holds a Kerberos client's ccache credential information.
+type Credential struct {
+	Client       principal
+	Server       principal
+	Key          types.EncryptionKey
+	AuthTime     time.Time
+	StartTime    time.Time
+	EndTime      time.Time
+	RenewTill    time.Time
+	IsSKey       bool
+	TicketFlags  asn1.BitString
+	Addresses    []types.HostAddress
+	AuthData     []types.AuthorizationDataEntry
+	Ticket       []byte
+	SecondTicket []byte
+}
+
+// LoadCCache loads a credential cache file into a CCache type.
+func LoadCCache(cpath string) (*CCache, error) {
+	c := new(CCache)
+	b, err := ioutil.ReadFile(cpath)
+	if err != nil {
+		return c, err
+	}
+	err = c.Unmarshal(b)
+	return c, err
+}
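+
+// Illustrative usage sketch (not part of upstream gokrb5): load a cache written
+// by kinit and inspect who it belongs to (the path is hypothetical):
+//
+//	cc, err := LoadCCache("/tmp/krb5cc_1000")
+//	if err != nil {
+//		// handle read/parse error
+//	}
+//	cl := cc.GetClientCredentials()
+//	_ = cl.Realm()                       // client realm
+//	_ = cl.CName().PrincipalNameString() // client principal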
+
+// Unmarshal a byte slice of credential cache data into CCache type.
+func (c *CCache) Unmarshal(b []byte) error {
+	p := 0
+	//The first byte of the file always has the value 5
+	if int8(b[p]) != 5 {
+		return errors.New("Invalid credential cache data. First byte does not equal 5")
+	}
+	p++
+	//Get credential cache version
+	//The second byte contains the version number (1 to 4)
+	c.Version = b[p]
+	if c.Version < 1 || c.Version > 4 {
+		return errors.New("Invalid credential cache data. Cache version is not within 1 to 4")
+	}
+	p++
+	//Version 1 or 2 of the file format uses native byte order for integer representations. Versions 3 & 4 always uses big-endian byte order
+	var endian binary.ByteOrder
+	endian = binary.BigEndian
+	if (c.Version == 1 || c.Version == 2) && isNativeEndianLittle() {
+		endian = binary.LittleEndian
+	}
+	if c.Version == 4 {
+		err := parseHeader(b, &p, c, &endian)
+		if err != nil {
+			return err
+		}
+	}
+	c.DefaultPrincipal = parsePrincipal(b, &p, c, &endian)
+	for p < len(b) {
+		cred, err := parseCredential(b, &p, c, &endian)
+		if err != nil {
+			return err
+		}
+		c.Credentials = append(c.Credentials, cred)
+	}
+	return nil
+}
+
+func parseHeader(b []byte, p *int, c *CCache, e *binary.ByteOrder) error {
+	if c.Version != 4 {
+		return errors.New("Credentials cache version is not 4 so there is no header to parse.")
+	}
+	h := header{}
+	h.length = uint16(readInt16(b, p, e))
+	for *p <= int(h.length) {
+		f := headerField{}
+		f.tag = uint16(readInt16(b, p, e))
+		f.length = uint16(readInt16(b, p, e))
+		f.value = b[*p : *p+int(f.length)]
+		*p += int(f.length)
+		if !f.valid() {
+			return errors.New("Invalid credential cache header found")
+		}
+		h.fields = append(h.fields, f)
+	}
+	c.Header = h
+	return nil
+}
+
+// Parse the bytes of a principal from the credential cache data into a principal type.
+func parsePrincipal(b []byte, p *int, c *CCache, e *binary.ByteOrder) (princ principal) {
+	if c.Version != 1 {
+		//Name Type is omitted in version 1
+		princ.PrincipalName.NameType = readInt32(b, p, e)
+	}
+	nc := int(readInt32(b, p, e))
+	if c.Version == 1 {
+		//In version 1 the number of components includes the realm. Minus 1 to make consistent with version 2
+		nc--
+	}
+	lenRealm := readInt32(b, p, e)
+	princ.Realm = string(readBytes(b, p, int(lenRealm), e))
+	for i := 0; i < nc; i++ {
+		l := readInt32(b, p, e)
+		princ.PrincipalName.NameString = append(princ.PrincipalName.NameString, string(readBytes(b, p, int(l), e)))
+	}
+	return princ
+}
+
+func parseCredential(b []byte, p *int, c *CCache, e *binary.ByteOrder) (cred *Credential, err error) {
+	cred = new(Credential)
+	cred.Client = parsePrincipal(b, p, c, e)
+	cred.Server = parsePrincipal(b, p, c, e)
+	key := types.EncryptionKey{}
+	key.KeyType = int32(readInt16(b, p, e))
+	if c.Version == 3 {
+		//repeated twice in version 3
+		key.KeyType = int32(readInt16(b, p, e))
+	}
+	key.KeyValue = readData(b, p, e)
+	cred.Key = key
+	cred.AuthTime = readTimestamp(b, p, e)
+	cred.StartTime = readTimestamp(b, p, e)
+	cred.EndTime = readTimestamp(b, p, e)
+	cred.RenewTill = readTimestamp(b, p, e)
+	if ik := readInt8(b, p, e); ik == 0 {
+		cred.IsSKey = false
+	} else {
+		cred.IsSKey = true
+	}
+	cred.TicketFlags = types.NewKrbFlags()
+	cred.TicketFlags.Bytes = readBytes(b, p, 4, e)
+	l := int(readInt32(b, p, e))
+	cred.Addresses = make([]types.HostAddress, l, l)
+	for i := range cred.Addresses {
+		cred.Addresses[i] = readAddress(b, p, e)
+	}
+	l = int(readInt32(b, p, e))
+	cred.AuthData = make([]types.AuthorizationDataEntry, l, l)
+	for i := range cred.AuthData {
+		cred.AuthData[i] = readAuthDataEntry(b, p, e)
+	}
+	cred.Ticket = readData(b, p, e)
+	cred.SecondTicket = readData(b, p, e)
+	return
+}
+
+// GetClientPrincipalName returns a PrincipalName type for the client the credentials cache is for.
+func (c *CCache) GetClientPrincipalName() types.PrincipalName {
+	return c.DefaultPrincipal.PrincipalName
+}
+
+// GetClientRealm returns the realm of the client the credentials cache is for.
+func (c *CCache) GetClientRealm() string {
+	return c.DefaultPrincipal.Realm
+}
+
+// GetClientCredentials returns a Credentials object representing the client of the credentials cache.
+func (c *CCache) GetClientCredentials() *Credentials {
+	return &Credentials{
+		username: c.DefaultPrincipal.PrincipalName.PrincipalNameString(),
+		realm:    c.GetClientRealm(),
+		cname:    c.DefaultPrincipal.PrincipalName,
+	}
+}
+
+// Contains tests if the cache contains a credential for the provided server PrincipalName
+func (c *CCache) Contains(p types.PrincipalName) bool {
+	for _, cred := range c.Credentials {
+		if cred.Server.PrincipalName.Equal(p) {
+			return true
+		}
+	}
+	return false
+}
+
+// GetEntry returns a specific credential for the PrincipalName provided.
+func (c *CCache) GetEntry(p types.PrincipalName) (*Credential, bool) {
+	cred := new(Credential)
+	var found bool
+	for i := range c.Credentials {
+		if c.Credentials[i].Server.PrincipalName.Equal(p) {
+			cred = c.Credentials[i]
+			found = true
+			break
+		}
+	}
+	if !found {
+		return cred, false
+	}
+	return cred, true
+}
+
+// GetEntries filters out configuration entries and returns a slice of credentials.
+func (c *CCache) GetEntries() []*Credential {
+	creds := make([]*Credential, 0)
+	for _, cred := range c.Credentials {
+		// Filter out configuration entries
+		if strings.HasPrefix(cred.Server.Realm, "X-CACHECONF") {
+			continue
+		}
+		creds = append(creds, cred)
+	}
+	return creds
+}
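+
+// Illustrative sketch (not part of upstream gokrb5), continuing from a loaded
+// *CCache cc: walk the non-configuration entries and skip expired tickets.
+//
+//	for _, cred := range cc.GetEntries() {
+//		if time.Now().After(cred.EndTime) {
+//			continue // ticket already expired
+//		}
+//		_ = cred.Server.PrincipalName // service principal the ticket is for
+//	}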
+
+func (h *headerField) valid() bool {
+	// See https://web.mit.edu/kerberos/krb5-latest/doc/formats/ccache_file_format.html - Header format
+	switch h.tag {
+	case headerFieldTagKDCOffset:
+		if h.length != 8 || len(h.value) != 8 {
+			return false
+		}
+		return true
+	}
+	return false
+}
+
+func readData(b []byte, p *int, e *binary.ByteOrder) []byte {
+	l := readInt32(b, p, e)
+	return readBytes(b, p, int(l), e)
+}
+
+func readAddress(b []byte, p *int, e *binary.ByteOrder) types.HostAddress {
+	a := types.HostAddress{}
+	a.AddrType = int32(readInt16(b, p, e))
+	a.Address = readData(b, p, e)
+	return a
+}
+
+func readAuthDataEntry(b []byte, p *int, e *binary.ByteOrder) types.AuthorizationDataEntry {
+	a := types.AuthorizationDataEntry{}
+	a.ADType = int32(readInt16(b, p, e))
+	a.ADData = readData(b, p, e)
+	return a
+}
+
+// Read bytes representing a timestamp.
+func readTimestamp(b []byte, p *int, e *binary.ByteOrder) time.Time {
+	return time.Unix(int64(readInt32(b, p, e)), 0)
+}
+
+// Read bytes representing an eight bit integer.
+func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8) {
+	buf := bytes.NewBuffer(b[*p : *p+1])
+	binary.Read(buf, *e, &i)
+	*p++
+	return
+}
+
+// Read bytes representing a sixteen bit integer.
+func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16) {
+	buf := bytes.NewBuffer(b[*p : *p+2])
+	binary.Read(buf, *e, &i)
+	*p += 2
+	return
+}
+
+// Read bytes representing a thirty two bit integer.
+func readInt32(b []byte, p *int, e *binary.ByteOrder) (i int32) {
+	buf := bytes.NewBuffer(b[*p : *p+4])
+	binary.Read(buf, *e, &i)
+	*p += 4
+	return
+}
+
+func readBytes(b []byte, p *int, s int, e *binary.ByteOrder) []byte {
+	buf := bytes.NewBuffer(b[*p : *p+s])
+	r := make([]byte, s)
+	binary.Read(buf, *e, &r)
+	*p += s
+	return r
+}
+
+func isNativeEndianLittle() bool {
+	var x = 0x012345678
+	var p = unsafe.Pointer(&x)
+	var bp = (*[4]byte)(p)
+
+	var endian bool
+	if 0x01 == bp[0] {
+		endian = false
+	} else if (0x78 & 0xff) == (bp[0] & 0xff) {
+		endian = true
+	} else {
+		// Default to big endian
+		endian = false
+	}
+	return endian
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/credentials/credentials.go b/vendor/github.com/jcmturner/gokrb5/v8/credentials/credentials.go
new file mode 100644
index 0000000..bddbc7e
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/credentials/credentials.go
@@ -0,0 +1,405 @@
+// Package credentials provides credentials management for Kerberos 5 authentication.
+package credentials
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"time"
+
+	"github.com/hashicorp/go-uuid"
+	"github.com/jcmturner/gokrb5/v8/iana/nametype"
+	"github.com/jcmturner/gokrb5/v8/keytab"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+const (
+	// AttributeKeyADCredentials assigned number for AD credentials.
+	AttributeKeyADCredentials = "gokrb5AttributeKeyADCredentials"
+)
+
+// Credentials struct for a user.
+// Contains either a keytab, password or both.
+// Keytabs are used over passwords if both are defined.
+type Credentials struct {
+	username        string
+	displayName     string
+	realm           string
+	cname           types.PrincipalName
+	keytab          *keytab.Keytab
+	password        string
+	attributes      map[string]interface{}
+	validUntil      time.Time
+	authenticated   bool
+	human           bool
+	authTime        time.Time
+	groupMembership map[string]bool
+	sessionID       string
+}
+
+// marshalCredentials is used to enable marshaling and unmarshaling of credentials
+// without having exported fields on the Credentials struct
+type marshalCredentials struct {
+	Username        string
+	DisplayName     string
+	Realm           string
+	CName           types.PrincipalName `json:"-"`
+	Keytab          bool
+	Password        bool
+	Attributes      map[string]interface{} `json:"-"`
+	ValidUntil      time.Time
+	Authenticated   bool
+	Human           bool
+	AuthTime        time.Time
+	GroupMembership map[string]bool `json:"-"`
+	SessionID       string
+}
+
+// ADCredentials contains information obtained from the PAC.
+type ADCredentials struct {
+	EffectiveName       string
+	FullName            string
+	UserID              int
+	PrimaryGroupID      int
+	LogOnTime           time.Time
+	LogOffTime          time.Time
+	PasswordLastSet     time.Time
+	GroupMembershipSIDs []string
+	LogonDomainName     string
+	LogonDomainID       string
+	LogonServer         string
+}
+
+// New creates a new Credentials instance.
+func New(username string, realm string) *Credentials {
+	uid, err := uuid.GenerateUUID()
+	if err != nil {
+		uid = "00unique-sess-ions-uuid-unavailable0"
+	}
+	return &Credentials{
+		username:        username,
+		displayName:     username,
+		realm:           realm,
+		cname:           types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, username),
+		keytab:          keytab.New(),
+		attributes:      make(map[string]interface{}),
+		groupMembership: make(map[string]bool),
+		sessionID:       uid,
+		human:           true,
+	}
+}
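+
+// Illustrative usage sketch (not part of upstream gokrb5): build credentials for
+// a user and attach a password (username, realm and password are hypothetical):
+//
+//	cl := New("user1", "EXAMPLE.COM").WithPassword("s3cret")
+//	if cl.HasPassword() && !cl.HasKeytab() {
+//		// password-based credentials ready for use by a client
+//	}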
+
+// NewFromPrincipalName creates a new Credentials instance with the user details provided as a PrincipalName type.
+func NewFromPrincipalName(cname types.PrincipalName, realm string) *Credentials {
+	c := New(cname.PrincipalNameString(), realm)
+	c.cname = cname
+	return c
+}
+
+// WithKeytab sets the Keytab in the Credentials struct.
+func (c *Credentials) WithKeytab(kt *keytab.Keytab) *Credentials {
+	c.keytab = kt
+	c.password = ""
+	return c
+}
+
+// Keytab returns the credential's Keytab.
+func (c *Credentials) Keytab() *keytab.Keytab {
+	return c.keytab
+}
+
+// HasKeytab queries if the Credentials has a keytab defined.
+func (c *Credentials) HasKeytab() bool {
+	if c.keytab != nil && len(c.keytab.Entries) > 0 {
+		return true
+	}
+	return false
+}
+
+// WithPassword sets the password in the Credentials struct.
+func (c *Credentials) WithPassword(password string) *Credentials {
+	c.password = password
+	c.keytab = keytab.New() // clear any keytab
+	return c
+}
+
+// Password returns the credential's password.
+func (c *Credentials) Password() string {
+	return c.password
+}
+
+// HasPassword queries if the Credentials has a password defined.
+func (c *Credentials) HasPassword() bool {
+	if c.password != "" {
+		return true
+	}
+	return false
+}
+
+// SetValidUntil sets the expiry time of the credentials
+func (c *Credentials) SetValidUntil(t time.Time) {
+	c.validUntil = t
+}
+
+// SetADCredentials adds ADCredentials attributes to the credentials
+func (c *Credentials) SetADCredentials(a ADCredentials) {
+	c.SetAttribute(AttributeKeyADCredentials, a)
+	if a.FullName != "" {
+		c.SetDisplayName(a.FullName)
+	}
+	if a.EffectiveName != "" {
+		c.SetUserName(a.EffectiveName)
+	}
+	for i := range a.GroupMembershipSIDs {
+		c.AddAuthzAttribute(a.GroupMembershipSIDs[i])
+	}
+}
+
+// GetADCredentials returns ADCredentials attributes stored in the credential.
+func (c *Credentials) GetADCredentials() ADCredentials {
+	if a, ok := c.attributes[AttributeKeyADCredentials].(ADCredentials); ok {
+		return a
+	}
+	return ADCredentials{}
+}
+
+// Methods to implement goidentity.Identity interface
+
+// UserName returns the credential's username.
+func (c *Credentials) UserName() string {
+	return c.username
+}
+
+// SetUserName sets the username value on the credential.
+func (c *Credentials) SetUserName(s string) {
+	c.username = s
+}
+
+// CName returns the credential's client principal name.
+func (c *Credentials) CName() types.PrincipalName {
+	return c.cname
+}
+
+// SetCName sets the client principal name on the credential.
+func (c *Credentials) SetCName(pn types.PrincipalName) {
+	c.cname = pn
+}
+
+// Domain returns the credential's domain.
+func (c *Credentials) Domain() string {
+	return c.realm
+}
+
+// SetDomain sets the domain value on the credential.
+func (c *Credentials) SetDomain(s string) {
+	c.realm = s
+}
+
+// Realm returns the credential's realm. Same as the domain.
+func (c *Credentials) Realm() string {
+	return c.Domain()
+}
+
+// SetRealm sets the realm value on the credential. Same as the domain
+func (c *Credentials) SetRealm(s string) {
+	c.SetDomain(s)
+}
+
+// DisplayName returns the credential's display name.
+func (c *Credentials) DisplayName() string {
+	return c.displayName
+}
+
+// SetDisplayName sets the display name value on the credential.
+func (c *Credentials) SetDisplayName(s string) {
+	c.displayName = s
+}
+
+// Human returns whether the credential represents a human or not.
+func (c *Credentials) Human() bool {
+	return c.human
+}
+
+// SetHuman sets the credential as human.
+func (c *Credentials) SetHuman(b bool) {
+	c.human = b
+}
+
+// AuthTime returns the time the credential was authenticated.
+func (c *Credentials) AuthTime() time.Time {
+	return c.authTime
+}
+
+// SetAuthTime sets the time the credential was authenticated.
+func (c *Credentials) SetAuthTime(t time.Time) {
+	c.authTime = t
+}
+
+// AuthzAttributes returns the credentials authorizing attributes.
+func (c *Credentials) AuthzAttributes() []string {
+	s := make([]string, len(c.groupMembership))
+	i := 0
+	for a := range c.groupMembership {
+		s[i] = a
+		i++
+	}
+	return s
+}
+
+// Authenticated indicates if the credential has been successfully authenticated or not.
+func (c *Credentials) Authenticated() bool {
+	return c.authenticated
+}
+
+// SetAuthenticated sets the credential as having been successfully authenticated.
+func (c *Credentials) SetAuthenticated(b bool) {
+	c.authenticated = b
+}
+
+// AddAuthzAttribute adds an authorization attribute to the credential.
+func (c *Credentials) AddAuthzAttribute(a string) {
+	c.groupMembership[a] = true
+}
+
+// RemoveAuthzAttribute removes an authorization attribute from the credential.
+func (c *Credentials) RemoveAuthzAttribute(a string) {
+	if _, ok := c.groupMembership[a]; !ok {
+		return
+	}
+	delete(c.groupMembership, a)
+}
+
+// EnableAuthzAttribute toggles an authorization attribute to an enabled state on the credential.
+func (c *Credentials) EnableAuthzAttribute(a string) {
+	if enabled, ok := c.groupMembership[a]; ok && !enabled {
+		c.groupMembership[a] = true
+	}
+}
+
+// DisableAuthzAttribute toggles an authorization attribute to a disabled state on the credential.
+func (c *Credentials) DisableAuthzAttribute(a string) {
+	if enabled, ok := c.groupMembership[a]; ok && enabled {
+		c.groupMembership[a] = false
+	}
+}
+
+// Authorized indicates if the credential has the specified authorizing attribute.
+func (c *Credentials) Authorized(a string) bool {
+	if enabled, ok := c.groupMembership[a]; ok && enabled {
+		return true
+	}
+	return false
+}
+
+// SessionID returns the credential's session ID.
+func (c *Credentials) SessionID() string {
+	return c.sessionID
+}
+
+// Expired indicates if the credential has expired.
+func (c *Credentials) Expired() bool {
+	if !c.validUntil.IsZero() && time.Now().UTC().After(c.validUntil) {
+		return true
+	}
+	return false
+}
+
+// ValidUntil returns the credential's valid until date
+func (c *Credentials) ValidUntil() time.Time {
+	return c.validUntil
+}
+
+// Attributes returns the Credentials' attributes map.
+func (c *Credentials) Attributes() map[string]interface{} {
+	return c.attributes
+}
+
+// SetAttribute sets the value of an attribute.
+func (c *Credentials) SetAttribute(k string, v interface{}) {
+	c.attributes[k] = v
+}
+
+// SetAttributes replaces the attributes map with the one provided.
+func (c *Credentials) SetAttributes(a map[string]interface{}) {
+	c.attributes = a
+}
+
+// RemoveAttribute deletes an attribute from the attribute map that has the key provided.
+func (c *Credentials) RemoveAttribute(k string) {
+	delete(c.attributes, k)
+}
+
+// Marshal the Credentials into a byte slice
+func (c *Credentials) Marshal() ([]byte, error) {
+	gob.Register(map[string]interface{}{})
+	gob.Register(ADCredentials{})
+	buf := new(bytes.Buffer)
+	enc := gob.NewEncoder(buf)
+	mc := marshalCredentials{
+		Username:        c.username,
+		DisplayName:     c.displayName,
+		Realm:           c.realm,
+		CName:           c.cname,
+		Keytab:          c.HasKeytab(),
+		Password:        c.HasPassword(),
+		Attributes:      c.attributes,
+		ValidUntil:      c.validUntil,
+		Authenticated:   c.authenticated,
+		Human:           c.human,
+		AuthTime:        c.authTime,
+		GroupMembership: c.groupMembership,
+		SessionID:       c.sessionID,
+	}
+	err := enc.Encode(&mc)
+	if err != nil {
+		return []byte{}, err
+	}
+	return buf.Bytes(), nil
+}
+
+// Unmarshal a byte slice into Credentials
+func (c *Credentials) Unmarshal(b []byte) error {
+	gob.Register(map[string]interface{}{})
+	gob.Register(ADCredentials{})
+	mc := new(marshalCredentials)
+	buf := bytes.NewBuffer(b)
+	dec := gob.NewDecoder(buf)
+	err := dec.Decode(mc)
+	if err != nil {
+		return err
+	}
+	c.username = mc.Username
+	c.displayName = mc.DisplayName
+	c.realm = mc.Realm
+	c.cname = mc.CName
+	c.attributes = mc.Attributes
+	c.validUntil = mc.ValidUntil
+	c.authenticated = mc.Authenticated
+	c.human = mc.Human
+	c.authTime = mc.AuthTime
+	c.groupMembership = mc.GroupMembership
+	c.sessionID = mc.SessionID
+	return nil
+}
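+
+// Illustrative round-trip sketch (not part of upstream gokrb5): gob-encode a
+// credential for storage (e.g. in a session store) and restore it later.
+//
+//	b, err := cl.Marshal()
+//	if err != nil {
+//		// handle encode error
+//	}
+//	restored := new(Credentials)
+//	if err := restored.Unmarshal(b); err != nil {
+//		// handle decode error
+//	}
+//	_ = restored.UserName() // same value as cl.UserName()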
+
+// JSON returns details of the Credentials in JSON format.
+func (c *Credentials) JSON() (string, error) {
+	mc := marshalCredentials{
+		Username:      c.username,
+		DisplayName:   c.displayName,
+		Realm:         c.realm,
+		CName:         c.cname,
+		Keytab:        c.HasKeytab(),
+		Password:      c.HasPassword(),
+		ValidUntil:    c.validUntil,
+		Authenticated: c.authenticated,
+		Human:         c.human,
+		AuthTime:      c.authTime,
+		SessionID:     c.sessionID,
+	}
+	b, err := json.MarshalIndent(mc, "", "  ")
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha1-96.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha1-96.go
new file mode 100644
index 0000000..dd8babd
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha1-96.go
@@ -0,0 +1,129 @@
+package crypto
+
+import (
+	"crypto/aes"
+	"crypto/hmac"
+	"crypto/sha1"
+	"hash"
+
+	"github.com/jcmturner/gokrb5/v8/crypto/common"
+	"github.com/jcmturner/gokrb5/v8/crypto/rfc3961"
+	"github.com/jcmturner/gokrb5/v8/crypto/rfc3962"
+	"github.com/jcmturner/gokrb5/v8/iana/chksumtype"
+	"github.com/jcmturner/gokrb5/v8/iana/etypeID"
+)
+
+// RFC 3962
+
+// Aes128CtsHmacSha96 implements Kerberos encryption type aes128-cts-hmac-sha1-96
+type Aes128CtsHmacSha96 struct {
+}
+
+// GetETypeID returns the EType ID number.
+func (e Aes128CtsHmacSha96) GetETypeID() int32 {
+	return etypeID.AES128_CTS_HMAC_SHA1_96
+}
+
+// GetHashID returns the checksum type ID number.
+func (e Aes128CtsHmacSha96) GetHashID() int32 {
+	return chksumtype.HMAC_SHA1_96_AES128
+}
+
+// GetKeyByteSize returns the number of bytes for key of this etype.
+func (e Aes128CtsHmacSha96) GetKeyByteSize() int {
+	return 128 / 8
+}
+
+// GetKeySeedBitLength returns the number of bits for the seed for key generation.
+func (e Aes128CtsHmacSha96) GetKeySeedBitLength() int {
+	return e.GetKeyByteSize() * 8
+}
+
+// GetHashFunc returns the hash function for this etype.
+func (e Aes128CtsHmacSha96) GetHashFunc() func() hash.Hash {
+	return sha1.New
+}
+
+// GetMessageBlockByteSize returns the block size for the etype's messages.
+func (e Aes128CtsHmacSha96) GetMessageBlockByteSize() int {
+	return 1
+}
+
+// GetDefaultStringToKeyParams returns the default key derivation parameters in string form.
+func (e Aes128CtsHmacSha96) GetDefaultStringToKeyParams() string {
+	return "00001000"
+}
+
+// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations.
+func (e Aes128CtsHmacSha96) GetConfounderByteSize() int {
+	return aes.BlockSize
+}
+
+// GetHMACBitLength returns the bit count size of the integrity hash.
+func (e Aes128CtsHmacSha96) GetHMACBitLength() int {
+	return 96
+}
+
+// GetCypherBlockBitLength returns the bit count size of the cypher block.
+func (e Aes128CtsHmacSha96) GetCypherBlockBitLength() int {
+	return aes.BlockSize * 8
+}
+
+// StringToKey returns a key derived from the string provided.
+func (e Aes128CtsHmacSha96) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) {
+	return rfc3962.StringToKey(secret, salt, s2kparams, e)
+}
+
+// RandomToKey returns a key from the bytes provided.
+func (e Aes128CtsHmacSha96) RandomToKey(b []byte) []byte {
+	return rfc3961.RandomToKey(b)
+}
+
+// EncryptData encrypts the data provided.
+func (e Aes128CtsHmacSha96) EncryptData(key, data []byte) ([]byte, []byte, error) {
+	return rfc3962.EncryptData(key, data, e)
+}
+
+// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message.
+func (e Aes128CtsHmacSha96) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) {
+	return rfc3962.EncryptMessage(key, message, usage, e)
+}
+
+// DecryptData decrypts the data provided.
+func (e Aes128CtsHmacSha96) DecryptData(key, data []byte) ([]byte, error) {
+	return rfc3962.DecryptData(key, data, e)
+}
+
+// DecryptMessage decrypts the message provided and verifies the integrity of the message.
+func (e Aes128CtsHmacSha96) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) {
+	return rfc3962.DecryptMessage(key, ciphertext, usage, e)
+}
+
+// DeriveKey derives a key from the protocol key based on the usage value.
+func (e Aes128CtsHmacSha96) DeriveKey(protocolKey, usage []byte) ([]byte, error) {
+	return rfc3961.DeriveKey(protocolKey, usage, e)
+}
+
+// DeriveRandom generates data needed for key generation.
+func (e Aes128CtsHmacSha96) DeriveRandom(protocolKey, usage []byte) ([]byte, error) {
+	return rfc3961.DeriveRandom(protocolKey, usage, e)
+}
+
+// VerifyIntegrity checks the integrity of the plaintext message.
+func (e Aes128CtsHmacSha96) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {
+	return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e)
+}
+
+// GetChecksumHash returns a keyed checksum hash of the bytes provided.
+func (e Aes128CtsHmacSha96) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) {
+	return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e)
+}
+
+// VerifyChecksum compares the checksum of the message bytes with the checksum provided.
+func (e Aes128CtsHmacSha96) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool {
+	c, err := e.GetChecksumHash(protocolKey, data, usage)
+	if err != nil {
+		return false
+	}
+	return hmac.Equal(chksum, c)
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha256-128.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha256-128.go
new file mode 100644
index 0000000..b05af7d
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes128-cts-hmac-sha256-128.go
@@ -0,0 +1,132 @@
+package crypto
+
+import (
+	"crypto/aes"
+	"crypto/hmac"
+	"crypto/sha256"
+	"hash"
+
+	"github.com/jcmturner/gokrb5/v8/crypto/common"
+	"github.com/jcmturner/gokrb5/v8/crypto/rfc8009"
+	"github.com/jcmturner/gokrb5/v8/iana/chksumtype"
+	"github.com/jcmturner/gokrb5/v8/iana/etypeID"
+)
+
+// RFC https://tools.ietf.org/html/rfc8009
+
+// Aes128CtsHmacSha256128 implements Kerberos encryption type aes128-cts-hmac-sha256-128
+type Aes128CtsHmacSha256128 struct {
+}
+
+// GetETypeID returns the EType ID number.
+func (e Aes128CtsHmacSha256128) GetETypeID() int32 {
+	return etypeID.AES128_CTS_HMAC_SHA256_128
+}
+
+// GetHashID returns the checksum type ID number.
+func (e Aes128CtsHmacSha256128) GetHashID() int32 {
+	return chksumtype.HMAC_SHA256_128_AES128
+}
+
+// GetKeyByteSize returns the number of bytes for key of this etype.
+func (e Aes128CtsHmacSha256128) GetKeyByteSize() int {
+	return 128 / 8
+}
+
+// GetKeySeedBitLength returns the number of bits for the seed for key generation.
+func (e Aes128CtsHmacSha256128) GetKeySeedBitLength() int {
+	return e.GetKeyByteSize() * 8
+}
+
+// GetHashFunc returns the hash function for this etype.
+func (e Aes128CtsHmacSha256128) GetHashFunc() func() hash.Hash {
+	return sha256.New
+}
+
+// GetMessageBlockByteSize returns the block size for the etype's messages.
+func (e Aes128CtsHmacSha256128) GetMessageBlockByteSize() int {
+	return 1
+}
+
+// GetDefaultStringToKeyParams returns the default key derivation parameters in string form.
+func (e Aes128CtsHmacSha256128) GetDefaultStringToKeyParams() string {
+	return "00008000"
+}
+
+// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations.
+func (e Aes128CtsHmacSha256128) GetConfounderByteSize() int {
+	return aes.BlockSize
+}
+
+// GetHMACBitLength returns the bit count size of the integrity hash.
+func (e Aes128CtsHmacSha256128) GetHMACBitLength() int {
+	return 128
+}
+
+// GetCypherBlockBitLength returns the bit count size of the cypher block.
+func (e Aes128CtsHmacSha256128) GetCypherBlockBitLength() int {
+	return aes.BlockSize * 8
+}
+
+// StringToKey returns a key derived from the string provided.
+func (e Aes128CtsHmacSha256128) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) {
+	saltp := rfc8009.GetSaltP(salt, "aes128-cts-hmac-sha256-128")
+	return rfc8009.StringToKey(secret, saltp, s2kparams, e)
+}
+
+// RandomToKey returns a key from the bytes provided.
+func (e Aes128CtsHmacSha256128) RandomToKey(b []byte) []byte {
+	return rfc8009.RandomToKey(b)
+}
+
+// EncryptData encrypts the data provided.
+func (e Aes128CtsHmacSha256128) EncryptData(key, data []byte) ([]byte, []byte, error) {
+	return rfc8009.EncryptData(key, data, e)
+}
+
+// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message.
+func (e Aes128CtsHmacSha256128) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) {
+	return rfc8009.EncryptMessage(key, message, usage, e)
+}
+
+// DecryptData decrypts the data provided.
+func (e Aes128CtsHmacSha256128) DecryptData(key, data []byte) ([]byte, error) {
+	return rfc8009.DecryptData(key, data, e)
+}
+
+// DecryptMessage decrypts the message provided and verifies the integrity of the message.
+func (e Aes128CtsHmacSha256128) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) {
+	return rfc8009.DecryptMessage(key, ciphertext, usage, e)
+}
+
+// DeriveKey derives a key from the protocol key based on the usage value.
+func (e Aes128CtsHmacSha256128) DeriveKey(protocolKey, usage []byte) ([]byte, error) {
+	return rfc8009.DeriveKey(protocolKey, usage, e), nil
+}
+
+// DeriveRandom generates data needed for key generation.
+func (e Aes128CtsHmacSha256128) DeriveRandom(protocolKey, usage []byte) ([]byte, error) {
+	return rfc8009.DeriveRandom(protocolKey, usage, e)
+}
+
+// VerifyIntegrity checks the integrity of the ciphertext message.
+// As the hash is calculated over the IV concatenated with the AES cipher output, not the plaintext, the pt value
+// passed to this interface method is not used. Pass any []byte.
+func (e Aes128CtsHmacSha256128) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {
+	// We don't need pt; it is only there to satisfy the interface
+	return rfc8009.VerifyIntegrity(protocolKey, ct, usage, e)
+}
+
+// GetChecksumHash returns a keyed checksum hash of the bytes provided.
+func (e Aes128CtsHmacSha256128) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) {
+	return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e)
+}
+
+// VerifyChecksum compares the checksum of the message bytes with the checksum provided.
+func (e Aes128CtsHmacSha256128) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool {
+	c, err := e.GetChecksumHash(protocolKey, data, usage)
+	if err != nil {
+		return false
+	}
+	return hmac.Equal(chksum, c)
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha1-96.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha1-96.go
new file mode 100644
index 0000000..45e439a
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha1-96.go
@@ -0,0 +1,129 @@
+package crypto
+
+import (
+	"crypto/aes"
+	"crypto/hmac"
+	"crypto/sha1"
+	"hash"
+
+	"github.com/jcmturner/gokrb5/v8/crypto/common"
+	"github.com/jcmturner/gokrb5/v8/crypto/rfc3961"
+	"github.com/jcmturner/gokrb5/v8/crypto/rfc3962"
+	"github.com/jcmturner/gokrb5/v8/iana/chksumtype"
+	"github.com/jcmturner/gokrb5/v8/iana/etypeID"
+)
+
+// RFC 3962
+
+// Aes256CtsHmacSha96 implements Kerberos encryption type aes256-cts-hmac-sha1-96
+type Aes256CtsHmacSha96 struct {
+}
+
+// GetETypeID returns the EType ID number.
+func (e Aes256CtsHmacSha96) GetETypeID() int32 {
+	return etypeID.AES256_CTS_HMAC_SHA1_96
+}
+
+// GetHashID returns the checksum type ID number.
+func (e Aes256CtsHmacSha96) GetHashID() int32 {
+	return chksumtype.HMAC_SHA1_96_AES256
+}
+
+// GetKeyByteSize returns the number of bytes for key of this etype.
+func (e Aes256CtsHmacSha96) GetKeyByteSize() int {
+	return 256 / 8
+}
+
+// GetKeySeedBitLength returns the number of bits for the seed for key generation.
+func (e Aes256CtsHmacSha96) GetKeySeedBitLength() int {
+	return e.GetKeyByteSize() * 8
+}
+
+// GetHashFunc returns the hash function for this etype.
+func (e Aes256CtsHmacSha96) GetHashFunc() func() hash.Hash {
+	return sha1.New
+}
+
+// GetMessageBlockByteSize returns the block size for the etype's messages.
+func (e Aes256CtsHmacSha96) GetMessageBlockByteSize() int {
+	return 1
+}
+
+// GetDefaultStringToKeyParams returns the default key derivation parameters in string form.
+func (e Aes256CtsHmacSha96) GetDefaultStringToKeyParams() string {
+	return "00001000"
+}
+
+// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations.
+func (e Aes256CtsHmacSha96) GetConfounderByteSize() int {
+	return aes.BlockSize
+}
+
+// GetHMACBitLength returns the bit count size of the integrity hash.
+func (e Aes256CtsHmacSha96) GetHMACBitLength() int {
+	return 96
+}
+
+// GetCypherBlockBitLength returns the bit count size of the cypher block.
+func (e Aes256CtsHmacSha96) GetCypherBlockBitLength() int {
+	return aes.BlockSize * 8
+}
+
+// StringToKey returns a key derived from the string provided.
+func (e Aes256CtsHmacSha96) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) {
+	return rfc3962.StringToKey(secret, salt, s2kparams, e)
+}
+
+// RandomToKey returns a key from the bytes provided.
+func (e Aes256CtsHmacSha96) RandomToKey(b []byte) []byte {
+	return rfc3961.RandomToKey(b)
+}
+
+// EncryptData encrypts the data provided.
+func (e Aes256CtsHmacSha96) EncryptData(key, data []byte) ([]byte, []byte, error) {
+	return rfc3962.EncryptData(key, data, e)
+}
+
+// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message.
+func (e Aes256CtsHmacSha96) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) {
+	return rfc3962.EncryptMessage(key, message, usage, e)
+}
+
+// DecryptData decrypts the data provided.
+func (e Aes256CtsHmacSha96) DecryptData(key, data []byte) ([]byte, error) {
+	return rfc3962.DecryptData(key, data, e)
+}
+
+// DecryptMessage decrypts the message provided and verifies the integrity of the message.
+func (e Aes256CtsHmacSha96) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) {
+	return rfc3962.DecryptMessage(key, ciphertext, usage, e)
+}
+
+// DeriveKey derives a key from the protocol key based on the usage value.
+func (e Aes256CtsHmacSha96) DeriveKey(protocolKey, usage []byte) ([]byte, error) {
+	return rfc3961.DeriveKey(protocolKey, usage, e)
+}
+
+// DeriveRandom generates data needed for key generation.
+func (e Aes256CtsHmacSha96) DeriveRandom(protocolKey, usage []byte) ([]byte, error) {
+	return rfc3961.DeriveRandom(protocolKey, usage, e)
+}
+
+// VerifyIntegrity checks the integrity of the plaintext message.
+func (e Aes256CtsHmacSha96) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {
+	return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e)
+}
+
+// GetChecksumHash returns a keyed checksum hash of the bytes provided.
+func (e Aes256CtsHmacSha96) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) {
+	return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e)
+}
+
+// VerifyChecksum compares the checksum of the message bytes with the checksum provided.
+func (e Aes256CtsHmacSha96) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool {
+	c, err := e.GetChecksumHash(protocolKey, data, usage)
+	if err != nil {
+		return false
+	}
+	return hmac.Equal(chksum, c)
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha384-192.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha384-192.go
new file mode 100644
index 0000000..6a54475
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/aes256-cts-hmac-sha384-192.go
@@ -0,0 +1,132 @@
+package crypto
+
+import (
+	"crypto/aes"
+	"crypto/hmac"
+	"crypto/sha512"
+	"hash"
+
+	"github.com/jcmturner/gokrb5/v8/crypto/common"
+	"github.com/jcmturner/gokrb5/v8/crypto/rfc8009"
+	"github.com/jcmturner/gokrb5/v8/iana/chksumtype"
+	"github.com/jcmturner/gokrb5/v8/iana/etypeID"
+)
+
+// RFC https://tools.ietf.org/html/rfc8009
+
+// Aes256CtsHmacSha384192 implements Kerberos encryption type aes256-cts-hmac-sha384-192
+type Aes256CtsHmacSha384192 struct {
+}
+
+// GetETypeID returns the EType ID number.
+func (e Aes256CtsHmacSha384192) GetETypeID() int32 {
+	return etypeID.AES256_CTS_HMAC_SHA384_192
+}
+
+// GetHashID returns the checksum type ID number.
+func (e Aes256CtsHmacSha384192) GetHashID() int32 {
+	return chksumtype.HMAC_SHA384_192_AES256
+}
+
+// GetKeyByteSize returns the number of bytes for key of this etype.
+func (e Aes256CtsHmacSha384192) GetKeyByteSize() int {
+	return 192 / 8
+}
+
+// GetKeySeedBitLength returns the number of bits for the seed for key generation.
+func (e Aes256CtsHmacSha384192) GetKeySeedBitLength() int {
+	return e.GetKeyByteSize() * 8
+}
+
+// GetHashFunc returns the hash function for this etype.
+func (e Aes256CtsHmacSha384192) GetHashFunc() func() hash.Hash {
+	return sha512.New384
+}
+
+// GetMessageBlockByteSize returns the block size for the etype's messages.
+func (e Aes256CtsHmacSha384192) GetMessageBlockByteSize() int {
+	return 1
+}
+
+// GetDefaultStringToKeyParams returns the default key derivation parameters in string form.
+func (e Aes256CtsHmacSha384192) GetDefaultStringToKeyParams() string {
+	return "00008000"
+}
+
+// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations.
+func (e Aes256CtsHmacSha384192) GetConfounderByteSize() int {
+	return aes.BlockSize
+}
+
+// GetHMACBitLength returns the bit count size of the integrity hash.
+func (e Aes256CtsHmacSha384192) GetHMACBitLength() int {
+	return 192
+}
+
+// GetCypherBlockBitLength returns the bit count size of the cypher block.
+func (e Aes256CtsHmacSha384192) GetCypherBlockBitLength() int {
+	return aes.BlockSize * 8
+}
+
+// StringToKey returns a key derived from the string provided.
+func (e Aes256CtsHmacSha384192) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) {
+	saltp := rfc8009.GetSaltP(salt, "aes256-cts-hmac-sha384-192")
+	return rfc8009.StringToKey(secret, saltp, s2kparams, e)
+}
+
+// RandomToKey returns a key from the bytes provided.
+func (e Aes256CtsHmacSha384192) RandomToKey(b []byte) []byte {
+	return rfc8009.RandomToKey(b)
+}
+
+// EncryptData encrypts the data provided.
+func (e Aes256CtsHmacSha384192) EncryptData(key, data []byte) ([]byte, []byte, error) {
+	return rfc8009.EncryptData(key, data, e)
+}
+
+// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message.
+func (e Aes256CtsHmacSha384192) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) {
+	return rfc8009.EncryptMessage(key, message, usage, e)
+}
+
+// DecryptData decrypts the data provided.
+func (e Aes256CtsHmacSha384192) DecryptData(key, data []byte) ([]byte, error) {
+	return rfc8009.DecryptData(key, data, e)
+}
+
+// DecryptMessage decrypts the message provided and verifies the integrity of the message.
+func (e Aes256CtsHmacSha384192) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) {
+	return rfc8009.DecryptMessage(key, ciphertext, usage, e)
+}
+
+// DeriveKey derives a key from the protocol key based on the usage value.
+func (e Aes256CtsHmacSha384192) DeriveKey(protocolKey, usage []byte) ([]byte, error) {
+	return rfc8009.DeriveKey(protocolKey, usage, e), nil
+}
+
+// DeriveRandom generates data needed for key generation.
+func (e Aes256CtsHmacSha384192) DeriveRandom(protocolKey, usage []byte) ([]byte, error) {
+	return rfc8009.DeriveRandom(protocolKey, usage, e)
+}
+
+// VerifyIntegrity checks the integrity of the ciphertext message.
+// As the hash is calculated over the IV concatenated with the AES cipher output, not the plaintext, the pt value
+// passed to this interface method is not used. Pass any []byte.
+func (e Aes256CtsHmacSha384192) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {
+	// We don't need pt; it is only there to satisfy the interface
+	return rfc8009.VerifyIntegrity(protocolKey, ct, usage, e)
+}
+
+// GetChecksumHash returns a keyed checksum hash of the bytes provided.
+func (e Aes256CtsHmacSha384192) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) {
+	return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e)
+}
+
+// VerifyChecksum compares the checksum of the message bytes with the checksum provided.
+func (e Aes256CtsHmacSha384192) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool {
+	c, err := e.GetChecksumHash(protocolKey, data, usage)
+	if err != nil {
+		return false
+	}
+	return hmac.Equal(chksum, c)
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/common/common.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/common/common.go
new file mode 100644
index 0000000..dab55be
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/common/common.go
@@ -0,0 +1,132 @@
+// Package common provides encryption methods common across encryption types
+package common
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+
+	"github.com/jcmturner/gokrb5/v8/crypto/etype"
+)
+
+// ZeroPad pads bytes with zeros to nearest multiple of message size m.
+func ZeroPad(b []byte, m int) ([]byte, error) {
+	if m <= 0 {
+		return nil, errors.New("Invalid message block size when padding")
+	}
+	if b == nil || len(b) == 0 {
+		return nil, errors.New("Data not valid to pad: Zero size")
+	}
+	if l := len(b) % m; l != 0 {
+		n := m - l
+		z := make([]byte, n)
+		b = append(b, z...)
+	}
+	return b, nil
+}
+
+// PKCS7Pad pads bytes according to RFC 2315 to nearest multiple of message size m.
+func PKCS7Pad(b []byte, m int) ([]byte, error) {
+	if m <= 0 {
+		return nil, errors.New("Invalid message block size when padding")
+	}
+	if b == nil || len(b) == 0 {
+		return nil, errors.New("Data not valid to pad: Zero size")
+	}
+	n := m - (len(b) % m)
+	pb := make([]byte, len(b)+n)
+	copy(pb, b)
+	copy(pb[len(b):], bytes.Repeat([]byte{byte(n)}, n))
+	return pb, nil
+}
+
+// PKCS7Unpad removes RFC 2315 padding from bytes where the message size is m.
+func PKCS7Unpad(b []byte, m int) ([]byte, error) {
+	if m <= 0 {
+		return nil, errors.New("invalid message block size when unpadding")
+	}
+	if b == nil || len(b) == 0 {
+		return nil, errors.New("padded data not valid: Zero size")
+	}
+	if len(b)%m != 0 {
+		return nil, errors.New("padded data not valid: Not multiple of message block size")
+	}
+	c := b[len(b)-1]
+	n := int(c)
+	if n == 0 || n > len(b) {
+		return nil, errors.New("padded data not valid: Data may not have been padded")
+	}
+	for i := 0; i < n; i++ {
+		if b[len(b)-n+i] != c {
+			return nil, errors.New("padded data not valid")
+		}
+	}
+	return b[:len(b)-n], nil
+}
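+
+// Illustrative sketch (not part of upstream gokrb5) of the PKCS#7 round trip for
+// a 16 byte message block size:
+//
+//	pb, _ := PKCS7Pad([]byte("abc"), 16) // 3 data bytes + 13 padding bytes of value 0x0d
+//	b, _ := PKCS7Unpad(pb, 16)           // []byte("abc")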
+
+// GetHash generates the keyed hash value according to the etype's hash function.
+func GetHash(pt, key []byte, usage []byte, etype etype.EType) ([]byte, error) {
+	k, err := etype.DeriveKey(key, usage)
+	if err != nil {
+		return nil, fmt.Errorf("unable to derive key for checksum: %v", err)
+	}
+	mac := hmac.New(etype.GetHashFunc(), k)
+	p := make([]byte, len(pt))
+	copy(p, pt)
+	mac.Write(p)
+	return mac.Sum(nil)[:etype.GetHMACBitLength()/8], nil
+}
+
+// GetChecksumHash returns a keyed checksum hash of the bytes provided.
+func GetChecksumHash(b, key []byte, usage uint32, etype etype.EType) ([]byte, error) {
+	return GetHash(b, key, GetUsageKc(usage), etype)
+}
+
+// GetIntegrityHash returns a keyed integrity hash of the bytes provided.
+func GetIntegrityHash(b, key []byte, usage uint32, etype etype.EType) ([]byte, error) {
+	return GetHash(b, key, GetUsageKi(usage), etype)
+}
+
+// VerifyChecksum compares the checksum of the msg bytes with the checksum provided.
+func VerifyChecksum(key, chksum, msg []byte, usage uint32, etype etype.EType) bool {
+	//The encrypted message is a concatenation of the encrypted output and the HMAC hash.
+	expectedMAC, _ := GetChecksumHash(msg, key, usage, etype)
+	return hmac.Equal(chksum, expectedMAC)
+}
+
+// GetUsageKc returns the checksum key usage value for the usage number un.
+//
+// See RFC 3961 5.3 key-derivation function definition.
+func GetUsageKc(un uint32) []byte {
+	return getUsage(un, 0x99)
+}
+
+// GetUsageKe returns the encryption key usage value for the usage number un
+//
+// See RFC 3961 5.3 key-derivation function definition.
+func GetUsageKe(un uint32) []byte {
+	return getUsage(un, 0xAA)
+}
+
+// GetUsageKi returns the integrity key usage value for the usage number un
+//
+// See RFC 3961 5.3 key-derivation function definition.
+func GetUsageKi(un uint32) []byte {
+	return getUsage(un, 0x55)
+}
+
+func getUsage(un uint32, o byte) []byte {
+	var buf bytes.Buffer
+	binary.Write(&buf, binary.BigEndian, un)
+	return append(buf.Bytes(), o)
+}
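+
+// Illustrative worked example (not part of upstream gokrb5): the usage value is
+// the 32-bit big-endian usage number followed by the well-known byte, so
+// GetUsageKc(3) yields []byte{0x00, 0x00, 0x00, 0x03, 0x99}.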
+
+// IterationsToS2Kparams converts the number of iterations as an integer to a string representation.
+func IterationsToS2Kparams(i uint32) string {
+	b := make([]byte, 4, 4)
+	binary.BigEndian.PutUint32(b, i)
+	return hex.EncodeToString(b)
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/crypto.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/crypto.go
new file mode 100644
index 0000000..5c96ddf
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/crypto.go
@@ -0,0 +1,175 @@
+// Package crypto implements cryptographic functions for Kerberos 5 implementation.
+package crypto
+
+import (
+	"encoding/hex"
+	"fmt"
+
+	"github.com/jcmturner/gokrb5/v8/crypto/etype"
+	"github.com/jcmturner/gokrb5/v8/iana/chksumtype"
+	"github.com/jcmturner/gokrb5/v8/iana/etypeID"
+	"github.com/jcmturner/gokrb5/v8/iana/patype"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// GetEtype returns an instance of the required etype struct for the etype ID.
+func GetEtype(id int32) (etype.EType, error) {
+	switch id {
+	case etypeID.AES128_CTS_HMAC_SHA1_96:
+		var et Aes128CtsHmacSha96
+		return et, nil
+	case etypeID.AES256_CTS_HMAC_SHA1_96:
+		var et Aes256CtsHmacSha96
+		return et, nil
+	case etypeID.AES128_CTS_HMAC_SHA256_128:
+		var et Aes128CtsHmacSha256128
+		return et, nil
+	case etypeID.AES256_CTS_HMAC_SHA384_192:
+		var et Aes256CtsHmacSha384192
+		return et, nil
+	case etypeID.DES3_CBC_SHA1_KD:
+		var et Des3CbcSha1Kd
+		return et, nil
+	case etypeID.RC4_HMAC:
+		var et RC4HMAC
+		return et, nil
+	default:
+		return nil, fmt.Errorf("unknown or unsupported EType: %d", id)
+	}
+}
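+
+// Illustrative usage sketch (not part of upstream gokrb5): resolve an etype by
+// its IANA number and derive a key from a password (the password and salt shown
+// are hypothetical):
+//
+//	et, err := GetEtype(etypeID.AES256_CTS_HMAC_SHA1_96)
+//	if err != nil {
+//		// unsupported etype
+//	}
+//	k, err := et.StringToKey("s3cret", "EXAMPLE.COMuser1", et.GetDefaultStringToKeyParams())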
+
+// GetChksumEtype returns an instance of the required etype struct for the checksum ID.
+func GetChksumEtype(id int32) (etype.EType, error) {
+	switch id {
+	case chksumtype.HMAC_SHA1_96_AES128:
+		var et Aes128CtsHmacSha96
+		return et, nil
+	case chksumtype.HMAC_SHA1_96_AES256:
+		var et Aes256CtsHmacSha96
+		return et, nil
+	case chksumtype.HMAC_SHA256_128_AES128:
+		var et Aes128CtsHmacSha256128
+		return et, nil
+	case chksumtype.HMAC_SHA384_192_AES256:
+		var et Aes256CtsHmacSha384192
+		return et, nil
+	case chksumtype.HMAC_SHA1_DES3_KD:
+		var et Des3CbcSha1Kd
+		return et, nil
+	case chksumtype.KERB_CHECKSUM_HMAC_MD5:
+		var et RC4HMAC
+		return et, nil
+	//case chksumtype.KERB_CHECKSUM_HMAC_MD5_UNSIGNED:
+	//	var et RC4HMAC
+	//	return et, nil
+	default:
+		return nil, fmt.Errorf("unknown or unsupported checksum type: %d", id)
+	}
+}
+
+// GetKeyFromPassword generates an encryption key from the principal's password.
+func GetKeyFromPassword(passwd string, cname types.PrincipalName, realm string, etypeID int32, pas types.PADataSequence) (types.EncryptionKey, etype.EType, error) {
+	var key types.EncryptionKey
+	et, err := GetEtype(etypeID)
+	if err != nil {
+		return key, et, fmt.Errorf("error getting encryption type: %v", err)
+	}
+	sk2p := et.GetDefaultStringToKeyParams()
+	var salt string
+	var paID int32
+	for _, pa := range pas {
+		switch pa.PADataType {
+		case patype.PA_PW_SALT:
+			if paID > pa.PADataType {
+				continue
+			}
+			salt = string(pa.PADataValue)
+		case patype.PA_ETYPE_INFO:
+			if paID > pa.PADataType {
+				continue
+			}
+			var eti types.ETypeInfo
+			err := eti.Unmarshal(pa.PADataValue)
+			if err != nil {
+				return key, et, fmt.Errorf("error unmarshaling PA Data to PA-ETYPE-INFO: %v", err)
+			}
+			if etypeID != eti[0].EType {
+				et, err = GetEtype(eti[0].EType)
+				if err != nil {
+					return key, et, fmt.Errorf("error getting encryption type: %v", err)
+				}
+			}
+			salt = string(eti[0].Salt)
+		case patype.PA_ETYPE_INFO2:
+			if paID > pa.PADataType {
+				continue
+			}
+			var et2 types.ETypeInfo2
+			err := et2.Unmarshal(pa.PADataValue)
+			if err != nil {
+				return key, et, fmt.Errorf("error unmarshalling PA Data to PA-ETYPE-INFO2: %v", err)
+			}
+			if etypeID != et2[0].EType {
+				et, err = GetEtype(et2[0].EType)
+				if err != nil {
+					return key, et, fmt.Errorf("error getting encryption type: %v", err)
+				}
+			}
+			if len(et2[0].S2KParams) == 4 {
+				sk2p = hex.EncodeToString(et2[0].S2KParams)
+			}
+			salt = et2[0].Salt
+		}
+	}
+	if salt == "" {
+		salt = cname.GetSalt(realm)
+	}
+	k, err := et.StringToKey(passwd, salt, sk2p)
+	if err != nil {
+		return key, et, fmt.Errorf("error deriving key from string: %+v", err)
+	}
+	key = types.EncryptionKey{
+		KeyType:  etypeID,
+		KeyValue: k,
+	}
+	return key, et, nil
+}
+
+// GetEncryptedData encrypts the data provided and returns an EncryptedData type.
+// Pass a usage value of zero to use the key provided directly rather than deriving one.
+func GetEncryptedData(plainBytes []byte, key types.EncryptionKey, usage uint32, kvno int) (types.EncryptedData, error) {
+	var ed types.EncryptedData
+	et, err := GetEtype(key.KeyType)
+	if err != nil {
+		return ed, fmt.Errorf("error getting etype: %v", err)
+	}
+	_, b, err := et.EncryptMessage(key.KeyValue, plainBytes, usage)
+	if err != nil {
+		return ed, err
+	}
+
+	ed = types.EncryptedData{
+		EType:  key.KeyType,
+		Cipher: b,
+		KVNO:   kvno,
+	}
+	return ed, nil
+}
+
+// DecryptEncPart decrypts the EncryptedData.
+func DecryptEncPart(ed types.EncryptedData, key types.EncryptionKey, usage uint32) ([]byte, error) {
+	return DecryptMessage(ed.Cipher, key, usage)
+}
+
+// DecryptMessage decrypts the ciphertext and verifies the integrity.
+func DecryptMessage(ciphertext []byte, key types.EncryptionKey, usage uint32) ([]byte, error) {
+	et, err := GetEtype(key.KeyType)
+	if err != nil {
+		return []byte{}, fmt.Errorf("error decrypting: %v", err)
+	}
+	b, err := et.DecryptMessage(key.KeyValue, ciphertext, usage)
+	if err != nil {
+		return nil, fmt.Errorf("error decrypting: %v", err)
+	}
+	return b, nil
+}
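+
+// Illustrative round-trip sketch (not part of upstream gokrb5), assuming key is
+// a populated types.EncryptionKey; the usage number 1 and kvno 1 are placeholders:
+//
+//	ed, err := GetEncryptedData(plaintext, key, 1, 1)
+//	if err != nil {
+//		// handle encryption error
+//	}
+//	pt, err := DecryptEncPart(ed, key, 1)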
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/des3-cbc-sha1-kd.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/des3-cbc-sha1-kd.go
new file mode 100644
index 0000000..6e650eb
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/des3-cbc-sha1-kd.go
@@ -0,0 +1,139 @@
+package crypto
+
+import (
+	"crypto/des"
+	"crypto/hmac"
+	"crypto/sha1"
+	"errors"
+	"hash"
+
+	"github.com/jcmturner/gokrb5/v8/crypto/common"
+	"github.com/jcmturner/gokrb5/v8/crypto/rfc3961"
+	"github.com/jcmturner/gokrb5/v8/iana/chksumtype"
+	"github.com/jcmturner/gokrb5/v8/iana/etypeID"
+)
+
+//RFC: 3961 Section 6.3
+
+// Des3CbcSha1Kd implements Kerberos encryption type des3-cbc-hmac-sha1-kd
+type Des3CbcSha1Kd struct {
+}
+
+// GetETypeID returns the EType ID number.
+func (e Des3CbcSha1Kd) GetETypeID() int32 {
+	return etypeID.DES3_CBC_SHA1_KD
+}
+
+// GetHashID returns the checksum type ID number.
+func (e Des3CbcSha1Kd) GetHashID() int32 {
+	return chksumtype.HMAC_SHA1_DES3_KD
+}
+
+// GetKeyByteSize returns the number of bytes for key of this etype.
+func (e Des3CbcSha1Kd) GetKeyByteSize() int {
+	return 24
+}
+
+// GetKeySeedBitLength returns the number of bits for the seed for key generation.
+func (e Des3CbcSha1Kd) GetKeySeedBitLength() int {
+	return 21 * 8
+}
+
+// GetHashFunc returns the hash function for this etype.
+func (e Des3CbcSha1Kd) GetHashFunc() func() hash.Hash {
+	return sha1.New
+}
+
+// GetMessageBlockByteSize returns the block size for the etype's messages.
+func (e Des3CbcSha1Kd) GetMessageBlockByteSize() int {
+	//For traditional CBC mode with padding, it would be the underlying cipher's block size
+	return des.BlockSize
+}
+
+// GetDefaultStringToKeyParams returns the default key derivation parameters in string form.
+func (e Des3CbcSha1Kd) GetDefaultStringToKeyParams() string {
+	var s string
+	return s
+}
+
+// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations.
+func (e Des3CbcSha1Kd) GetConfounderByteSize() int {
+	return des.BlockSize
+}
+
+// GetHMACBitLength returns the bit count size of the integrity hash.
+func (e Des3CbcSha1Kd) GetHMACBitLength() int {
+	return e.GetHashFunc()().Size() * 8
+}
+
+// GetCypherBlockBitLength returns the bit count size of the cypher block.
+func (e Des3CbcSha1Kd) GetCypherBlockBitLength() int {
+	return des.BlockSize * 8
+}
+
+// StringToKey returns a key derived from the string provided.
+func (e Des3CbcSha1Kd) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) {
+	if s2kparams != "" {
+		return []byte{}, errors.New("s2kparams must be an empty string")
+	}
+	return rfc3961.DES3StringToKey(secret, salt, e)
+}
+
+// RandomToKey returns a key from the bytes provided.
+func (e Des3CbcSha1Kd) RandomToKey(b []byte) []byte {
+	return rfc3961.DES3RandomToKey(b)
+}
+
+// DeriveRandom generates data needed for key generation.
+func (e Des3CbcSha1Kd) DeriveRandom(protocolKey, usage []byte) ([]byte, error) {
+	r, err := rfc3961.DeriveRandom(protocolKey, usage, e)
+	return r, err
+}
+
+// DeriveKey derives a key from the protocol key based on the usage value.
+func (e Des3CbcSha1Kd) DeriveKey(protocolKey, usage []byte) ([]byte, error) {
+	r, err := e.DeriveRandom(protocolKey, usage)
+	if err != nil {
+		return nil, err
+	}
+	return e.RandomToKey(r), nil
+}
+
+// EncryptData encrypts the data provided.
+func (e Des3CbcSha1Kd) EncryptData(key, data []byte) ([]byte, []byte, error) {
+	return rfc3961.DES3EncryptData(key, data, e)
+}
+
+// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message.
+func (e Des3CbcSha1Kd) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) {
+	return rfc3961.DES3EncryptMessage(key, message, usage, e)
+}
+
+// DecryptData decrypts the data provided.
+func (e Des3CbcSha1Kd) DecryptData(key, data []byte) ([]byte, error) {
+	return rfc3961.DES3DecryptData(key, data, e)
+}
+
+// DecryptMessage decrypts the message provided and verifies the integrity of the message.
+func (e Des3CbcSha1Kd) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) {
+	return rfc3961.DES3DecryptMessage(key, ciphertext, usage, e)
+}
+
+// VerifyIntegrity checks the integrity of the plaintext message.
+func (e Des3CbcSha1Kd) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {
+	return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e)
+}
+
+// GetChecksumHash returns a keyed checksum hash of the bytes provided.
+func (e Des3CbcSha1Kd) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) {
+	return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e)
+}
+
+// VerifyChecksum checks that the checksum of the message bytes matches the checksum provided.
+func (e Des3CbcSha1Kd) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool {
+	c, err := e.GetChecksumHash(protocolKey, data, usage)
+	if err != nil {
+		return false
+	}
+	return hmac.Equal(chksum, c)
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/etype/etype.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/etype/etype.go
new file mode 100644
index 0000000..ab1496d
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/etype/etype.go
@@ -0,0 +1,29 @@
+// Package etype provides the Kerberos Encryption Type interface
+package etype
+
+import "hash"
+
+// EType is the interface defining the Encryption Type.
+type EType interface {
+	GetETypeID() int32
+	GetHashID() int32
+	GetKeyByteSize() int
+	GetKeySeedBitLength() int
+	GetDefaultStringToKeyParams() string
+	StringToKey(secret, salt, s2kparams string) ([]byte, error)
+	RandomToKey(b []byte) []byte
+	GetHMACBitLength() int
+	GetMessageBlockByteSize() int
+	EncryptData(key, data []byte) ([]byte, []byte, error)
+	EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error)
+	DecryptData(key, data []byte) ([]byte, error)
+	DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error)
+	GetCypherBlockBitLength() int
+	GetConfounderByteSize() int
+	DeriveKey(protocolKey, usage []byte) ([]byte, error)
+	DeriveRandom(protocolKey, usage []byte) ([]byte, error)
+	VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool
+	GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error)
+	VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool
+	GetHashFunc() func() hash.Hash
+}
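To show how this interface decouples callers from any specific algorithm, here is a hypothetical helper (sealWithPassword is not part of the library) that accepts any EType value, used below with the Des3CbcSha1Kd implementation; the password and salt are illustrative:

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gokrb5/v8/crypto"
	"github.com/jcmturner/gokrb5/v8/crypto/etype"
)

// sealWithPassword is a hypothetical helper: it derives a key with whatever
// string-to-key rules the etype defines, then encrypts a message under it.
func sealWithPassword(e etype.EType, password, salt string, msg []byte, usage uint32) ([]byte, error) {
	key, err := e.StringToKey(password, salt, e.GetDefaultStringToKeyParams())
	if err != nil {
		return nil, err
	}
	_, ct, err := e.EncryptMessage(key, msg, usage)
	return ct, err
}

func main() {
	// Any value satisfying etype.EType works here; Des3CbcSha1Kd is just an example.
	ct, err := sealWithPassword(crypto.Des3CbcSha1Kd{}, "password", "EXAMPLE.COMuser", []byte("hello"), 1)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes of ciphertext\n", len(ct))
}
```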
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rc4-hmac.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rc4-hmac.go
new file mode 100644
index 0000000..42f84b8
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rc4-hmac.go
@@ -0,0 +1,133 @@
+package crypto
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/md5"
+	"hash"
+	"io"
+
+	"github.com/jcmturner/gokrb5/v8/crypto/rfc3961"
+	"github.com/jcmturner/gokrb5/v8/crypto/rfc4757"
+	"github.com/jcmturner/gokrb5/v8/iana/chksumtype"
+	"github.com/jcmturner/gokrb5/v8/iana/etypeID"
+	"golang.org/x/crypto/md4"
+)
+
+// RC4HMAC implements Kerberos encryption type rc4-hmac
+type RC4HMAC struct {
+}
+
+// GetETypeID returns the EType ID number.
+func (e RC4HMAC) GetETypeID() int32 {
+	return etypeID.RC4_HMAC
+}
+
+// GetHashID returns the checksum type ID number.
+func (e RC4HMAC) GetHashID() int32 {
+	return chksumtype.KERB_CHECKSUM_HMAC_MD5
+}
+
+// GetKeyByteSize returns the number of bytes for key of this etype.
+func (e RC4HMAC) GetKeyByteSize() int {
+	return 16
+}
+
+// GetKeySeedBitLength returns the number of bits for the seed for key generation.
+func (e RC4HMAC) GetKeySeedBitLength() int {
+	return e.GetKeyByteSize() * 8
+}
+
+// GetHashFunc returns the hash function for this etype.
+func (e RC4HMAC) GetHashFunc() func() hash.Hash {
+	return md5.New
+}
+
+// GetMessageBlockByteSize returns the block size for the etype's messages.
+func (e RC4HMAC) GetMessageBlockByteSize() int {
+	return 1
+}
+
+// GetDefaultStringToKeyParams returns the default key derivation parameters in string form.
+func (e RC4HMAC) GetDefaultStringToKeyParams() string {
+	return ""
+}
+
+// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations.
+func (e RC4HMAC) GetConfounderByteSize() int {
+	return 8
+}
+
+// GetHMACBitLength returns the bit count size of the integrity hash.
+func (e RC4HMAC) GetHMACBitLength() int {
+	return md5.Size * 8
+}
+
+// GetCypherBlockBitLength returns the bit count size of the cypher block.
+func (e RC4HMAC) GetCypherBlockBitLength() int {
+	return 8 // doesn't really apply
+}
+
+// StringToKey returns a key derived from the string provided.
+func (e RC4HMAC) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) {
+	return rfc4757.StringToKey(secret)
+}
+
+// RandomToKey returns a key from the bytes provided.
+func (e RC4HMAC) RandomToKey(b []byte) []byte {
+	r := bytes.NewReader(b)
+	h := md4.New()
+	io.Copy(h, r)
+	return h.Sum(nil)
+}
+
+// EncryptData encrypts the data provided.
+func (e RC4HMAC) EncryptData(key, data []byte) ([]byte, []byte, error) {
+	b, err := rfc4757.EncryptData(key, data, e)
+	return []byte{}, b, err
+}
+
+// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message.
+func (e RC4HMAC) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) {
+	b, err := rfc4757.EncryptMessage(key, message, usage, false, e)
+	return []byte{}, b, err
+}
+
+// DecryptData decrypts the data provided.
+func (e RC4HMAC) DecryptData(key, data []byte) ([]byte, error) {
+	return rfc4757.DecryptData(key, data, e)
+}
+
+// DecryptMessage decrypts the message provided and verifies the integrity of the message.
+func (e RC4HMAC) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) {
+	return rfc4757.DecryptMessage(key, ciphertext, usage, false, e)
+}
+
+// DeriveKey derives a key from the protocol key based on the usage value.
+func (e RC4HMAC) DeriveKey(protocolKey, usage []byte) ([]byte, error) {
+	return rfc4757.HMAC(protocolKey, usage), nil
+}
+
+// DeriveRandom generates data needed for key generation.
+func (e RC4HMAC) DeriveRandom(protocolKey, usage []byte) ([]byte, error) {
+	return rfc3961.DeriveRandom(protocolKey, usage, e)
+}
+
+// VerifyIntegrity checks the integrity of the plaintext message.
+func (e RC4HMAC) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {
+	return rfc4757.VerifyIntegrity(protocolKey, pt, ct, e)
+}
+
+// GetChecksumHash returns a keyed checksum hash of the bytes provided.
+func (e RC4HMAC) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) {
+	return rfc4757.Checksum(protocolKey, usage, data)
+}
+
+// VerifyChecksum checks that the checksum of the message bytes matches the checksum provided.
+func (e RC4HMAC) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool {
+	checksum, err := rfc4757.Checksum(protocolKey, usage, data)
+	if err != nil {
+		return false
+	}
+	return hmac.Equal(checksum, chksum)
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/encryption.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/encryption.go
new file mode 100644
index 0000000..1383258
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/encryption.go
@@ -0,0 +1,119 @@
+// Package rfc3961 provides encryption and checksum methods as specified in RFC 3961
+package rfc3961
+
+import (
+	"crypto/cipher"
+	"crypto/des"
+	"crypto/hmac"
+	"crypto/rand"
+	"errors"
+	"fmt"
+
+	"github.com/jcmturner/gokrb5/v8/crypto/common"
+	"github.com/jcmturner/gokrb5/v8/crypto/etype"
+)
+
+// DES3EncryptData encrypts the data provided using DES3 and methods specific to the etype provided.
+func DES3EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) {
+	if len(key) != e.GetKeyByteSize() {
+		return nil, nil, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key))
+	}
+	data, _ = common.ZeroPad(data, e.GetMessageBlockByteSize())
+
+	block, err := des.NewTripleDESCipher(key)
+	if err != nil {
+		return nil, nil, fmt.Errorf("error creating cipher: %v", err)
+	}
+
+	//RFC 3961: initial cipher state      All bits zero
+	ivz := make([]byte, des.BlockSize)
+
+	ct := make([]byte, len(data))
+	mode := cipher.NewCBCEncrypter(block, ivz)
+	mode.CryptBlocks(ct, data)
+	return ct[len(ct)-e.GetMessageBlockByteSize():], ct, nil
+}
+
+// DES3EncryptMessage encrypts the message provided using DES3 and methods specific to the etype provided.
+// The encrypted data is concatenated with its integrity hash to create an encrypted message.
+func DES3EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) {
+	//confounder
+	c := make([]byte, e.GetConfounderByteSize())
+	_, err := rand.Read(c)
+	if err != nil {
+		return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err)
+	}
+	plainBytes := append(c, message...)
+	plainBytes, _ = common.ZeroPad(plainBytes, e.GetMessageBlockByteSize())
+
+	// Derive key for encryption from usage
+	var k []byte
+	if usage != 0 {
+		k, err = e.DeriveKey(key, common.GetUsageKe(usage))
+		if err != nil {
+			return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err)
+		}
+	}
+
+	iv, b, err := e.EncryptData(k, plainBytes)
+	if err != nil {
+		return iv, b, fmt.Errorf("error encrypting data: %v", err)
+	}
+
+	// Generate and append integrity hash
+	ih, err := common.GetIntegrityHash(plainBytes, key, usage, e)
+	if err != nil {
+		return iv, b, fmt.Errorf("error encrypting data: %v", err)
+	}
+	b = append(b, ih...)
+	return iv, b, nil
+}
+
+// DES3DecryptData decrypts the data provided using DES3 and methods specific to the etype provided.
+func DES3DecryptData(key, data []byte, e etype.EType) ([]byte, error) {
+	if len(key) != e.GetKeyByteSize() {
+		return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key))
+	}
+
+	if len(data) < des.BlockSize || len(data)%des.BlockSize != 0 {
+		return []byte{}, errors.New("ciphertext is not a multiple of the block size")
+	}
+	block, err := des.NewTripleDESCipher(key)
+	if err != nil {
+		return []byte{}, fmt.Errorf("error creating cipher: %v", err)
+	}
+	pt := make([]byte, len(data))
+	ivz := make([]byte, des.BlockSize)
+	mode := cipher.NewCBCDecrypter(block, ivz)
+	mode.CryptBlocks(pt, data)
+	return pt, nil
+}
+
+// DES3DecryptMessage decrypts the message provided using DES3 and methods specific to the etype provided.
+// The integrity of the message is also verified.
+func DES3DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) {
+	//Derive the key
+	k, err := e.DeriveKey(key, common.GetUsageKe(usage))
+	if err != nil {
+		return nil, fmt.Errorf("error deriving key: %v", err)
+	}
+	// Strip off the checksum from the end
+	b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8])
+	if err != nil {
+		return nil, fmt.Errorf("error decrypting: %v", err)
+	}
+	//Verify checksum
+	if !e.VerifyIntegrity(key, ciphertext, b, usage) {
+		return nil, errors.New("error decrypting: integrity verification failed")
+	}
+	//Remove the confounder bytes
+	return b[e.GetConfounderByteSize():], nil
+}
+
+// VerifyIntegrity verifies the integrity of ciphertext bytes ct.
+func VerifyIntegrity(key, ct, pt []byte, usage uint32, etype etype.EType) bool {
+	h := make([]byte, etype.GetHMACBitLength()/8)
+	copy(h, ct[len(ct)-etype.GetHMACBitLength()/8:])
+	expectedMAC, _ := common.GetIntegrityHash(pt, key, usage, etype)
+	return hmac.Equal(h, expectedMAC)
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/keyDerivation.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/keyDerivation.go
new file mode 100644
index 0000000..ed9b169
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/keyDerivation.go
@@ -0,0 +1,169 @@
+package rfc3961
+
+import (
+	"bytes"
+
+	"github.com/jcmturner/gokrb5/v8/crypto/etype"
+)
+
+const (
+	prfconstant = "prf"
+)
+
+// DeriveRandom implements the RFC 3961 defined function: DR(Key, Constant) = k-truncate(E(Key, Constant, initial-cipher-state)).
+//
+// key: base key or protocol key. Likely to be a key from a keytab file.
+//
+// usage: a constant.
+//
+// n: block size in bits (not bytes) - note if you use something like aes.BlockSize this is in bytes.
+//
+// k: key length / key seed length in bits. Eg. for AES256 this value is 256.
+//
+// e: the encryption etype function to use.
+func DeriveRandom(key, usage []byte, e etype.EType) ([]byte, error) {
+	n := e.GetCypherBlockBitLength()
+	k := e.GetKeySeedBitLength()
+	//Ensure the usage constant is at least the size of the cypher block size. Pass it through the nfold algorithm that will "stretch" it if needs be.
+	nFoldUsage := Nfold(usage, n)
+	//k-truncate implemented by creating a byte array the size of k (k is in bits hence /8)
+	out := make([]byte, k/8)
+	// Keep feeding the output back into the encryption function until at least k bits of output have been generated.
+	_, K, err := e.EncryptData(key, nFoldUsage)
+	if err != nil {
+		return out, err
+	}
+	for i := copy(out, K); i < len(out); {
+		_, K, _ = e.EncryptData(key, K)
+		i = i + copy(out[i:], K)
+	}
+	return out, nil
+}
+
+// DeriveKey derives a key from the protocol key based on the usage and the etype's specific methods.
+func DeriveKey(protocolKey, usage []byte, e etype.EType) ([]byte, error) {
+	r, err := e.DeriveRandom(protocolKey, usage)
+	if err != nil {
+		return nil, err
+	}
+	return e.RandomToKey(r), nil
+}
+
+// RandomToKey returns a key from the bytes provided according to the definition in RFC 3961.
+func RandomToKey(b []byte) []byte {
+	return b
+}
+
+// DES3RandomToKey returns a key from the bytes provided according to the definition in RFC 3961 for DES3 etypes.
+func DES3RandomToKey(b []byte) []byte {
+	r := fixWeakKey(stretch56Bits(b[:7]))
+	r2 := fixWeakKey(stretch56Bits(b[7:14]))
+	r = append(r, r2...)
+	r3 := fixWeakKey(stretch56Bits(b[14:21]))
+	r = append(r, r3...)
+	return r
+}
+
+// DES3StringToKey returns a key derived from the string provided according to the definition in RFC 3961 for DES3 etypes.
+func DES3StringToKey(secret, salt string, e etype.EType) ([]byte, error) {
+	s := secret + salt
+	tkey := e.RandomToKey(Nfold([]byte(s), e.GetKeySeedBitLength()))
+	return e.DeriveKey(tkey, []byte("kerberos"))
+}
+
+// PseudoRandom function as defined in RFC 3961
+func PseudoRandom(key, b []byte, e etype.EType) ([]byte, error) {
+	h := e.GetHashFunc()()
+	h.Write(b)
+	tmp := h.Sum(nil)[:e.GetMessageBlockByteSize()]
+	k, err := e.DeriveKey(key, []byte(prfconstant))
+	if err != nil {
+		return []byte{}, err
+	}
+	_, prf, err := e.EncryptData(k, tmp)
+	if err != nil {
+		return []byte{}, err
+	}
+	return prf, nil
+}
+
+func stretch56Bits(b []byte) []byte {
+	d := make([]byte, len(b), len(b))
+	copy(d, b)
+	var lb byte
+	for i, v := range d {
+		bv, nb := calcEvenParity(v)
+		d[i] = nb
+		if bv != 0 {
+			lb = lb | (1 << uint(i+1))
+		} else {
+			lb = lb &^ (1 << uint(i+1))
+		}
+	}
+	_, lb = calcEvenParity(lb)
+	d = append(d, lb)
+	return d
+}
+
+func calcEvenParity(b byte) (uint8, uint8) {
+	lowestbit := b & 0x01
+	// c counter of 1s in the first 7 bits of the byte
+	var c int
+	// Iterate over the highest 7 bits (hence p starts at 1 not zero) and count the 1s.
+	for p := 1; p < 8; p++ {
+		v := b & (1 << uint(p))
+		if v != 0 {
+			c++
+		}
+	}
+	if c%2 == 0 {
+		//Even number of 1s so set parity to 1
+		b = b | 1
+	} else {
+		//Odd number of 1s so set parity to 0
+		b = b &^ 1
+	}
+	return lowestbit, b
+}
+
+func fixWeakKey(b []byte) []byte {
+	if weak(b) {
+		b[7] ^= 0xF0
+	}
+	return b
+}
+
+func weak(b []byte) bool {
+	// weak keys from https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-67r1.pdf
+	weakKeys := [4][]byte{
+		{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01},
+		{0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE},
+		{0xE0, 0xE0, 0xE0, 0xE0, 0xF1, 0xF1, 0xF1, 0xF1},
+		{0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E},
+	}
+	semiWeakKeys := [12][]byte{
+		{0x01, 0x1F, 0x01, 0x1F, 0x01, 0x0E, 0x01, 0x0E},
+		{0x1F, 0x01, 0x1F, 0x01, 0x0E, 0x01, 0x0E, 0x01},
+		{0x01, 0xE0, 0x01, 0xE0, 0x01, 0xF1, 0x01, 0xF1},
+		{0xE0, 0x01, 0xE0, 0x01, 0xF1, 0x01, 0xF1, 0x01},
+		{0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE},
+		{0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01},
+		{0x1F, 0xE0, 0x1F, 0xE0, 0x0E, 0xF1, 0x0E, 0xF1},
+		{0xE0, 0x1F, 0xE0, 0x1F, 0xF1, 0x0E, 0xF1, 0x0E},
+		{0x1F, 0xFE, 0x1F, 0xFE, 0x0E, 0xFE, 0x0E, 0xFE},
+		{0xFE, 0x1F, 0xFE, 0x1F, 0xFE, 0x0E, 0xFE, 0x0E},
+		{0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE},
+		{0xFE, 0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1},
+	}
+	for _, k := range weakKeys {
+		if bytes.Equal(b, k) {
+			return true
+		}
+	}
+	for _, k := range semiWeakKeys {
+		if bytes.Equal(b, k) {
+			return true
+		}
+	}
+	return false
+}
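As a sketch of the derivation above, the DES3 etype can be used to derive a sub-key from a base key and a usage constant (both values below are purely illustrative):

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/jcmturner/gokrb5/v8/crypto"
	"github.com/jcmturner/gokrb5/v8/crypto/rfc3961"
)

func main() {
	e := crypto.Des3CbcSha1Kd{}

	// Base (protocol) key: 24 bytes for DES3. All zeros purely for illustration.
	baseKey := make([]byte, e.GetKeyByteSize())

	// Derive a sub-key for an illustrative usage constant; the constant is
	// n-folded internally so it need not match the cipher block size.
	k, err := rfc3961.DeriveKey(baseKey, []byte("kerberos"), e)
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(k))
}
```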
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/nfold.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/nfold.go
new file mode 100644
index 0000000..9536b1e
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3961/nfold.go
@@ -0,0 +1,107 @@
+package rfc3961
+
+// Implementation of the n-fold algorithm as defined in RFC 3961.
+
+/* Credits
+This golang implementation of nfold used the following project for help with implementation detail.
+Although their source is in java it was helpful as a reference implementation of the RFC.
+You can find the source code of their open source project along with license information below.
+We acknowledge and are grateful to these developers for their contributions to open source.
+
+Project: Apache Directory (http://directory.apache.org/)
+https://svn.apache.org/repos/asf/directory/apacheds/tags/1.5.1/kerberos-shared/src/main/java/org/apache/directory/server/kerberos/shared/crypto/encryption/NFold.java
+License: http://www.apache.org/licenses/LICENSE-2.0
+*/
+
+// Nfold expands the key to ensure it is not smaller than one cipher block.
+// Defined in RFC 3961.
+//
+// m input bytes that will be "stretched" to the least common multiple of n bits and the bit length of m.
+func Nfold(m []byte, n int) []byte {
+	k := len(m) * 8
+
+	//Get the lowest common multiple of the two bit sizes
+	lcm := lcm(n, k)
+	replicate := lcm / k
+	var sumBytes []byte
+
+	for i := 0; i < replicate; i++ {
+		rotation := 13 * i
+		sumBytes = append(sumBytes, rotateRight(m, rotation)...)
+	}
+
+	nfold := make([]byte, n/8)
+	sum := make([]byte, n/8)
+	for i := 0; i < lcm/n; i++ {
+		for j := 0; j < n/8; j++ {
+			sum[j] = sumBytes[j+(i*len(sum))]
+		}
+		nfold = onesComplementAddition(nfold, sum)
+	}
+	return nfold
+}
+
+func onesComplementAddition(n1, n2 []byte) []byte {
+	numBits := len(n1) * 8
+	out := make([]byte, numBits/8)
+	carry := 0
+	for i := numBits - 1; i > -1; i-- {
+		n1b := getBit(&n1, i)
+		n2b := getBit(&n2, i)
+		s := n1b + n2b + carry
+
+		if s == 0 || s == 1 {
+			setBit(&out, i, s)
+			carry = 0
+		} else if s == 2 {
+			carry = 1
+		} else if s == 3 {
+			setBit(&out, i, 1)
+			carry = 1
+		}
+	}
+	if carry == 1 {
+		carryArray := make([]byte, len(n1))
+		carryArray[len(carryArray)-1] = 1
+		out = onesComplementAddition(out, carryArray)
+	}
+	return out
+}
+
+func rotateRight(b []byte, step int) []byte {
+	out := make([]byte, len(b))
+	bitLen := len(b) * 8
+	for i := 0; i < bitLen; i++ {
+		v := getBit(&b, i)
+		setBit(&out, (i+step)%bitLen, v)
+	}
+	return out
+}
+
+func lcm(x, y int) int {
+	return (x * y) / gcd(x, y)
+}
+
+func gcd(x, y int) int {
+	for y != 0 {
+		x, y = y, x%y
+	}
+	return x
+}
+
+func getBit(b *[]byte, p int) int {
+	pByte := p / 8
+	pBit := uint(p % 8)
+	vByte := (*b)[pByte]
+	vInt := int(vByte >> (8 - (pBit + 1)) & 0x0001)
+	return vInt
+}
+
+func setBit(b *[]byte, p, v int) {
+	pByte := p / 8
+	pBit := uint(p % 8)
+	oldByte := (*b)[pByte]
+	var newByte byte
+	newByte = byte(v<<(8-(pBit+1))) | oldByte
+	(*b)[pByte] = newByte
+}
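A short sketch of the expansion: folding the constant "kerberos" to 64 bits (one DES block) and to 168 bits (the DES3 key seed length). The outputs are not asserted here; RFC 3961 appendix A lists the official n-fold test vectors.

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/jcmturner/gokrb5/v8/crypto/rfc3961"
)

func main() {
	// Fold the constant "kerberos" (64 bits of input) to 64 bits and to
	// 168 bits to see how the same input is stretched to each target size.
	for _, bits := range []int{64, 168} {
		out := rfc3961.Nfold([]byte("kerberos"), bits)
		fmt.Printf("%d-fold: %s\n", bits, hex.EncodeToString(out))
	}
}
```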
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/encryption.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/encryption.go
new file mode 100644
index 0000000..5ff89e8
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/encryption.go
@@ -0,0 +1,89 @@
+// Package rfc3962 provides encryption and checksum methods as specified in RFC 3962
+package rfc3962
+
+import (
+	"crypto/rand"
+	"errors"
+	"fmt"
+
+	"github.com/jcmturner/aescts/v2"
+	"github.com/jcmturner/gokrb5/v8/crypto/common"
+	"github.com/jcmturner/gokrb5/v8/crypto/etype"
+)
+
+// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 3962.
+func EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) {
+	if len(key) != e.GetKeyByteSize() {
+		return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key))
+	}
+	ivz := make([]byte, e.GetCypherBlockBitLength()/8)
+	return aescts.Encrypt(key, ivz, data)
+}
+
+// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 3962.
+// The encrypted data is concatenated with its integrity hash to create an encrypted message.
+func EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) {
+	if len(key) != e.GetKeyByteSize() {
+		return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key))
+	}
+	//confounder
+	c := make([]byte, e.GetConfounderByteSize())
+	_, err := rand.Read(c)
+	if err != nil {
+		return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err)
+	}
+	plainBytes := append(c, message...)
+
+	// Derive key for encryption from usage
+	var k []byte
+	if usage != 0 {
+		k, err = e.DeriveKey(key, common.GetUsageKe(usage))
+		if err != nil {
+			return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err)
+		}
+	}
+
+	// Encrypt the data
+	iv, b, err := e.EncryptData(k, plainBytes)
+	if err != nil {
+		return iv, b, fmt.Errorf("error encrypting data: %v", err)
+	}
+
+	// Generate and append integrity hash
+	ih, err := common.GetIntegrityHash(plainBytes, key, usage, e)
+	if err != nil {
+		return iv, b, fmt.Errorf("error encrypting data: %v", err)
+	}
+	b = append(b, ih...)
+	return iv, b, nil
+}
+
+// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 3962.
+func DecryptData(key, data []byte, e etype.EType) ([]byte, error) {
+	if len(key) != e.GetKeyByteSize() {
+		return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key))
+	}
+	ivz := make([]byte, e.GetCypherBlockBitLength()/8)
+	return aescts.Decrypt(key, ivz, data)
+}
+
+// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 3962.
+// The integrity of the message is also verified.
+func DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) {
+	//Derive the key
+	k, err := e.DeriveKey(key, common.GetUsageKe(usage))
+	if err != nil {
+		return nil, fmt.Errorf("error deriving key: %v", err)
+	}
+	// Strip off the checksum from the end
+	b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8])
+	if err != nil {
+		return nil, err
+	}
+	//Verify checksum
+	if !e.VerifyIntegrity(key, ciphertext, b, usage) {
+		return nil, errors.New("integrity verification failed")
+	}
+	//Remove the confounder bytes
+	return b[e.GetConfounderByteSize():], nil
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/keyDerivation.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/keyDerivation.go
new file mode 100644
index 0000000..fb402d9
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc3962/keyDerivation.go
@@ -0,0 +1,51 @@
+package rfc3962
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+
+	"github.com/jcmturner/gofork/x/crypto/pbkdf2"
+	"github.com/jcmturner/gokrb5/v8/crypto/etype"
+)
+
+const (
+	s2kParamsZero = 4294967296
+)
+
+// StringToKey returns a key derived from the string provided according to the definition in RFC 3961.
+func StringToKey(secret, salt, s2kparams string, e etype.EType) ([]byte, error) {
+	i, err := S2KparamsToItertions(s2kparams)
+	if err != nil {
+		return nil, err
+	}
+	return StringToKeyIter(secret, salt, i, e)
+}
+
+// StringToPBKDF2 generates an encryption key from a pass phrase and salt string using the PBKDF2 function from PKCS #5 v2.0
+func StringToPBKDF2(secret, salt string, iterations int64, e etype.EType) []byte {
+	return pbkdf2.Key64([]byte(secret), []byte(salt), iterations, int64(e.GetKeyByteSize()), e.GetHashFunc())
+}
+
+// StringToKeyIter returns a key derived from the string provided according to the definition in RFC 3961.
+func StringToKeyIter(secret, salt string, iterations int64, e etype.EType) ([]byte, error) {
+	tkey := e.RandomToKey(StringToPBKDF2(secret, salt, iterations, e))
+	return e.DeriveKey(tkey, []byte("kerberos"))
+}
+
+// S2KparamsToItertions converts the string representation of iterations to an integer
+func S2KparamsToItertions(s2kparams string) (int64, error) {
+	//The s2kparams string should be a hex string representing 4 bytes
+	//The 4 bytes represent a number in big endian order
+	//If the value is zero then the number of iterations should be 4,294,967,296 (2^32)
+	var i uint32
+	if len(s2kparams) != 8 {
+		return int64(s2kParamsZero), errors.New("invalid s2kparams length")
+	}
+	b, err := hex.DecodeString(s2kparams)
+	if err != nil {
+		return int64(s2kParamsZero), errors.New("invalid s2kparams, cannot decode string to bytes")
+	}
+	i = binary.BigEndian.Uint32(b)
+	return int64(i), nil
+}
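For example, the s2kparams hex string "00001000" decodes to the big-endian value 4096, so string-to-key would run 4096 PBKDF2 iterations:

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gokrb5/v8/crypto/rfc3962"
)

func main() {
	// "00001000" is the four bytes 0x00 0x00 0x10 0x00 read big-endian: 4096 iterations.
	i, err := rfc3962.S2KparamsToItertions("00001000")
	if err != nil {
		panic(err)
	}
	fmt.Println(i) // 4096
}
```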
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/checksum.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/checksum.go
new file mode 100644
index 0000000..45276e9
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/checksum.go
@@ -0,0 +1,40 @@
+package rfc4757
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/md5"
+	"io"
+)
+
+// Checksum returns a hash of the data in accordance with RFC 4757
+func Checksum(key []byte, usage uint32, data []byte) ([]byte, error) {
+	// Create hashing key
+	s := append([]byte(`signaturekey`), byte(0x00)) //includes zero octet at end
+	mac := hmac.New(md5.New, key)
+	mac.Write(s)
+	Ksign := mac.Sum(nil)
+
+	// Format data
+	tb := UsageToMSMsgType(usage)
+	p := append(tb, data...)
+	h := md5.New()
+	rb := bytes.NewReader(p)
+	_, err := io.Copy(h, rb)
+	if err != nil {
+		return []byte{}, err
+	}
+	tmp := h.Sum(nil)
+
+	// Generate HMAC
+	mac = hmac.New(md5.New, Ksign)
+	mac.Write(tmp)
+	return mac.Sum(nil), nil
+}
+
+// HMAC returns a keyed MD5 checksum of the data
+func HMAC(key []byte, data []byte) []byte {
+	mac := hmac.New(md5.New, key)
+	mac.Write(data)
+	return mac.Sum(nil)
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/encryption.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/encryption.go
new file mode 100644
index 0000000..fdebe73
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/encryption.go
@@ -0,0 +1,80 @@
+// Package rfc4757 provides encryption and checksum methods as specified in RFC 4757
+package rfc4757
+
+import (
+	"crypto/hmac"
+	"crypto/rand"
+	"crypto/rc4"
+	"errors"
+	"fmt"
+
+	"github.com/jcmturner/gokrb5/v8/crypto/etype"
+)
+
+// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 4757.
+func EncryptData(key, data []byte, e etype.EType) ([]byte, error) {
+	if len(key) != e.GetKeyByteSize() {
+		return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key))
+	}
+	rc4Cipher, err := rc4.NewCipher(key)
+	if err != nil {
+		return []byte{}, fmt.Errorf("error creating RC4 cipher: %v", err)
+	}
+	ed := make([]byte, len(data))
+	copy(ed, data)
+	rc4Cipher.XORKeyStream(ed, ed)
+	rc4Cipher.Reset()
+	return ed, nil
+}
+
+// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 4757.
+func DecryptData(key, data []byte, e etype.EType) ([]byte, error) {
+	return EncryptData(key, data, e)
+}
+
+// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 4757.
+// The encrypted data is concatenated with its RC4 header containing integrity checksum and confounder to create an encrypted message.
+func EncryptMessage(key, data []byte, usage uint32, export bool, e etype.EType) ([]byte, error) {
+	confounder := make([]byte, e.GetConfounderByteSize()) // size = 8
+	_, err := rand.Read(confounder)
+	if err != nil {
+		return []byte{}, fmt.Errorf("error generating confounder: %v", err)
+	}
+	k1 := key
+	k2 := HMAC(k1, UsageToMSMsgType(usage))
+	toenc := append(confounder, data...)
+	chksum := HMAC(k2, toenc)
+	k3 := HMAC(k2, chksum)
+
+	ed, err := EncryptData(k3, toenc, e)
+	if err != nil {
+		return []byte{}, fmt.Errorf("error encrypting data: %v", err)
+	}
+
+	msg := append(chksum, ed...)
+	return msg, nil
+}
+
+// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 4757.
+// The integrity of the message is also verified.
+func DecryptMessage(key, data []byte, usage uint32, export bool, e etype.EType) ([]byte, error) {
+	checksum := data[:e.GetHMACBitLength()/8]
+	ct := data[e.GetHMACBitLength()/8:]
+	_, k2, k3 := deriveKeys(key, checksum, usage, export)
+
+	pt, err := DecryptData(k3, ct, e)
+	if err != nil {
+		return []byte{}, fmt.Errorf("error decrypting data: %v", err)
+	}
+
+	if !VerifyIntegrity(k2, pt, data, e) {
+		return []byte{}, errors.New("integrity checksum incorrect")
+	}
+	return pt[e.GetConfounderByteSize():], nil
+}
+
+// VerifyIntegrity checks the integrity checksum of the data matches that calculated from the decrypted data.
+func VerifyIntegrity(key, pt, data []byte, e etype.EType) bool {
+	chksum := HMAC(key, pt)
+	return hmac.Equal(chksum, data[:e.GetHMACBitLength()/8])
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/keyDerivation.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/keyDerivation.go
new file mode 100644
index 0000000..d1f90c0
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/keyDerivation.go
@@ -0,0 +1,40 @@
+package rfc4757
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+
+	"golang.org/x/crypto/md4"
+)
+
+// StringToKey returns a key derived from the string provided according to the definition in RFC 4757.
+func StringToKey(secret string) ([]byte, error) {
+	b := make([]byte, len(secret)*2, len(secret)*2)
+	for i, r := range secret {
+		u := fmt.Sprintf("%04x", r)
+		c, err := hex.DecodeString(u)
+		if err != nil {
+			return []byte{}, errors.New("character could not be encoded")
+		}
+		// Swap round the two bytes to make little endian as we put into byte slice
+		b[2*i] = c[1]
+		b[2*i+1] = c[0]
+	}
+	r := bytes.NewReader(b)
+	h := md4.New()
+	_, err := io.Copy(h, r)
+	if err != nil {
+		return []byte{}, err
+	}
+	return h.Sum(nil), nil
+}
+
+func deriveKeys(key, checksum []byte, usage uint32, export bool) (k1, k2, k3 []byte) {
+	k1 = key
+	k2 = HMAC(k1, UsageToMSMsgType(usage))
+	k3 = HMAC(k2, checksum)
+	return
+}
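The RFC 4757 string-to-key is simply the MD4 digest of the password encoded as little-endian UTF-16, i.e. the Windows NT hash; a minimal sketch:

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/jcmturner/gokrb5/v8/crypto/rfc4757"
)

func main() {
	// Derive the 16-byte RC4-HMAC key (NT hash) from a password.
	k, err := rfc4757.StringToKey("password")
	if err != nil {
		panic(err)
	}
	// For "password" this should print the widely published NT hash
	// 8846f7eaee8fb117ad06bdd830b7586c.
	fmt.Println(hex.EncodeToString(k))
}
```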
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/msgtype.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/msgtype.go
new file mode 100644
index 0000000..068588d
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc4757/msgtype.go
@@ -0,0 +1,20 @@
+package rfc4757
+
+import "encoding/binary"
+
+// UsageToMSMsgType converts Kerberos key usage numbers to Microsoft message type encoded as a little-endian four byte slice.
+func UsageToMSMsgType(usage uint32) []byte {
+	// Translate usage numbers to the Microsoft T numbers
+	switch usage {
+	case 3:
+		usage = 8
+	case 9:
+		usage = 8
+	case 23:
+		usage = 13
+	}
+	// Now convert to bytes
+	tb := make([]byte, 4) // We force a uint32 input so we can't go over 4 bytes
+	binary.PutUvarint(tb, uint64(usage))
+	return tb
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/encryption.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/encryption.go
new file mode 100644
index 0000000..54cff7b
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/encryption.go
@@ -0,0 +1,125 @@
+// Package rfc8009 provides encryption and checksum methods as specified in RFC 8009
+package rfc8009
+
+import (
+	"crypto/aes"
+	"crypto/hmac"
+	"crypto/rand"
+	"errors"
+	"fmt"
+
+	"github.com/jcmturner/aescts/v2"
+	"github.com/jcmturner/gokrb5/v8/crypto/common"
+	"github.com/jcmturner/gokrb5/v8/crypto/etype"
+	"github.com/jcmturner/gokrb5/v8/iana/etypeID"
+)
+
+// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 8009.
+func EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) {
+	kl := e.GetKeyByteSize()
+	if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 {
+		kl = 32
+	}
+	if len(key) != kl {
+		return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", kl, len(key))
+	}
+	ivz := make([]byte, aes.BlockSize)
+	return aescts.Encrypt(key, ivz, data)
+}
+
+// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 8009.
+// The encrypted data is concatenated with its integrity hash to create an encrypted message.
+func EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) {
+	kl := e.GetKeyByteSize()
+	if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 {
+		kl = 32
+	}
+	if len(key) != kl {
+		return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", kl, len(key))
+	}
+	//confounder
+	c := make([]byte, e.GetConfounderByteSize())
+	_, err := rand.Read(c)
+	if err != nil {
+		return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err)
+	}
+	plainBytes := append(c, message...)
+
+	// Derive key for encryption from usage
+	var k []byte
+	if usage != 0 {
+		k, err = e.DeriveKey(key, common.GetUsageKe(usage))
+		if err != nil {
+			return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err)
+		}
+	}
+
+	// Encrypt the data
+	iv, b, err := e.EncryptData(k, plainBytes)
+	if err != nil {
+		return iv, b, fmt.Errorf("error encrypting data: %v", err)
+	}
+
+	ivz := make([]byte, e.GetConfounderByteSize())
+	ih, err := GetIntegityHash(ivz, b, key, usage, e)
+	if err != nil {
+		return iv, b, fmt.Errorf("error encrypting data: %v", err)
+	}
+	b = append(b, ih...)
+	return iv, b, nil
+}
+
+// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 8009.
+func DecryptData(key, data []byte, e etype.EType) ([]byte, error) {
+	kl := e.GetKeyByteSize()
+	if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 {
+		kl = 32
+	}
+	if len(key) != kl {
+		return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", kl, len(key))
+	}
+	ivz := make([]byte, aes.BlockSize)
+	return aescts.Decrypt(key, ivz, data)
+}
+
+// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 8009.
+// The integrity of the message is also verified.
+func DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) {
+	//Derive the key
+	k, err := e.DeriveKey(key, common.GetUsageKe(usage))
+	if err != nil {
+		return nil, fmt.Errorf("error deriving key: %v", err)
+	}
+	// Strip off the checksum from the end
+	b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8])
+	if err != nil {
+		return nil, err
+	}
+	//Verify checksum
+	if !e.VerifyIntegrity(key, ciphertext, b, usage) {
+		return nil, errors.New("integrity verification failed")
+	}
+	//Remove the confounder bytes
+	return b[e.GetConfounderByteSize():], nil
+}
+
+// GetIntegityHash returns a keyed integrity hash of the bytes provided as defined in RFC 8009
+func GetIntegityHash(iv, c, key []byte, usage uint32, e etype.EType) ([]byte, error) {
+	// Generate and append integrity hash
+	// Rather than calculating the hash over the confounder and plaintext
+	// it is calculated over the iv concatenated with the AES cipher output.
+	ib := append(iv, c...)
+	return common.GetIntegrityHash(ib, key, usage, e)
+}
+
+// VerifyIntegrity verifies the integrity of ciphertext bytes ct.
+func VerifyIntegrity(key, ct []byte, usage uint32, etype etype.EType) bool {
+	h := make([]byte, etype.GetHMACBitLength()/8)
+	copy(h, ct[len(ct)-etype.GetHMACBitLength()/8:])
+	ivz := make([]byte, etype.GetConfounderByteSize())
+	ib := append(ivz, ct[:len(ct)-(etype.GetHMACBitLength()/8)]...)
+	expectedMAC, _ := common.GetIntegrityHash(ib, key, usage, etype)
+	return hmac.Equal(h, expectedMAC)
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/keyDerivation.go b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/keyDerivation.go
new file mode 100644
index 0000000..e947322
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/crypto/rfc8009/keyDerivation.go
@@ -0,0 +1,135 @@
+package rfc8009
+
+import (
+	"crypto/hmac"
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+
+	"github.com/jcmturner/gokrb5/v8/crypto/etype"
+	"github.com/jcmturner/gokrb5/v8/iana/etypeID"
+	"golang.org/x/crypto/pbkdf2"
+)
+
+const (
+	s2kParamsZero = 32768
+)
+
+// DeriveRandom for key derivation as defined in RFC 8009
+func DeriveRandom(protocolKey, usage []byte, e etype.EType) ([]byte, error) {
+	h := e.GetHashFunc()()
+	return KDF_HMAC_SHA2(protocolKey, []byte("prf"), usage, h.Size(), e), nil
+}
+
+// DeriveKey derives a key from the protocol key based on the usage and the etype's specific methods.
+//
+// https://tools.ietf.org/html/rfc8009#section-5
+func DeriveKey(protocolKey, label []byte, e etype.EType) []byte {
+	var context []byte
+	var kl int
+	// Key length is longer for aes256-cts-hmac-sha384-192 if it is a Ke or comes from StringToKey (where label is "kerberos")
+	if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 {
+	Swtch:
+		switch label[len(label)-1] {
+		case 0x73:
+			// 0x73 is "s" so label could be kerberos meaning StringToKey so now check if the label is "kerberos"
+			kerblabel := []byte("kerberos")
+			if len(label) != len(kerblabel) {
+				break
+			}
+			for i, b := range label {
+				if b != kerblabel[i] {
+					kl = e.GetKeySeedBitLength()
+					break Swtch
+				}
+			}
+			if kl == 0 {
+				// This is StringToKey
+				kl = 256
+			}
+		case 0xAA:
+			// This is a Ke
+			kl = 256
+		}
+	}
+	if kl == 0 {
+		kl = e.GetKeySeedBitLength()
+	}
+	return e.RandomToKey(KDF_HMAC_SHA2(protocolKey, label, context, kl, e))
+}
+
+// RandomToKey returns a key from the bytes provided according to the definition in RFC 8009.
+func RandomToKey(b []byte) []byte {
+	return b
+}
+
+// StringToKey returns a key derived from the string provided according to the definition in RFC 8009.
+func StringToKey(secret, salt, s2kparams string, e etype.EType) ([]byte, error) {
+	i, err := S2KparamsToItertions(s2kparams)
+	if err != nil {
+		return nil, err
+	}
+	return StringToKeyIter(secret, salt, i, e)
+}
+
+// StringToKeyIter returns a key derived from the string provided according to the definition in RFC 8009.
+func StringToKeyIter(secret, salt string, iterations int, e etype.EType) ([]byte, error) {
+	tkey := e.RandomToKey(StringToPBKDF2(secret, salt, iterations, e))
+	return e.DeriveKey(tkey, []byte("kerberos"))
+}
+
+// StringToPBKDF2 generates an encryption key from a pass phrase and salt string using the PBKDF2 function from PKCS #5 v2.0
+func StringToPBKDF2(secret, salt string, iterations int, e etype.EType) []byte {
+	kl := e.GetKeyByteSize()
+	if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 {
+		kl = 32
+	}
+	return pbkdf2.Key([]byte(secret), []byte(salt), iterations, kl, e.GetHashFunc())
+}
+
+// KDF_HMAC_SHA2 key derivation: https://tools.ietf.org/html/rfc8009#section-3
+func KDF_HMAC_SHA2(protocolKey, label, context []byte, kl int, e etype.EType) []byte {
+	//k: Length in bits of the key to be outputted, expressed in big-endian binary representation in 4 bytes.
+	k := make([]byte, 4, 4)
+	binary.BigEndian.PutUint32(k, uint32(kl))
+
+	c := make([]byte, 4, 4)
+	binary.BigEndian.PutUint32(c, uint32(1))
+	c = append(c, label...)
+	c = append(c, byte(0))
+	if len(context) > 0 {
+		c = append(c, context...)
+	}
+	c = append(c, k...)
+
+	mac := hmac.New(e.GetHashFunc(), protocolKey)
+	mac.Write(c)
+	return mac.Sum(nil)[:(kl / 8)]
+}
+
+// GetSaltP returns the salt value based on the etype name: https://tools.ietf.org/html/rfc8009#section-4
+func GetSaltP(salt, ename string) string {
+	b := []byte(ename)
+	b = append(b, byte(0))
+	b = append(b, []byte(salt)...)
+	return string(b)
+}
+
+// S2KparamsToItertions converts the string representation of iterations to an integer for RFC 8009.
+func S2KparamsToItertions(s2kparams string) (int, error) {
+	var i uint32
+	if len(s2kparams) != 8 {
+		return s2kParamsZero, errors.New("Invalid s2kparams length")
+	}
+	b, err := hex.DecodeString(s2kparams)
+	if err != nil {
+		return s2kParamsZero, errors.New("Invalid s2kparams, cannot decode string to bytes")
+	}
+	i = binary.BigEndian.Uint32(b)
+	return int(i), nil
+}
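As a quick sketch of the salt rule above, the etype name and a null byte are prefixed to the normal salt (the default-salt string below is illustrative):

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gokrb5/v8/crypto/rfc8009"
)

func main() {
	// RFC 8009 salts are: enctype-name | 0x00 | default-salt.
	saltp := rfc8009.GetSaltP("ATHENA.MIT.EDUraeburn", "aes128-cts-hmac-sha256-128")
	fmt.Printf("%q\n", saltp) // "aes128-cts-hmac-sha256-128\x00ATHENA.MIT.EDUraeburn"
}
```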
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/MICToken.go b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/MICToken.go
new file mode 100644
index 0000000..ab8daa2
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/MICToken.go
@@ -0,0 +1,174 @@
+package gssapi
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+
+	"github.com/jcmturner/gokrb5/v8/crypto"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// RFC 4121, section 4.2.6.1
+
+const (
+	// MICTokenFlagSentByAcceptor - this flag indicates the sender is the context acceptor.  When not set, it indicates the sender is the context initiator
+	MICTokenFlagSentByAcceptor = 1 << iota
+	// MICTokenFlagSealed - this flag indicates confidentiality is provided for.  It SHALL NOT be set in MIC tokens
+	MICTokenFlagSealed
+	// MICTokenFlagAcceptorSubkey - a subkey asserted by the context acceptor is used to protect the message
+	MICTokenFlagAcceptorSubkey
+)
+
+const (
+	micHdrLen = 16 // Length of the MIC Token's header
+)
+
+// MICToken represents a GSS API MIC token, as defined in RFC 4121.
+// It contains the header fields, the payload (this is not transmitted) and
+// the checksum, and provides the logic for converting to/from bytes plus
+// computing and verifying checksums
+type MICToken struct {
+	// const GSS Token ID: 0x0404
+	Flags byte // contains three flags: acceptor, sealed, acceptor subkey
+	// const Filler: 0xFF 0xFF 0xFF 0xFF 0xFF
+	SndSeqNum uint64 // sender's sequence number. big-endian
+	Payload   []byte // your data! :)
+	Checksum  []byte // checksum of { payload | header }
+}
+
+// Return the 2 bytes identifying a GSS API MIC token
+func getGSSMICTokenID() *[2]byte {
+	return &[2]byte{0x04, 0x04}
+}
+
+// Return the filler bytes used in header
+func fillerBytes() *[5]byte {
+	return &[5]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
+}
+
+// Marshal the MICToken into a byte slice.
+// The payload should have been set and the checksum computed, otherwise an error is returned.
+func (mt *MICToken) Marshal() ([]byte, error) {
+	if mt.Checksum == nil {
+		return nil, errors.New("checksum has not been set")
+	}
+
+	bytes := make([]byte, micHdrLen+len(mt.Checksum))
+	copy(bytes[0:micHdrLen], mt.getMICChecksumHeader()[:])
+	copy(bytes[micHdrLen:], mt.Checksum)
+
+	return bytes, nil
+}
+
+// SetChecksum uses the passed encryption key and key usage to compute the checksum over the payload and
+// the header, and sets the Checksum field of this MICToken.
+// If the payload has not been set or the checksum has already been set, an error is returned.
+func (mt *MICToken) SetChecksum(key types.EncryptionKey, keyUsage uint32) error {
+	if mt.Checksum != nil {
+		return errors.New("checksum has already been computed")
+	}
+	checksum, err := mt.checksum(key, keyUsage)
+	if err != nil {
+		return err
+	}
+	mt.Checksum = checksum
+	return nil
+}
+
+// Compute and return the checksum of this token, computed using the passed key and key usage.
+// Note: This will NOT update the struct's Checksum field.
+func (mt *MICToken) checksum(key types.EncryptionKey, keyUsage uint32) ([]byte, error) {
+	if mt.Payload == nil {
+		return nil, errors.New("cannot compute checksum with uninitialized payload")
+	}
+	d := make([]byte, micHdrLen+len(mt.Payload))
+	copy(d[0:], mt.Payload)
+	copy(d[len(mt.Payload):], mt.getMICChecksumHeader())
+
+	encType, err := crypto.GetEtype(key.KeyType)
+	if err != nil {
+		return nil, err
+	}
+	return encType.GetChecksumHash(key.KeyValue, d, keyUsage)
+}
+
+// Build a header suitable for a checksum computation
+func (mt *MICToken) getMICChecksumHeader() []byte {
+	header := make([]byte, micHdrLen)
+	copy(header[0:2], getGSSMICTokenID()[:])
+	header[2] = mt.Flags
+	copy(header[3:8], fillerBytes()[:])
+	binary.BigEndian.PutUint64(header[8:16], mt.SndSeqNum)
+	return header
+}
+
+// Verify computes the token's checksum with the provided key and usage,
+// and compares it to the checksum present in the token.
+// In case of any failure, (false, err) is returned, with err an explanatory error.
+func (mt *MICToken) Verify(key types.EncryptionKey, keyUsage uint32) (bool, error) {
+	computed, err := mt.checksum(key, keyUsage)
+	if err != nil {
+		return false, err
+	}
+	if !hmac.Equal(computed, mt.Checksum) {
+		return false, fmt.Errorf(
+			"checksum mismatch. Computed: %s, Contained in token: %s",
+			hex.EncodeToString(computed), hex.EncodeToString(mt.Checksum))
+	}
+	return true, nil
+}
+
+// Unmarshal bytes into the corresponding MICToken.
+// If expectFromAcceptor is true we expect the token to have been emitted by the gss acceptor,
+// and will check the according flag, returning an error if the token does not match the expectation.
+func (mt *MICToken) Unmarshal(b []byte, expectFromAcceptor bool) error {
+	if len(b) < micHdrLen {
+		return errors.New("bytes shorter than header length")
+	}
+	if !bytes.Equal(getGSSMICTokenID()[:], b[0:2]) {
+		return fmt.Errorf("wrong Token ID, Expected %s, was %s",
+			hex.EncodeToString(getGSSMICTokenID()[:]),
+			hex.EncodeToString(b[0:2]))
+	}
+	flags := b[2]
+	isFromAcceptor := flags&MICTokenFlagSentByAcceptor != 0
+	if isFromAcceptor && !expectFromAcceptor {
+		return errors.New("unexpected acceptor flag is set: not expecting a token from the acceptor")
+	}
+	if !isFromAcceptor && expectFromAcceptor {
+		return errors.New("unexpected acceptor flag is not set: expecting a token from the acceptor, not in the initiator")
+	}
+	if !bytes.Equal(b[3:8], fillerBytes()[:]) {
+		return fmt.Errorf("unexpected filler bytes: expecting %s, was %s",
+			hex.EncodeToString(fillerBytes()[:]),
+			hex.EncodeToString(b[3:8]))
+	}
+
+	mt.Flags = flags
+	mt.SndSeqNum = binary.BigEndian.Uint64(b[8:16])
+	mt.Checksum = b[micHdrLen:]
+	return nil
+}
+
+// NewInitiatorMICToken builds a new initiator token (acceptor flag will be set to 0) and computes the authenticated checksum.
+// Other flags are set to 0.
+// Note that in certain circumstances you may need to provide a sequence number that has been defined earlier.
+// This is currently not supported.
+func NewInitiatorMICToken(payload []byte, key types.EncryptionKey) (*MICToken, error) {
+	token := MICToken{
+		Flags:     0x00,
+		SndSeqNum: 0,
+		Payload:   payload,
+	}
+
+	if err := token.SetChecksum(key, keyusage.GSSAPI_INITIATOR_SIGN); err != nil {
+		return nil, err
+	}
+
+	return &token, nil
+}
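A rough end-to-end sketch of these types: the initiator builds and marshals a MIC token, and the acceptor unmarshals it, reattaches the payload (which is never transmitted), and verifies the checksum. The RC4-HMAC key bytes are illustrative; a real key is the session (sub)key established by the AP exchange.

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gokrb5/v8/gssapi"
	"github.com/jcmturner/gokrb5/v8/iana/etypeID"
	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
	"github.com/jcmturner/gokrb5/v8/types"
)

func main() {
	// Illustrative session key; a real one comes from the established context.
	key := types.EncryptionKey{KeyType: etypeID.RC4_HMAC, KeyValue: make([]byte, 16)}

	// Initiator side: build the token and compute its checksum over payload|header.
	tok, err := gssapi.NewInitiatorMICToken([]byte("payload to integrity-protect"), key)
	if err != nil {
		panic(err)
	}
	b, err := tok.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("marshalled MIC token: %d bytes\n", len(b))

	// Acceptor side: unmarshal, reattach the payload, then verify the checksum.
	var rx gssapi.MICToken
	if err := rx.Unmarshal(b, false); err != nil {
		panic(err)
	}
	rx.Payload = []byte("payload to integrity-protect")
	ok, err := rx.Verify(key, keyusage.GSSAPI_INITIATOR_SIGN)
	fmt.Println(ok, err)
}
```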
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/README.md b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/README.md
new file mode 100644
index 0000000..8fdcf70
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/README.md
@@ -0,0 +1,20 @@
+# Notes on GSS-API Negotiation Mechanism
+https://tools.ietf.org/html/rfc4178
+
+Client sends an initial negotiation message to the server which specifies the list of mechanisms 
+the client can support in order of decreasing preference.
+This message is generated with the ``NewNegTokenInitKrb5`` method.
+The message generated by this function specifies only a kerberos v5 mechanism is supported.
+
+The RFC states that this message can optionally contain the initial mechanism token 
+for the preferred mechanism (KRB5 in this case) of the client. The ``NewNegTokenInitKrb5`` 
+includes this in the message.
+
+The server side responds to this message with one of four messages:
+
+| Message Type/State | Description |
+|--------------------|-------------|
+| accept-completed | indicates that the initiator-selected mechanism was acceptable to the target, and that the security mechanism token embedded in the first negotiation message was sufficient to complete the authentication |
+| accept-incomplete | At least one more message is needed from the client to establish security context. |
+| reject | Negotiation is being terminated. |
+| request-mic | (this state can only be present in the first reply message from the target) indicates that the MIC token exchange is REQUIRED if per-message integrity services are available |
\ No newline at end of file
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/contextFlags.go b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/contextFlags.go
new file mode 100644
index 0000000..8c91859
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/contextFlags.go
@@ -0,0 +1,27 @@
+package gssapi
+
+import "github.com/jcmturner/gofork/encoding/asn1"
+
+// GSS-API context flags assigned numbers.
+const (
+	ContextFlagDeleg    = 1
+	ContextFlagMutual   = 2
+	ContextFlagReplay   = 4
+	ContextFlagSequence = 8
+	ContextFlagConf     = 16
+	ContextFlagInteg    = 32
+	ContextFlagAnon     = 64
+)
+
+// ContextFlags flags for GSSAPI
+// DEPRECATED - do not use
+type ContextFlags asn1.BitString
+
+// NewContextFlags creates a new ContextFlags instance
+// DEPRECATED - do not use
+func NewContextFlags() ContextFlags {
+	var c ContextFlags
+	c.BitLength = 32
+	c.Bytes = make([]byte, 4)
+	return c
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/gssapi.go b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/gssapi.go
new file mode 100644
index 0000000..8082231
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/gssapi.go
@@ -0,0 +1,202 @@
+// Package gssapi implements Generic Security Services Application Program Interface required for SPNEGO kerberos authentication.
+package gssapi
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+)
+
+// GSS-API OID names
+const (
+	// GSS-API OID names
+	OIDKRB5         OIDName = "KRB5"         // MechType OID for Kerberos 5
+	OIDMSLegacyKRB5 OIDName = "MSLegacyKRB5" // MechType OID for Kerberos 5
+	OIDSPNEGO       OIDName = "SPNEGO"
+	OIDGSSIAKerb    OIDName = "GSSIAKerb" // Indicates the client cannot get a service ticket and asks the server to serve as an intermediate to the target KDC. http://k5wiki.kerberos.org/wiki/Projects/IAKERB#IAKERB_mech
+)
+
+// GSS-API status values
+const (
+	StatusBadBindings = 1 << iota
+	StatusBadMech
+	StatusBadName
+	StatusBadNameType
+	StatusBadStatus
+	StatusBadSig
+	StatusBadMIC
+	StatusContextExpired
+	StatusCredentialsExpired
+	StatusDefectiveCredential
+	StatusDefectiveToken
+	StatusFailure
+	StatusNoContext
+	StatusNoCred
+	StatusBadQOP
+	StatusUnauthorized
+	StatusUnavailable
+	StatusDuplicateElement
+	StatusNameNotMN
+	StatusComplete
+	StatusContinueNeeded
+	StatusDuplicateToken
+	StatusOldToken
+	StatusUnseqToken
+	StatusGapToken
+)
+
+// ContextToken is an interface for a GSS-API context token.
+type ContextToken interface {
+	Marshal() ([]byte, error)
+	Unmarshal(b []byte) error
+	Verify() (bool, Status)
+	Context() context.Context
+}
+
+/*
+CREDENTIAL MANAGEMENT
+
+GSS_Acquire_cred             acquire credentials for use
+GSS_Release_cred             release credentials after use
+GSS_Inquire_cred             display information about credentials
+GSS_Add_cred                 construct credentials incrementally
+GSS_Inquire_cred_by_mech     display per-mechanism credential information
+
+CONTEXT-LEVEL CALLS
+
+GSS_Init_sec_context         initiate outbound security context
+GSS_Accept_sec_context       accept inbound security context
+GSS_Delete_sec_context       flush context when no longer needed
+GSS_Process_context_token    process received control token on context
+GSS_Context_time             indicate validity time remaining on context
+GSS_Inquire_context          display information about context
+GSS_Wrap_size_limit          determine GSS_Wrap token size limit
+GSS_Export_sec_context       transfer context to other process
+GSS_Import_sec_context       import transferred context
+
+PER-MESSAGE CALLS
+
+GSS_GetMIC                   apply integrity check, receive as token separate from message
+GSS_VerifyMIC                validate integrity check token along with message
+GSS_Wrap                     sign, optionally encrypt, encapsulate
+GSS_Unwrap                   decapsulate, decrypt if needed, validate integrity check
+
+SUPPORT CALLS
+
+GSS_Display_status           translate status codes to printable form
+GSS_Indicate_mechs           indicate mech_types supported on local system
+GSS_Compare_name             compare two names for equality
+GSS_Display_name             translate name to printable form
+GSS_Import_name              convert printable name to normalized form
+GSS_Release_name             free storage of normalized-form name
+GSS_Release_buffer           free storage of general GSS-allocated object
+GSS_Release_OID_set          free storage of OID set object
+GSS_Create_empty_OID_set     create empty OID set
+GSS_Add_OID_set_member       add member to OID set
+GSS_Test_OID_set_member      test if OID is member of OID set
+GSS_Inquire_names_for_mech   indicate name types supported by mechanism
+GSS_Inquire_mechs_for_name   indicates mechanisms supporting name type
+GSS_Canonicalize_name        translate name to per-mechanism form
+GSS_Export_name              externalize per-mechanism name
+GSS_Duplicate_name           duplicate name object
+*/
+
+// Mechanism is the GSS-API interface for authentication mechanisms.
+type Mechanism interface {
+	OID() asn1.ObjectIdentifier
+	AcquireCred() error                                               // acquire credentials for use (eg. AS exchange for KRB5)
+	InitSecContext() (ContextToken, error)                            // initiate outbound security context (eg TGS exchange builds AP_REQ to go into ContextToken to send to service)
+	AcceptSecContext(ct ContextToken) (bool, context.Context, Status) // service verifies the token server side to establish a context
+	MIC() MICToken                                                    // apply integrity check, receive as token separate from message
+	VerifyMIC(mt MICToken) (bool, error)                              // validate integrity check token along with message
+	Wrap(msg []byte) WrapToken                                        // sign, optionally encrypt, encapsulate
+	Unwrap(wt WrapToken) []byte                                       // decapsulate, decrypt if needed, validate integrity check
+}
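+
+// A typical flow that a Mechanism implementation is driven through, mirroring
+// the context-level calls listed above (illustrative sketch only; mech,
+// sendToService and receivedToken are assumed placeholders, not part of this
+// package):
+//
+//	// Initiator side
+//	if err := mech.AcquireCred(); err != nil {
+//		// handle error (eg. could not obtain a TGT)
+//	}
+//	ct, err := mech.InitSecContext()
+//	if err != nil {
+//		// handle error
+//	}
+//	b, _ := ct.Marshal()
+//	sendToService(b)
+//
+//	// Acceptor side: unmarshal the received bytes into the mechanism's
+//	// concrete ContextToken type, then verify it to establish the context.
+//	ok, _, status := mech.AcceptSecContext(receivedToken)
+//	if !ok {
+//		// authentication failed; status.Error() describes why
+//	}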
+
+// OIDName is the type for defined GSS-API OIDs.
+type OIDName string
+
+// OID returns the OID for the provided OID name.
+func (o OIDName) OID() asn1.ObjectIdentifier {
+	switch o {
+	case OIDSPNEGO:
+		return asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 2}
+	case OIDKRB5:
+		return asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2}
+	case OIDMSLegacyKRB5:
+		return asn1.ObjectIdentifier{1, 2, 840, 48018, 1, 2, 2}
+	case OIDGSSIAKerb:
+		return asn1.ObjectIdentifier{1, 3, 6, 1, 5, 2, 5}
+	}
+	return asn1.ObjectIdentifier{}
+}
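+
+// For example (sketch), resolving and printing the SPNEGO OID:
+//
+//	oid := OIDSPNEGO.OID()    // asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 2}
+//	fmt.Println(oid.String()) // prints "1.3.6.1.5.5.2"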
+
+// Status is the GSS-API status and implements the error interface.
+type Status struct {
+	Code    int
+	Message string
+}
+
+// Error returns the Status description.
+func (s Status) Error() string {
+	var str string
+	switch s.Code {
+	case StatusBadBindings:
+		str = "channel binding mismatch"
+	case StatusBadMech:
+		str = "unsupported mechanism requested"
+	case StatusBadName:
+		str = "invalid name provided"
+	case StatusBadNameType:
+		str = "name of unsupported type provided"
+	case StatusBadStatus:
+		str = "invalid input status selector"
+	case StatusBadSig:
+		str = "token had invalid integrity check"
+	case StatusBadMIC:
+		str = "preferred alias for GSS_S_BAD_SIG"
+	case StatusContextExpired:
+		str = "specified security context expired"
+	case StatusCredentialsExpired:
+		str = "expired credentials detected"
+	case StatusDefectiveCredential:
+		str = "defective credential detected"
+	case StatusDefectiveToken:
+		str = "defective token detected"
+	case StatusFailure:
+		str = "failure, unspecified at GSS-API level"
+	case StatusNoContext:
+		str = "no valid security context specified"
+	case StatusNoCred:
+		str = "no valid credentials provided"
+	case StatusBadQOP:
+		str = "unsupported QOP value"
+	case StatusUnauthorized:
+		str = "operation unauthorized"
+	case StatusUnavailable:
+		str = "operation unavailable"
+	case StatusDuplicateElement:
+		str = "duplicate credential element requested"
+	case StatusNameNotMN:
+		str = "name contains multi-mechanism elements"
+	case StatusComplete:
+		str = "normal completion"
+	case StatusContinueNeeded:
+		str = "continuation call to routine required"
+	case StatusDuplicateToken:
+		str = "duplicate per-message token detected"
+	case StatusOldToken:
+		str = "timed-out per-message token detected"
+	case StatusUnseqToken:
+		str = "reordered (early) per-message token detected"
+	case StatusGapToken:
+		str = "skipped predecessor token(s) detected"
+	default:
+		str = "unknown GSS-API error status"
+	}
+	if s.Message != "" {
+		return fmt.Sprintf("%s: %s", str, s.Message)
+	}
+	return str
+}
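+
+// Because Status implements the error interface it can be created, returned
+// and printed like any other error. Illustrative sketch:
+//
+//	err := Status{Code: StatusDefectiveToken, Message: "token too short"}
+//	fmt.Println(err) // "defective token detected: token too short"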
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/gssapi/wrapToken.go b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/wrapToken.go
new file mode 100644
index 0000000..ea7d054
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/gssapi/wrapToken.go
@@ -0,0 +1,195 @@
+package gssapi
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+
+	"github.com/jcmturner/gokrb5/v8/crypto"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// RFC 4121, section 4.2.6.2
+
+const (
+	// HdrLen is the length of the Wrap Token's header
+	HdrLen = 16
+	// FillerByte is a filler in the WrapToken structure
+	FillerByte byte = 0xFF
+)
+
+// WrapToken represents a GSS API Wrap token, as defined in RFC 4121.
+// It contains the header fields, the payload and the checksum, and provides
+// the logic for converting to/from bytes plus computing and verifying checksums
+type WrapToken struct {
+	// const GSS Token ID: 0x0504
+	Flags byte // contains three flags: acceptor, sealed, acceptor subkey
+	// const Filler: 0xFF
+	EC        uint16 // checksum length. big-endian
+	RRC       uint16 // right rotation count. big-endian
+	SndSeqNum uint64 // sender's sequence number. big-endian
+	Payload   []byte // your data! :)
+	CheckSum  []byte // authenticated checksum of { payload | header }
+}
+
+// Return the 2 bytes identifying a GSS API Wrap token
+func getGssWrapTokenId() *[2]byte {
+	return &[2]byte{0x05, 0x04}
+}
+
+// Marshal the WrapToken into a byte slice.
+// The payload should have been set and the checksum computed, otherwise an error is returned.
+func (wt *WrapToken) Marshal() ([]byte, error) {
+	if wt.CheckSum == nil {
+		return nil, errors.New("checksum has not been set")
+	}
+	if wt.Payload == nil {
+		return nil, errors.New("payload has not been set")
+	}
+
+	pldOffset := HdrLen                    // Offset of the payload in the token
+	chkSOffset := HdrLen + len(wt.Payload) // Offset of the checksum in the token
+
+	bytes := make([]byte, chkSOffset+int(wt.EC))
+	copy(bytes[0:], getGssWrapTokenId()[:])
+	bytes[2] = wt.Flags
+	bytes[3] = FillerByte
+	binary.BigEndian.PutUint16(bytes[4:6], wt.EC)
+	binary.BigEndian.PutUint16(bytes[6:8], wt.RRC)
+	binary.BigEndian.PutUint64(bytes[8:16], wt.SndSeqNum)
+	copy(bytes[pldOffset:], wt.Payload)
+	copy(bytes[chkSOffset:], wt.CheckSum)
+	return bytes, nil
+}
+
+// SetCheckSum uses the passed encryption key and key usage to compute the checksum over the payload and
+// the header, and sets the CheckSum field of this WrapToken.
+// If the payload has not been set or the checksum has already been set, an error is returned.
+func (wt *WrapToken) SetCheckSum(key types.EncryptionKey, keyUsage uint32) error {
+	if wt.Payload == nil {
+		return errors.New("payload has not been set")
+	}
+	if wt.CheckSum != nil {
+		return errors.New("checksum has already been computed")
+	}
+	chkSum, cErr := wt.computeCheckSum(key, keyUsage)
+	if cErr != nil {
+		return cErr
+	}
+	wt.CheckSum = chkSum
+	return nil
+}
+
+// computeCheckSum computes and returns the checksum of this token using the passed key and key usage.
+// Note: This will NOT update the struct's Checksum field.
+func (wt *WrapToken) computeCheckSum(key types.EncryptionKey, keyUsage uint32) ([]byte, error) {
+	if wt.Payload == nil {
+		return nil, errors.New("cannot compute checksum with uninitialized payload")
+	}
+	// Build a slice containing { payload | header }
+	checksumMe := make([]byte, HdrLen+len(wt.Payload))
+	copy(checksumMe[0:], wt.Payload)
+	copy(checksumMe[len(wt.Payload):], getChecksumHeader(wt.Flags, wt.SndSeqNum))
+
+	encType, err := crypto.GetEtype(key.KeyType)
+	if err != nil {
+		return nil, err
+	}
+	return encType.GetChecksumHash(key.KeyValue, checksumMe, keyUsage)
+}
+
+// Build a header suitable for a checksum computation
+func getChecksumHeader(flags byte, senderSeqNum uint64) []byte {
+	header := make([]byte, 16)
+	copy(header[0:], []byte{0x05, 0x04, flags, 0xFF, 0x00, 0x00, 0x00, 0x00})
+	binary.BigEndian.PutUint64(header[8:], senderSeqNum)
+	return header
+}
+
+// Verify computes the token's checksum with the provided key and usage,
+// and compares it to the checksum present in the token.
+// In case of any failure, (false, Err) is returned, with Err an explanatory error.
+func (wt *WrapToken) Verify(key types.EncryptionKey, keyUsage uint32) (bool, error) {
+	computed, cErr := wt.computeCheckSum(key, keyUsage)
+	if cErr != nil {
+		return false, cErr
+	}
+	if !hmac.Equal(computed, wt.CheckSum) {
+		return false, fmt.Errorf(
+			"checksum mismatch. Computed: %s, Contained in token: %s",
+			hex.EncodeToString(computed), hex.EncodeToString(wt.CheckSum))
+	}
+	return true, nil
+}
+
+// Unmarshal bytes into the corresponding WrapToken.
+// If expectFromAcceptor is true, we expect the token to have been emitted by the gss acceptor,
+// and will check the corresponding flag, returning an error if the token does not match the expectation.
+func (wt *WrapToken) Unmarshal(b []byte, expectFromAcceptor bool) error {
+	// Check if we can read a whole header
+	if len(b) < 16 {
+		return errors.New("bytes shorter than header length")
+	}
+	// Is the Token ID correct?
+	if !bytes.Equal(getGssWrapTokenId()[:], b[0:2]) {
+		return fmt.Errorf("wrong Token ID. Expected %s, was %s",
+			hex.EncodeToString(getGssWrapTokenId()[:]),
+			hex.EncodeToString(b[0:2]))
+	}
+	// Check the acceptor flag
+	flags := b[2]
+	isFromAcceptor := flags&0x01 == 1
+	if isFromAcceptor && !expectFromAcceptor {
+		return errors.New("unexpected acceptor flag is set: not expecting a token from the acceptor")
+	}
+	if !isFromAcceptor && expectFromAcceptor {
+		return errors.New("expected acceptor flag is not set: expecting a token from the acceptor, not the initiator")
+	}
+	// Check the filler byte
+	if b[3] != FillerByte {
+		return fmt.Errorf("unexpected filler byte: expecting 0xFF, was %s ", hex.EncodeToString(b[3:4]))
+	}
+	checksumL := binary.BigEndian.Uint16(b[4:6])
+	// Sanity check on the checksum length
+	if int(checksumL) > len(b)-HdrLen {
+		return fmt.Errorf("inconsistent checksum length: %d bytes to parse, checksum length is %d", len(b), checksumL)
+	}
+
+	wt.Flags = flags
+	wt.EC = checksumL
+	wt.RRC = binary.BigEndian.Uint16(b[6:8])
+	wt.SndSeqNum = binary.BigEndian.Uint64(b[8:16])
+	wt.Payload = b[16 : len(b)-int(checksumL)]
+	wt.CheckSum = b[len(b)-int(checksumL):]
+	return nil
+}
+
+// NewInitiatorWrapToken builds a new initiator token (acceptor flag will be set to 0) and computes the authenticated checksum.
+// Other flags are set to 0, and the RRC and sequence number are initialized to 0.
+// Note that in certain circumstances you may need to provide a sequence number that has been defined earlier.
+// This is currently not supported.
+func NewInitiatorWrapToken(payload []byte, key types.EncryptionKey) (*WrapToken, error) {
+	encType, err := crypto.GetEtype(key.KeyType)
+	if err != nil {
+		return nil, err
+	}
+
+	token := WrapToken{
+		Flags: 0x00, // all zeroed out (this is a token sent by the initiator)
+		// Checksum size: length of output of the HMAC function, in bytes.
+		EC:        uint16(encType.GetHMACBitLength() / 8),
+		RRC:       0,
+		SndSeqNum: 0,
+		Payload:   payload,
+	}
+
+	if err := token.SetCheckSum(key, keyusage.GSSAPI_INITIATOR_SEAL); err != nil {
+		return nil, err
+	}
+
+	return &token, nil
+}
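+
+// A typical round trip with the helpers above (illustrative sketch; key is a
+// types.EncryptionKey shared with the acceptor and rb holds the raw bytes of a
+// token received back from it):
+//
+//	wt, err := NewInitiatorWrapToken([]byte("payload"), key)
+//	if err != nil {
+//		// handle error
+//	}
+//	out, _ := wt.Marshal()
+//	// ... send out, read the acceptor's token into rb ...
+//	var in WrapToken
+//	if err := in.Unmarshal(rb, true); err != nil {
+//		// handle error
+//	}
+//	if ok, err := in.Verify(key, keyusage.GSSAPI_ACCEPTOR_SEAL); !ok {
+//		// integrity check failed; err explains the mismatch
+//	}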
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/addrtype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/addrtype/constants.go
new file mode 100644
index 0000000..457b89d
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/addrtype/constants.go
@@ -0,0 +1,15 @@
+// Package addrtype provides Address type assigned numbers.
+package addrtype
+
+// Address type IDs.
+const (
+	IPv4          int32 = 2
+	Directional   int32 = 3
+	ChaosNet      int32 = 5
+	XNS           int32 = 6
+	ISO           int32 = 7
+	DECNETPhaseIV int32 = 12
+	AppleTalkDDP  int32 = 16
+	NetBios       int32 = 20
+	IPv6          int32 = 24
+)
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/adtype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/adtype/constants.go
new file mode 100644
index 0000000..e805b74
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/adtype/constants.go
@@ -0,0 +1,23 @@
+// Package adtype provides Authenticator type assigned numbers.
+package adtype
+
+// Authenticator type IDs.
+const (
+	ADIfRelevant                  int32 = 1
+	ADIntendedForServer           int32 = 2
+	ADIntendedForApplicationClass int32 = 3
+	ADKDCIssued                   int32 = 4
+	ADAndOr                       int32 = 5
+	ADMandatoryTicketExtensions   int32 = 6
+	ADInTicketExtensions          int32 = 7
+	ADMandatoryForKDC             int32 = 8
+	OSFDCE                        int32 = 64
+	SESAME                        int32 = 65
+	ADOSFDCEPKICertID             int32 = 66
+	ADAuthenticationStrength      int32 = 70
+	ADFXFastArmor                 int32 = 71
+	ADFXFastUsed                  int32 = 72
+	ADWin2KPAC                    int32 = 128
+	ADEtypeNegotiation            int32 = 129
+	//Reserved values                   9-63
+)
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/asnAppTag/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/asnAppTag/constants.go
new file mode 100644
index 0000000..d74cd60
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/asnAppTag/constants.go
@@ -0,0 +1,24 @@
+// Package asnAppTag provides ASN1 application tag numbers.
+package asnAppTag
+
+// ASN1 application tag numbers.
+const (
+	Ticket         = 1
+	Authenticator  = 2
+	EncTicketPart  = 3
+	ASREQ          = 10
+	TGSREQ         = 12
+	ASREP          = 11
+	TGSREP         = 13
+	APREQ          = 14
+	APREP          = 15
+	KRBSafe        = 20
+	KRBPriv        = 21
+	KRBCred        = 22
+	EncASRepPart   = 25
+	EncTGSRepPart  = 26
+	EncAPRepPart   = 27
+	EncKrbPrivPart = 28
+	EncKrbCredPart = 29
+	KRBError       = 30
+)
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/chksumtype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/chksumtype/constants.go
new file mode 100644
index 0000000..93db952
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/chksumtype/constants.go
@@ -0,0 +1,32 @@
+// Package chksumtype provides Kerberos 5 checksum type assigned numbers.
+package chksumtype
+
+// Checksum type IDs.
+const (
+	//RESERVED : 0
+	CRC32         int32 = 1
+	RSA_MD4       int32 = 2
+	RSA_MD4_DES   int32 = 3
+	DES_MAC       int32 = 4
+	DES_MAC_K     int32 = 5
+	RSA_MD4_DES_K int32 = 6
+	RSA_MD5       int32 = 7
+	RSA_MD5_DES   int32 = 8
+	RSA_MD5_DES3  int32 = 9
+	SHA1_ID10     int32 = 10
+	//UNASSIGNED : 11
+	HMAC_SHA1_DES3_KD      int32 = 12
+	HMAC_SHA1_DES3         int32 = 13
+	SHA1_ID14              int32 = 14
+	HMAC_SHA1_96_AES128    int32 = 15
+	HMAC_SHA1_96_AES256    int32 = 16
+	CMAC_CAMELLIA128       int32 = 17
+	CMAC_CAMELLIA256       int32 = 18
+	HMAC_SHA256_128_AES128 int32 = 19
+	HMAC_SHA384_192_AES256 int32 = 20
+	//UNASSIGNED : 21-32770
+	GSSAPI int32 = 32771
+	//UNASSIGNED : 32772-2147483647
+	KERB_CHECKSUM_HMAC_MD5_UNSIGNED uint32 = 4294967158 // 0xFFFFFF76 documentation says this is -138 but in an unsigned int this is 4294967158
+	KERB_CHECKSUM_HMAC_MD5          int32  = -138
+)
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/constants.go
new file mode 100644
index 0000000..0b8e916
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/constants.go
@@ -0,0 +1,5 @@
+// Package iana provides Kerberos 5 assigned numbers.
+package iana
+
+// PVNO is the Protocol Version Number.
+const PVNO = 5
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/errorcode/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/errorcode/constants.go
new file mode 100644
index 0000000..fd756bc
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/errorcode/constants.go
@@ -0,0 +1,155 @@
+// Package errorcode provides Kerberos 5 assigned error codes.
+package errorcode
+
+import "fmt"
+
+// Kerberos error codes.
+const (
+	KDC_ERR_NONE                          int32 = 0  //No error
+	KDC_ERR_NAME_EXP                      int32 = 1  //Client's entry in database has expired
+	KDC_ERR_SERVICE_EXP                   int32 = 2  //Server's entry in database has expired
+	KDC_ERR_BAD_PVNO                      int32 = 3  //Requested protocol version number not supported
+	KDC_ERR_C_OLD_MAST_KVNO               int32 = 4  //Client's key encrypted in old master key
+	KDC_ERR_S_OLD_MAST_KVNO               int32 = 5  //Server's key encrypted in old master key
+	KDC_ERR_C_PRINCIPAL_UNKNOWN           int32 = 6  //Client not found in Kerberos database
+	KDC_ERR_S_PRINCIPAL_UNKNOWN           int32 = 7  //Server not found in Kerberos database
+	KDC_ERR_PRINCIPAL_NOT_UNIQUE          int32 = 8  //Multiple principal entries in database
+	KDC_ERR_NULL_KEY                      int32 = 9  //The client or server has a null key
+	KDC_ERR_CANNOT_POSTDATE               int32 = 10 //Ticket not eligible for  postdating
+	KDC_ERR_NEVER_VALID                   int32 = 11 //Requested starttime is later than end time
+	KDC_ERR_POLICY                        int32 = 12 //KDC policy rejects request
+	KDC_ERR_BADOPTION                     int32 = 13 //KDC cannot accommodate requested option
+	KDC_ERR_ETYPE_NOSUPP                  int32 = 14 //KDC has no support for  encryption type
+	KDC_ERR_SUMTYPE_NOSUPP                int32 = 15 //KDC has no support for  checksum type
+	KDC_ERR_PADATA_TYPE_NOSUPP            int32 = 16 //KDC has no support for  padata type
+	KDC_ERR_TRTYPE_NOSUPP                 int32 = 17 //KDC has no support for  transited type
+	KDC_ERR_CLIENT_REVOKED                int32 = 18 //Clients credentials have been revoked
+	KDC_ERR_SERVICE_REVOKED               int32 = 19 //Credentials for server have been revoked
+	KDC_ERR_TGT_REVOKED                   int32 = 20 //TGT has been revoked
+	KDC_ERR_CLIENT_NOTYET                 int32 = 21 //Client not yet valid; try again later
+	KDC_ERR_SERVICE_NOTYET                int32 = 22 //Server not yet valid; try again later
+	KDC_ERR_KEY_EXPIRED                   int32 = 23 //Password has expired; change password to reset
+	KDC_ERR_PREAUTH_FAILED                int32 = 24 //Pre-authentication information was invalid
+	KDC_ERR_PREAUTH_REQUIRED              int32 = 25 //Additional pre-authentication required
+	KDC_ERR_SERVER_NOMATCH                int32 = 26 //Requested server and ticket don't match
+	KDC_ERR_MUST_USE_USER2USER            int32 = 27 //Server principal valid for  user2user only
+	KDC_ERR_PATH_NOT_ACCEPTED             int32 = 28 //KDC Policy rejects transited path
+	KDC_ERR_SVC_UNAVAILABLE               int32 = 29 //A service is not available
+	KRB_AP_ERR_BAD_INTEGRITY              int32 = 31 //Integrity check on decrypted field failed
+	KRB_AP_ERR_TKT_EXPIRED                int32 = 32 //Ticket expired
+	KRB_AP_ERR_TKT_NYV                    int32 = 33 //Ticket not yet valid
+	KRB_AP_ERR_REPEAT                     int32 = 34 //Request is a replay
+	KRB_AP_ERR_NOT_US                     int32 = 35 //The ticket isn't for us
+	KRB_AP_ERR_BADMATCH                   int32 = 36 //Ticket and authenticator don't match
+	KRB_AP_ERR_SKEW                       int32 = 37 //Clock skew too great
+	KRB_AP_ERR_BADADDR                    int32 = 38 //Incorrect net address
+	KRB_AP_ERR_BADVERSION                 int32 = 39 //Protocol version mismatch
+	KRB_AP_ERR_MSG_TYPE                   int32 = 40 //Invalid msg type
+	KRB_AP_ERR_MODIFIED                   int32 = 41 //Message stream modified
+	KRB_AP_ERR_BADORDER                   int32 = 42 //Message out of order
+	KRB_AP_ERR_BADKEYVER                  int32 = 44 //Specified version of key is not available
+	KRB_AP_ERR_NOKEY                      int32 = 45 //Service key not available
+	KRB_AP_ERR_MUT_FAIL                   int32 = 46 //Mutual authentication failed
+	KRB_AP_ERR_BADDIRECTION               int32 = 47 //Incorrect message direction
+	KRB_AP_ERR_METHOD                     int32 = 48 //Alternative authentication method required
+	KRB_AP_ERR_BADSEQ                     int32 = 49 //Incorrect sequence number in message
+	KRB_AP_ERR_INAPP_CKSUM                int32 = 50 //Inappropriate type of checksum in message
+	KRB_AP_PATH_NOT_ACCEPTED              int32 = 51 //Policy rejects transited path
+	KRB_ERR_RESPONSE_TOO_BIG              int32 = 52 //Response too big for UDP;  retry with TCP
+	KRB_ERR_GENERIC                       int32 = 60 //Generic error (description in e-text)
+	KRB_ERR_FIELD_TOOLONG                 int32 = 61 //Field is too long for this implementation
+	KDC_ERROR_CLIENT_NOT_TRUSTED          int32 = 62 //Reserved for PKINIT
+	KDC_ERROR_KDC_NOT_TRUSTED             int32 = 63 //Reserved for PKINIT
+	KDC_ERROR_INVALID_SIG                 int32 = 64 //Reserved for PKINIT
+	KDC_ERR_KEY_TOO_WEAK                  int32 = 65 //Reserved for PKINIT
+	KDC_ERR_CERTIFICATE_MISMATCH          int32 = 66 //Reserved for PKINIT
+	KRB_AP_ERR_NO_TGT                     int32 = 67 //No TGT available to validate USER-TO-USER
+	KDC_ERR_WRONG_REALM                   int32 = 68 //Reserved for future use
+	KRB_AP_ERR_USER_TO_USER_REQUIRED      int32 = 69 //Ticket must be for  USER-TO-USER
+	KDC_ERR_CANT_VERIFY_CERTIFICATE       int32 = 70 //Reserved for PKINIT
+	KDC_ERR_INVALID_CERTIFICATE           int32 = 71 //Reserved for PKINIT
+	KDC_ERR_REVOKED_CERTIFICATE           int32 = 72 //Reserved for PKINIT
+	KDC_ERR_REVOCATION_STATUS_UNKNOWN     int32 = 73 //Reserved for PKINIT
+	KDC_ERR_REVOCATION_STATUS_UNAVAILABLE int32 = 74 //Reserved for PKINIT
+	KDC_ERR_CLIENT_NAME_MISMATCH          int32 = 75 //Reserved for PKINIT
+	KDC_ERR_KDC_NAME_MISMATCH             int32 = 76 //Reserved for PKINIT
+)
+
+// Lookup an error code description.
+func Lookup(i int32) string {
+	if s, ok := errorcodeLookup[i]; ok {
+		return fmt.Sprintf("(%d) %s", i, s)
+	}
+	return fmt.Sprintf("Unknown ErrorCode %d", i)
+}
+
+var errorcodeLookup = map[int32]string{
+	KDC_ERR_NONE:                          "KDC_ERR_NONE No error",
+	KDC_ERR_NAME_EXP:                      "KDC_ERR_NAME_EXP Client's entry in database has expired",
+	KDC_ERR_SERVICE_EXP:                   "KDC_ERR_SERVICE_EXP Server's entry in database has expired",
+	KDC_ERR_BAD_PVNO:                      "KDC_ERR_BAD_PVNO Requested protocol version number not supported",
+	KDC_ERR_C_OLD_MAST_KVNO:               "KDC_ERR_C_OLD_MAST_KVNO Client's key encrypted in old master key",
+	KDC_ERR_S_OLD_MAST_KVNO:               "KDC_ERR_S_OLD_MAST_KVNO Server's key encrypted in old master key",
+	KDC_ERR_C_PRINCIPAL_UNKNOWN:           "KDC_ERR_C_PRINCIPAL_UNKNOWN Client not found in Kerberos database",
+	KDC_ERR_S_PRINCIPAL_UNKNOWN:           "KDC_ERR_S_PRINCIPAL_UNKNOWN Server not found in Kerberos database",
+	KDC_ERR_PRINCIPAL_NOT_UNIQUE:          "KDC_ERR_PRINCIPAL_NOT_UNIQUE Multiple principal entries in database",
+	KDC_ERR_NULL_KEY:                      "KDC_ERR_NULL_KEY The client or server has a null key",
+	KDC_ERR_CANNOT_POSTDATE:               "KDC_ERR_CANNOT_POSTDATE Ticket not eligible for postdating",
+	KDC_ERR_NEVER_VALID:                   "KDC_ERR_NEVER_VALID Requested starttime is later than end time",
+	KDC_ERR_POLICY:                        "KDC_ERR_POLICY KDC policy rejects request",
+	KDC_ERR_BADOPTION:                     "KDC_ERR_BADOPTION KDC cannot accommodate requested option",
+	KDC_ERR_ETYPE_NOSUPP:                  "KDC_ERR_ETYPE_NOSUPP KDC has no support for encryption type",
+	KDC_ERR_SUMTYPE_NOSUPP:                "KDC_ERR_SUMTYPE_NOSUPP KDC has no support for checksum type",
+	KDC_ERR_PADATA_TYPE_NOSUPP:            "KDC_ERR_PADATA_TYPE_NOSUPP KDC has no support for padata type",
+	KDC_ERR_TRTYPE_NOSUPP:                 "KDC_ERR_TRTYPE_NOSUPP KDC has no support for transited type",
+	KDC_ERR_CLIENT_REVOKED:                "KDC_ERR_CLIENT_REVOKED Clients credentials have been revoked",
+	KDC_ERR_SERVICE_REVOKED:               "KDC_ERR_SERVICE_REVOKED Credentials for server have been revoked",
+	KDC_ERR_TGT_REVOKED:                   "KDC_ERR_TGT_REVOKED TGT has been revoked",
+	KDC_ERR_CLIENT_NOTYET:                 "KDC_ERR_CLIENT_NOTYET Client not yet valid; try again later",
+	KDC_ERR_SERVICE_NOTYET:                "KDC_ERR_SERVICE_NOTYET Server not yet valid; try again later",
+	KDC_ERR_KEY_EXPIRED:                   "KDC_ERR_KEY_EXPIRED Password has expired; change password to reset",
+	KDC_ERR_PREAUTH_FAILED:                "KDC_ERR_PREAUTH_FAILED Pre-authentication information was invalid",
+	KDC_ERR_PREAUTH_REQUIRED:              "KDC_ERR_PREAUTH_REQUIRED Additional pre-authentication required",
+	KDC_ERR_SERVER_NOMATCH:                "KDC_ERR_SERVER_NOMATCH Requested server and ticket don't match",
+	KDC_ERR_MUST_USE_USER2USER:            "KDC_ERR_MUST_USE_USER2USER Server principal valid for  user2user only",
+	KDC_ERR_PATH_NOT_ACCEPTED:             "KDC_ERR_PATH_NOT_ACCEPTED KDC Policy rejects transited path",
+	KDC_ERR_SVC_UNAVAILABLE:               "KDC_ERR_SVC_UNAVAILABLE A service is not available",
+	KRB_AP_ERR_BAD_INTEGRITY:              "KRB_AP_ERR_BAD_INTEGRITY Integrity check on decrypted field failed",
+	KRB_AP_ERR_TKT_EXPIRED:                "KRB_AP_ERR_TKT_EXPIRED Ticket expired",
+	KRB_AP_ERR_TKT_NYV:                    "KRB_AP_ERR_TKT_NYV Ticket not yet valid",
+	KRB_AP_ERR_REPEAT:                     "KRB_AP_ERR_REPEAT Request is a replay",
+	KRB_AP_ERR_NOT_US:                     "KRB_AP_ERR_NOT_US The ticket isn't for us",
+	KRB_AP_ERR_BADMATCH:                   "KRB_AP_ERR_BADMATCH Ticket and authenticator don't match",
+	KRB_AP_ERR_SKEW:                       "KRB_AP_ERR_SKEW Clock skew too great",
+	KRB_AP_ERR_BADADDR:                    "KRB_AP_ERR_BADADDR Incorrect net address",
+	KRB_AP_ERR_BADVERSION:                 "KRB_AP_ERR_BADVERSION Protocol version mismatch",
+	KRB_AP_ERR_MSG_TYPE:                   "KRB_AP_ERR_MSG_TYPE Invalid msg type",
+	KRB_AP_ERR_MODIFIED:                   "KRB_AP_ERR_MODIFIED Message stream modified",
+	KRB_AP_ERR_BADORDER:                   "KRB_AP_ERR_BADORDER Message out of order",
+	KRB_AP_ERR_BADKEYVER:                  "KRB_AP_ERR_BADKEYVER Specified version of key is not available",
+	KRB_AP_ERR_NOKEY:                      "KRB_AP_ERR_NOKEY Service key not available",
+	KRB_AP_ERR_MUT_FAIL:                   "KRB_AP_ERR_MUT_FAIL Mutual authentication failed",
+	KRB_AP_ERR_BADDIRECTION:               "KRB_AP_ERR_BADDIRECTION Incorrect message direction",
+	KRB_AP_ERR_METHOD:                     "KRB_AP_ERR_METHOD Alternative authentication method required",
+	KRB_AP_ERR_BADSEQ:                     "KRB_AP_ERR_BADSEQ Incorrect sequence number in message",
+	KRB_AP_ERR_INAPP_CKSUM:                "KRB_AP_ERR_INAPP_CKSUM Inappropriate type of checksum in message",
+	KRB_AP_PATH_NOT_ACCEPTED:              "KRB_AP_PATH_NOT_ACCEPTED Policy rejects transited path",
+	KRB_ERR_RESPONSE_TOO_BIG:              "KRB_ERR_RESPONSE_TOO_BIG Response too big for UDP; retry with TCP",
+	KRB_ERR_GENERIC:                       "KRB_ERR_GENERIC Generic error (description in e-text)",
+	KRB_ERR_FIELD_TOOLONG:                 "KRB_ERR_FIELD_TOOLONG Field is too long for this implementation",
+	KDC_ERROR_CLIENT_NOT_TRUSTED:          "KDC_ERROR_CLIENT_NOT_TRUSTED Reserved for PKINIT",
+	KDC_ERROR_KDC_NOT_TRUSTED:             "KDC_ERROR_KDC_NOT_TRUSTED Reserved for PKINIT",
+	KDC_ERROR_INVALID_SIG:                 "KDC_ERROR_INVALID_SIG Reserved for PKINIT",
+	KDC_ERR_KEY_TOO_WEAK:                  "KDC_ERR_KEY_TOO_WEAK Reserved for PKINIT",
+	KDC_ERR_CERTIFICATE_MISMATCH:          "KDC_ERR_CERTIFICATE_MISMATCH Reserved for PKINIT",
+	KRB_AP_ERR_NO_TGT:                     "KRB_AP_ERR_NO_TGT No TGT available to validate USER-TO-USER",
+	KDC_ERR_WRONG_REALM:                   "KDC_ERR_WRONG_REALM Reserved for future use",
+	KRB_AP_ERR_USER_TO_USER_REQUIRED:      "KRB_AP_ERR_USER_TO_USER_REQUIRED Ticket must be for USER-TO-USER",
+	KDC_ERR_CANT_VERIFY_CERTIFICATE:       "KDC_ERR_CANT_VERIFY_CERTIFICATE Reserved for PKINIT",
+	KDC_ERR_INVALID_CERTIFICATE:           "KDC_ERR_INVALID_CERTIFICATE Reserved for PKINIT",
+	KDC_ERR_REVOKED_CERTIFICATE:           "KDC_ERR_REVOKED_CERTIFICATE Reserved for PKINIT",
+	KDC_ERR_REVOCATION_STATUS_UNKNOWN:     "KDC_ERR_REVOCATION_STATUS_UNKNOWN Reserved for PKINIT",
+	KDC_ERR_REVOCATION_STATUS_UNAVAILABLE: "KDC_ERR_REVOCATION_STATUS_UNAVAILABLE Reserved for PKINIT",
+	KDC_ERR_CLIENT_NAME_MISMATCH:          "KDC_ERR_CLIENT_NAME_MISMATCH Reserved for PKINIT",
+	KDC_ERR_KDC_NAME_MISMATCH:             "KDC_ERR_KDC_NAME_MISMATCH Reserved for PKINIT",
+}
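+
+// Lookup is typically used when surfacing a KRB_ERROR to a caller, for example
+// (sketch):
+//
+//	desc := Lookup(KDC_ERR_PREAUTH_REQUIRED)
+//	// desc == "(25) KDC_ERR_PREAUTH_REQUIRED Additional pre-authentication required"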
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/etypeID/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/etypeID/constants.go
new file mode 100644
index 0000000..46a0d74
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/etypeID/constants.go
@@ -0,0 +1,101 @@
+// Package etypeID provides Kerberos 5 encryption type assigned numbers.
+package etypeID
+
+// Kerberos encryption type assigned numbers.
+const (
+	//RESERVED : 0
+	DES_CBC_CRC                  int32 = 1
+	DES_CBC_MD4                  int32 = 2
+	DES_CBC_MD5                  int32 = 3
+	DES_CBC_RAW                  int32 = 4
+	DES3_CBC_MD5                 int32 = 5
+	DES3_CBC_RAW                 int32 = 6
+	DES3_CBC_SHA1                int32 = 7
+	DES_HMAC_SHA1                int32 = 8
+	DSAWITHSHA1_CMSOID           int32 = 9
+	MD5WITHRSAENCRYPTION_CMSOID  int32 = 10
+	SHA1WITHRSAENCRYPTION_CMSOID int32 = 11
+	RC2CBC_ENVOID                int32 = 12
+	RSAENCRYPTION_ENVOID         int32 = 13
+	RSAES_OAEP_ENV_OID           int32 = 14
+	DES_EDE3_CBC_ENV_OID         int32 = 15
+	DES3_CBC_SHA1_KD             int32 = 16
+	AES128_CTS_HMAC_SHA1_96      int32 = 17
+	AES256_CTS_HMAC_SHA1_96      int32 = 18
+	AES128_CTS_HMAC_SHA256_128   int32 = 19
+	AES256_CTS_HMAC_SHA384_192   int32 = 20
+	//UNASSIGNED : 21-22
+	RC4_HMAC             int32 = 23
+	RC4_HMAC_EXP         int32 = 24
+	CAMELLIA128_CTS_CMAC int32 = 25
+	CAMELLIA256_CTS_CMAC int32 = 26
+	//UNASSIGNED : 27-64
+	SUBKEY_KEYMATERIAL int32 = 65
+	//UNASSIGNED : 66-2147483647
+)
+
+// ETypesByName is a map of EncType names to their assigned EncType number.
+var ETypesByName = map[string]int32{
+	"des-cbc-crc":                  DES_CBC_CRC,
+	"des-cbc-md4":                  DES_CBC_MD4,
+	"des-cbc-md5":                  DES_CBC_MD5,
+	"des-cbc-raw":                  DES_CBC_RAW,
+	"des3-cbc-md5":                 DES3_CBC_MD5,
+	"des3-cbc-raw":                 DES3_CBC_RAW,
+	"des3-cbc-sha1":                DES3_CBC_SHA1,
+	"des3-hmac-sha1":               DES_HMAC_SHA1,
+	"des3-cbc-sha1-kd":             DES3_CBC_SHA1_KD,
+	"des-hmac-sha1":                DES_HMAC_SHA1,
+	"dsaWithSHA1-CmsOID":           DSAWITHSHA1_CMSOID,
+	"md5WithRSAEncryption-CmsOID":  MD5WITHRSAENCRYPTION_CMSOID,
+	"sha1WithRSAEncryption-CmsOID": SHA1WITHRSAENCRYPTION_CMSOID,
+	"rc2CBC-EnvOID":                RC2CBC_ENVOID,
+	"rsaEncryption-EnvOID":         RSAENCRYPTION_ENVOID,
+	"rsaES-OAEP-ENV-OID":           RSAES_OAEP_ENV_OID,
+	"des-ede3-cbc-Env-OID":         DES_EDE3_CBC_ENV_OID,
+	"aes128-cts-hmac-sha1-96":      AES128_CTS_HMAC_SHA1_96,
+	"aes128-cts":                   AES128_CTS_HMAC_SHA1_96,
+	"aes128-sha1":                  AES128_CTS_HMAC_SHA1_96,
+	"aes256-cts-hmac-sha1-96":      AES256_CTS_HMAC_SHA1_96,
+	"aes256-cts":                   AES256_CTS_HMAC_SHA1_96,
+	"aes256-sha1":                  AES256_CTS_HMAC_SHA1_96,
+	"aes128-cts-hmac-sha256-128":   AES128_CTS_HMAC_SHA256_128,
+	"aes128-sha2":                  AES128_CTS_HMAC_SHA256_128,
+	"aes256-cts-hmac-sha384-192":   AES256_CTS_HMAC_SHA384_192,
+	"aes256-sha2":                  AES256_CTS_HMAC_SHA384_192,
+	"arcfour-hmac":                 RC4_HMAC,
+	"rc4-hmac":                     RC4_HMAC,
+	"arcfour-hmac-md5":             RC4_HMAC,
+	"arcfour-hmac-exp":             RC4_HMAC_EXP,
+	"rc4-hmac-exp":                 RC4_HMAC_EXP,
+	"arcfour-hmac-md5-exp":         RC4_HMAC_EXP,
+	"camellia128-cts-cmac":         CAMELLIA128_CTS_CMAC,
+	"camellia128-cts":              CAMELLIA128_CTS_CMAC,
+	"camellia256-cts-cmac":         CAMELLIA256_CTS_CMAC,
+	"camellia256-cts":              CAMELLIA256_CTS_CMAC,
+	"subkey-keymaterial":           SUBKEY_KEYMATERIAL,
+}
+
+// EtypeSupported resolves the etype name string to the etype ID.
+// If zero is returned the etype is not supported by gokrb5.
+func EtypeSupported(etype string) int32 {
+	// Slice of supported enctype IDs
+	s := []int32{
+		AES128_CTS_HMAC_SHA1_96,
+		AES256_CTS_HMAC_SHA1_96,
+		AES128_CTS_HMAC_SHA256_128,
+		AES256_CTS_HMAC_SHA384_192,
+		DES3_CBC_SHA1_KD,
+		RC4_HMAC,
+	}
+	id := ETypesByName[etype]
+	if id == 0 {
+		return id
+	}
+	for _, sid := range s {
+		if id == sid {
+			return id
+		}
+	}
+	return 0
+}
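+
+// For example (sketch), resolving an enctype name from configuration:
+//
+//	id := EtypeSupported("aes256-cts-hmac-sha1-96") // AES256_CTS_HMAC_SHA1_96 (18)
+//	if id == 0 {
+//		// the requested enctype is not supported by gokrb5
+//	}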
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/flags/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/flags/constants.go
new file mode 100644
index 0000000..787801f
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/flags/constants.go
@@ -0,0 +1,36 @@
+// Package flags provides Kerberos 5 flag assigned numbers.
+package flags
+
+// Flag values for KRB5 messages and tickets.
+const (
+	Reserved               = 0
+	Forwardable            = 1
+	Forwarded              = 2
+	Proxiable              = 3
+	Proxy                  = 4
+	AllowPostDate          = 5
+	MayPostDate            = 5
+	PostDated              = 6
+	Invalid                = 7
+	Renewable              = 8
+	Initial                = 9
+	PreAuthent             = 10
+	HWAuthent              = 11
+	OptHardwareAuth        = 11
+	RequestAnonymous       = 12
+	TransitedPolicyChecked = 12
+	OKAsDelegate           = 13
+	EncPARep               = 15
+	Canonicalize           = 15
+	DisableTransitedCheck  = 26
+	RenewableOK            = 27
+	EncTktInSkey           = 28
+	Renew                  = 30
+	Validate               = 31
+
+	// AP Option Flags
+	// 0 Reserved for future use.
+	APOptionUseSessionKey  = 1
+	APOptionMutualRequired = 2
+	// 3-31 Reserved for future use.
+)
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/keyusage/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/keyusage/constants.go
new file mode 100644
index 0000000..5b232d1
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/keyusage/constants.go
@@ -0,0 +1,42 @@
+// Package keyusage provides Kerberos 5 key usage assigned numbers.
+package keyusage
+
+// Key usage numbers.
+const (
+	AS_REQ_PA_ENC_TIMESTAMP                        = 1
+	KDC_REP_TICKET                                 = 2
+	AS_REP_ENCPART                                 = 3
+	TGS_REQ_KDC_REQ_BODY_AUTHDATA_SESSION_KEY      = 4
+	TGS_REQ_KDC_REQ_BODY_AUTHDATA_SUB_KEY          = 5
+	TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR_CHKSUM = 6
+	TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR        = 7
+	TGS_REP_ENCPART_SESSION_KEY                    = 8
+	TGS_REP_ENCPART_AUTHENTICATOR_SUB_KEY          = 9
+	AP_REQ_AUTHENTICATOR_CHKSUM                    = 10
+	AP_REQ_AUTHENTICATOR                           = 11
+	AP_REP_ENCPART                                 = 12
+	KRB_PRIV_ENCPART                               = 13
+	KRB_CRED_ENCPART                               = 14
+	KRB_SAFE_CHKSUM                                = 15
+	KERB_NON_KERB_SALT                             = 16
+	KERB_NON_KERB_CKSUM_SALT                       = 17
+	//18.  Reserved for future use in Kerberos and related protocols.
+	AD_KDC_ISSUED_CHKSUM = 19
+	//20-21.  Reserved for future use in Kerberos and related protocols.
+	GSSAPI_ACCEPTOR_SEAL           = 22
+	GSSAPI_ACCEPTOR_SIGN           = 23
+	GSSAPI_INITIATOR_SEAL          = 24
+	GSSAPI_INITIATOR_SIGN          = 25
+	KEY_USAGE_FAST_REQ_CHKSUM      = 50
+	KEY_USAGE_FAST_ENC             = 51
+	KEY_USAGE_FAST_REP             = 52
+	KEY_USAGE_FAST_FINISHED        = 53
+	KEY_USAGE_ENC_CHALLENGE_CLIENT = 54
+	KEY_USAGE_ENC_CHALLENGE_KDC    = 55
+	KEY_USAGE_AS_REQ               = 56
+	//26-511.  Reserved for future use in Kerberos and related protocols.
+	//512-1023.  Reserved for uses internal to a Kerberos implementation.
+	//1024.  Encryption for application use in protocols that do not specify key usage values
+	//1025.  Checksums for application use in protocols that do not specify key usage values
+	//1026-2047.  Reserved for application use.
+)
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/msgtype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/msgtype/constants.go
new file mode 100644
index 0000000..ad21810
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/msgtype/constants.go
@@ -0,0 +1,18 @@
+// Package msgtype provides Kerberos 5 message type assigned numbers.
+package msgtype
+
+// KRB message type IDs.
+const (
+	KRB_AS_REQ     = 10 //Request for initial authentication
+	KRB_AS_REP     = 11 //Response to KRB_AS_REQ request
+	KRB_TGS_REQ    = 12 //Request for authentication based on TGT
+	KRB_TGS_REP    = 13 //Response to KRB_TGS_REQ request
+	KRB_AP_REQ     = 14 //Application request to server
+	KRB_AP_REP     = 15 //Response to KRB_AP_REQ_MUTUAL
+	KRB_RESERVED16 = 16 //Reserved for user-to-user krb_tgt_request
+	KRB_RESERVED17 = 17 //Reserved for user-to-user krb_tgt_reply
+	KRB_SAFE       = 20 // Safe (checksummed) application message
+	KRB_PRIV       = 21 // Private (encrypted) application message
+	KRB_CRED       = 22 //Private (encrypted) message to forward credentials
+	KRB_ERROR      = 30 //Error response
+)
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/nametype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/nametype/constants.go
new file mode 100644
index 0000000..c111a05
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/nametype/constants.go
@@ -0,0 +1,15 @@
+// Package nametype provides Kerberos 5 principal name type numbers.
+package nametype
+
+// Kerberos name type IDs.
+const (
+	KRB_NT_UNKNOWN        int32 = 0  //Name type not known
+	KRB_NT_PRINCIPAL      int32 = 1  //Just the name of the principal as in DCE,  or for users
+	KRB_NT_SRV_INST       int32 = 2  //Service and other unique instance (krbtgt)
+	KRB_NT_SRV_HST        int32 = 3  //Service with host name as instance (telnet, rcommands)
+	KRB_NT_SRV_XHST       int32 = 4  //Service with host as remaining components
+	KRB_NT_UID            int32 = 5  //Unique ID
+	KRB_NT_X500_PRINCIPAL int32 = 6  //Encoded X.509 Distinguished name [RFC2253]
+	KRB_NT_SMTP_NAME      int32 = 7  //Name in form of SMTP email name (e.g., user@example.com)
+	KRB_NT_ENTERPRISE     int32 = 10 //Enterprise name; may be mapped to principal name
+)
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/iana/patype/constants.go b/vendor/github.com/jcmturner/gokrb5/v8/iana/patype/constants.go
new file mode 100644
index 0000000..aa04f63
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/iana/patype/constants.go
@@ -0,0 +1,77 @@
+// Package patype provides Kerberos 5 pre-authentication type assigned numbers.
+package patype
+
+// Kerberos pre-authentication type assigned numbers.
+const (
+	PA_TGS_REQ       int32 = 1
+	PA_ENC_TIMESTAMP int32 = 2
+	PA_PW_SALT       int32 = 3
+	//RESERVED : 4
+	PA_ENC_UNIX_TIME       int32 = 5
+	PA_SANDIA_SECUREID     int32 = 6
+	PA_SESAME              int32 = 7
+	PA_OSF_DCE             int32 = 8
+	PA_CYBERSAFE_SECUREID  int32 = 9
+	PA_AFS3_SALT           int32 = 10
+	PA_ETYPE_INFO          int32 = 11
+	PA_SAM_CHALLENGE       int32 = 12
+	PA_SAM_RESPONSE        int32 = 13
+	PA_PK_AS_REQ_OLD       int32 = 14
+	PA_PK_AS_REP_OLD       int32 = 15
+	PA_PK_AS_REQ           int32 = 16
+	PA_PK_AS_REP           int32 = 17
+	PA_PK_OCSP_RESPONSE    int32 = 18
+	PA_ETYPE_INFO2         int32 = 19
+	PA_USE_SPECIFIED_KVNO  int32 = 20
+	PA_SVR_REFERRAL_INFO   int32 = 20
+	PA_SAM_REDIRECT        int32 = 21
+	PA_GET_FROM_TYPED_DATA int32 = 22
+	TD_PADATA              int32 = 22
+	PA_SAM_ETYPE_INFO      int32 = 23
+	PA_ALT_PRINC           int32 = 24
+	PA_SERVER_REFERRAL     int32 = 25
+	//UNASSIGNED : 26-29
+	PA_SAM_CHALLENGE2 int32 = 30
+	PA_SAM_RESPONSE2  int32 = 31
+	//UNASSIGNED : 32-40
+	PA_EXTRA_TGT int32 = 41
+	//UNASSIGNED : 42-100
+	TD_PKINIT_CMS_CERTIFICATES int32 = 101
+	TD_KRB_PRINCIPAL           int32 = 102
+	TD_KRB_REALM               int32 = 103
+	TD_TRUSTED_CERTIFIERS      int32 = 104
+	TD_CERTIFICATE_INDEX       int32 = 105
+	TD_APP_DEFINED_ERROR       int32 = 106
+	TD_REQ_NONCE               int32 = 107
+	TD_REQ_SEQ                 int32 = 108
+	TD_DH_PARAMETERS           int32 = 109
+	//UNASSIGNED : 110
+	TD_CMS_DIGEST_ALGORITHMS  int32 = 111
+	TD_CERT_DIGEST_ALGORITHMS int32 = 112
+	//UNASSIGNED : 113-127
+	PA_PAC_REQUEST         int32 = 128
+	PA_FOR_USER            int32 = 129
+	PA_FOR_X509_USER       int32 = 130
+	PA_FOR_CHECK_DUPS      int32 = 131
+	PA_AS_CHECKSUM         int32 = 132
+	PA_FX_COOKIE           int32 = 133
+	PA_AUTHENTICATION_SET  int32 = 134
+	PA_AUTH_SET_SELECTED   int32 = 135
+	PA_FX_FAST             int32 = 136
+	PA_FX_ERROR            int32 = 137
+	PA_ENCRYPTED_CHALLENGE int32 = 138
+	//UNASSIGNED : 139-140
+	PA_OTP_CHALLENGE  int32 = 141
+	PA_OTP_REQUEST    int32 = 142
+	PA_OTP_CONFIRM    int32 = 143
+	PA_OTP_PIN_CHANGE int32 = 144
+	PA_EPAK_AS_REQ    int32 = 145
+	PA_EPAK_AS_REP    int32 = 146
+	PA_PKINIT_KX      int32 = 147
+	PA_PKU2U_NAME     int32 = 148
+	PA_REQ_ENC_PA_REP int32 = 149
+	PA_AS_FRESHNESS   int32 = 150
+	//UNASSIGNED : 151-164
+	PA_SUPPORTED_ETYPES int32 = 165
+	PA_EXTENDED_ERROR   int32 = 166
+)
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/kadmin/changepasswddata.go b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/changepasswddata.go
new file mode 100644
index 0000000..2d68eda
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/changepasswddata.go
@@ -0,0 +1,23 @@
+package kadmin
+
+import (
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// ChangePasswdData is the payload to a password change message.
+type ChangePasswdData struct {
+	NewPasswd []byte              `asn1:"explicit,tag:0"`
+	TargName  types.PrincipalName `asn1:"explicit,optional,tag:1"`
+	TargRealm string              `asn1:"generalstring,optional,explicit,tag:2"`
+}
+
+// Marshal ChangePasswdData into a byte slice.
+func (c *ChangePasswdData) Marshal() ([]byte, error) {
+	b, err := asn1.Marshal(*c)
+	if err != nil {
+		return []byte{}, err
+	}
+	//b = asn1tools.AddASNAppTag(b, asnAppTag.)
+	return b, nil
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/kadmin/message.go b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/message.go
new file mode 100644
index 0000000..d1864c9
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/message.go
@@ -0,0 +1,114 @@
+package kadmin
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+
+	"github.com/jcmturner/gokrb5/v8/messages"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+const (
+	versionHex = "ff80"
+)
+
+// Request message for changing password.
+type Request struct {
+	APREQ   messages.APReq
+	KRBPriv messages.KRBPriv
+}
+
+// Reply message for a password change.
+type Reply struct {
+	MessageLength int
+	Version       int
+	APREPLength   int
+	APREP         messages.APRep
+	KRBPriv       messages.KRBPriv
+	KRBError      messages.KRBError
+	IsKRBError    bool
+	ResultCode    uint16
+	Result        string
+}
+
+// Marshal a Request into a byte slice.
+func (m *Request) Marshal() (b []byte, err error) {
+	b = []byte{255, 128} // protocol version number: contains the hex constant 0xff80 (big-endian integer).
+	ab, e := m.APREQ.Marshal()
+	if e != nil {
+		err = fmt.Errorf("error marshaling AP_REQ: %v", e)
+		return
+	}
+	if len(ab) > math.MaxUint16 {
+		err = errors.New("length of AP_REQ greater than max Uint16 size")
+		return
+	}
+	al := make([]byte, 2)
+	binary.BigEndian.PutUint16(al, uint16(len(ab)))
+	b = append(b, al...)
+	b = append(b, ab...)
+	pb, e := m.KRBPriv.Marshal()
+	if e != nil {
+		err = fmt.Errorf("error marshaling KRB_Priv: %v", e)
+		return
+	}
+	b = append(b, pb...)
+	if len(b)+2 > math.MaxUint16 {
+		err = errors.New("length of message greater than max Uint16 size")
+		return
+	}
+	ml := make([]byte, 2)
+	binary.BigEndian.PutUint16(ml, uint16(len(b)+2))
+	b = append(ml, b...)
+	return
+}
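+
+// The marshaled Request therefore has the following layout (big-endian):
+//
+//	[2 bytes]   message length, including this field
+//	[2 bytes]   protocol version number (0xff80)
+//	[2 bytes]   AP_REQ length
+//	[n bytes]   AP_REQ
+//	[remainder] KRB_PRIV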
+
+// Unmarshal a byte slice into a Reply.
+func (m *Reply) Unmarshal(b []byte) error {
+	m.MessageLength = int(binary.BigEndian.Uint16(b[0:2]))
+	m.Version = int(binary.BigEndian.Uint16(b[2:4]))
+	if m.Version != 1 {
+		return fmt.Errorf("kadmin reply has incorrect protocol version number: %d", m.Version)
+	}
+	m.APREPLength = int(binary.BigEndian.Uint16(b[4:6]))
+	if m.APREPLength != 0 {
+		err := m.APREP.Unmarshal(b[6 : 6+m.APREPLength])
+		if err != nil {
+			return err
+		}
+		err = m.KRBPriv.Unmarshal(b[6+m.APREPLength : m.MessageLength])
+		if err != nil {
+			return err
+		}
+	} else {
+		m.IsKRBError = true
+		m.KRBError.Unmarshal(b[6:m.MessageLength])
+		m.ResultCode, m.Result = parseResponse(m.KRBError.EData)
+	}
+	return nil
+}
+
+func parseResponse(b []byte) (c uint16, s string) {
+	c = binary.BigEndian.Uint16(b[0:2])
+	buf := bytes.NewBuffer(b[2:])
+	m := make([]byte, len(b)-2)
+	binary.Read(buf, binary.BigEndian, &m)
+	s = string(m)
+	return
+}
+
+// Decrypt the encrypted part of the KRBError within the change password Reply.
+func (m *Reply) Decrypt(key types.EncryptionKey) error {
+	if m.IsKRBError {
+		return m.KRBError
+	}
+	err := m.KRBPriv.DecryptEncPart(key)
+	if err != nil {
+		return err
+	}
+	m.ResultCode, m.Result = parseResponse(m.KRBPriv.DecryptedEncPart.UserData)
+	return nil
+}
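+
+// Handling a reply from the kpasswd service (sketch; rb holds the raw response
+// bytes and k is the subkey returned by ChangePasswdMsg):
+//
+//	var rep Reply
+//	if err := rep.Unmarshal(rb); err != nil {
+//		// handle error
+//	}
+//	if err := rep.Decrypt(k); err != nil {
+//		// KRB error returned by the server or decryption failure
+//	}
+//	// rep.ResultCode of zero indicates success; rep.Result carries the server's text.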
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/kadmin/passwd.go b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/passwd.go
new file mode 100644
index 0000000..db199bf
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/kadmin/passwd.go
@@ -0,0 +1,68 @@
+// Package kadmin provides Kerberos administration capabilities.
+package kadmin
+
+import (
+	"github.com/jcmturner/gokrb5/v8/crypto"
+	"github.com/jcmturner/gokrb5/v8/krberror"
+	"github.com/jcmturner/gokrb5/v8/messages"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// ChangePasswdMsg generates a change password request and also returns the key needed to decrypt the reply.
+func ChangePasswdMsg(cname types.PrincipalName, realm, password string, tkt messages.Ticket, sessionKey types.EncryptionKey) (r Request, k types.EncryptionKey, err error) {
+	// Create change password data struct and marshal to bytes
+	chgpasswd := ChangePasswdData{
+		NewPasswd: []byte(password),
+		TargName:  cname,
+		TargRealm: realm,
+	}
+	chpwdb, err := chgpasswd.Marshal()
+	if err != nil {
+		err = krberror.Errorf(err, krberror.KRBMsgError, "error marshaling change passwd data")
+		return
+	}
+
+	// Generate authenticator
+	auth, err := types.NewAuthenticator(realm, cname)
+	if err != nil {
+		err = krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator")
+		return
+	}
+	etype, err := crypto.GetEtype(sessionKey.KeyType)
+	if err != nil {
+		err = krberror.Errorf(err, krberror.KRBMsgError, "error generating subkey etype")
+		return
+	}
+	err = auth.GenerateSeqNumberAndSubKey(etype.GetETypeID(), etype.GetKeyByteSize())
+	if err != nil {
+		err = krberror.Errorf(err, krberror.KRBMsgError, "error generating subkey")
+		return
+	}
+	k = auth.SubKey
+
+	// Generate AP_REQ
+	APreq, err := messages.NewAPReq(tkt, sessionKey, auth)
+	if err != nil {
+		return
+	}
+
+	// Form the KRBPriv encpart data
+	kp := messages.EncKrbPrivPart{
+		UserData:       chpwdb,
+		Timestamp:      auth.CTime,
+		Usec:           auth.Cusec,
+		SequenceNumber: auth.SeqNumber,
+	}
+	kpriv := messages.NewKRBPriv(kp)
+	err = kpriv.EncryptEncPart(k)
+	if err != nil {
+		err = krberror.Errorf(err, krberror.EncryptingError, "error encrypting change passwd data")
+		return
+	}
+
+	r = Request{
+		APREQ:   APreq,
+		KRBPriv: kpriv,
+	}
+	return
+}
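+
+// A minimal sketch of building and sending a change password request (tkt and
+// sessionKey come from a prior TGS exchange for the kadmin/changepw service;
+// the network transport is assumed to exist elsewhere):
+//
+//	r, k, err := ChangePasswdMsg(cname, realm, newPassword, tkt, sessionKey)
+//	if err != nil {
+//		// handle error
+//	}
+//	b, _ := r.Marshal()
+//	// send b to the kpasswd service, then unmarshal the response into a Reply
+//	// and decrypt it with k to obtain the result code.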
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go b/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go
new file mode 100644
index 0000000..5c2e9d7
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go
@@ -0,0 +1,530 @@
+// Package keytab implements Kerberos keytabs: https://web.mit.edu/kerberos/krb5-devel/doc/formats/keytab_file_format.html.
+package keytab
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"time"
+	"unsafe"
+
+	"github.com/jcmturner/gokrb5/v8/crypto"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+const (
+	keytabFirstByte byte = 05
+)
+
+// Keytab struct.
+type Keytab struct {
+	version uint8
+	Entries []entry
+}
+
+// Keytab entry struct.
+type entry struct {
+	Principal principal
+	Timestamp time.Time
+	KVNO8     uint8
+	Key       types.EncryptionKey
+	KVNO      uint32
+}
+
+func (e entry) String() string {
+	return fmt.Sprintf("% 4d %s %-56s %2d %-64x",
+		e.KVNO8,
+		e.Timestamp.Format("02/01/06 15:04:05"),
+		e.Principal.String(),
+		e.Key.KeyType,
+		e.Key.KeyValue,
+	)
+}
+
+// Keytab entry principal struct.
+type principal struct {
+	NumComponents int16 `json:"-"`
+	Realm         string
+	Components    []string
+	NameType      int32
+}
+
+func (p principal) String() string {
+	return fmt.Sprintf("%s@%s", strings.Join(p.Components, "/"), p.Realm)
+}
+
+// New creates new, empty Keytab type.
+func New() *Keytab {
+	var e []entry
+	return &Keytab{
+		version: 2,
+		Entries: e,
+	}
+}
+
+// GetEncryptionKey returns the EncryptionKey from the Keytab for the newest entry with the required kvno, etype and matching principal.
+// If the kvno is zero then the latest kvno will be returned. The kvno of the matched entry is also returned.
+func (kt *Keytab) GetEncryptionKey(princName types.PrincipalName, realm string, kvno int, etype int32) (types.EncryptionKey, int, error) {
+	var key types.EncryptionKey
+	var t time.Time
+	var kv int
+	for _, k := range kt.Entries {
+		if k.Principal.Realm == realm && len(k.Principal.Components) == len(princName.NameString) &&
+			k.Key.KeyType == etype &&
+			(k.KVNO == uint32(kvno) || kvno == 0) &&
+			k.Timestamp.After(t) {
+			p := true
+			for i, n := range k.Principal.Components {
+				if princName.NameString[i] != n {
+					p = false
+					break
+				}
+			}
+			if p {
+				key = k.Key
+				kv = int(k.KVNO)
+				t = k.Timestamp
+			}
+		}
+	}
+	if len(key.KeyValue) < 1 {
+		return key, 0, fmt.Errorf("matching key not found in keytab. Looking for %v realm: %v kvno: %v etype: %v", princName.NameString, realm, kvno, etype)
+	}
+	return key, kv, nil
+}
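+
+// For example (sketch), loading a keytab and selecting the latest AES256 key
+// for a service principal:
+//
+//	kt, err := Load("/etc/krb5.keytab")
+//	if err != nil {
+//		// handle error
+//	}
+//	pn := types.PrincipalName{NameString: []string{"HTTP", "host.example.com"}}
+//	key, kvno, err := kt.GetEncryptionKey(pn, "EXAMPLE.COM", 0, 18) // kvno 0 = latest, 18 = aes256-cts-hmac-sha1-96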
+
+// Create a new Keytab entry.
+func newEntry() entry {
+	var b []byte
+	return entry{
+		Principal: newPrincipal(),
+		Timestamp: time.Time{},
+		KVNO8:     0,
+		Key: types.EncryptionKey{
+			KeyType:  0,
+			KeyValue: b,
+		},
+		KVNO: 0,
+	}
+}
+
+func (kt Keytab) String() string {
+	var s string
+	s = `KVNO Timestamp         Principal                                                ET Key
+---- ----------------- -------------------------------------------------------- -- ----------------------------------------------------------------
+`
+	for _, entry := range kt.Entries {
+		s += entry.String() + "\n"
+	}
+	return s
+}
+
+// AddEntry adds an entry to the keytab. The password should be provided in plain text; it will be converted to a key of the specified enctype before being stored.
+func (kt *Keytab) AddEntry(principalName, realm, password string, ts time.Time, KVNO uint8, encType int32) error {
+	// Generate a key from the password
+	princ, _ := types.ParseSPNString(principalName)
+	key, _, err := crypto.GetKeyFromPassword(password, princ, realm, encType, types.PADataSequence{})
+	if err != nil {
+		return err
+	}
+
+	// Populate the keytab entry principal
+	ktep := newPrincipal()
+	ktep.NumComponents = int16(len(princ.NameString))
+	if kt.version == 1 {
+		ktep.NumComponents += 1
+	}
+
+	ktep.Realm = realm
+	ktep.Components = princ.NameString
+	ktep.NameType = princ.NameType
+
+	// Populate the keytab entry
+	e := newEntry()
+	e.Principal = ktep
+	e.Timestamp = ts
+	e.KVNO8 = KVNO
+	e.KVNO = uint32(KVNO)
+	e.Key = key
+
+	kt.Entries = append(kt.Entries, e)
+	return nil
+}
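+
+// Building a keytab programmatically and writing it out (sketch; the file name
+// is illustrative):
+//
+//	kt := New()
+//	if err := kt.AddEntry("HTTP/host.example.com", "EXAMPLE.COM", "password", time.Now(), 1, 18); err != nil {
+//		// handle error
+//	}
+//	f, _ := os.Create("service.keytab")
+//	defer f.Close()
+//	if _, err := kt.Write(f); err != nil {
+//		// handle error
+//	}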
+
+// Create a new principal.
+func newPrincipal() principal {
+	var c []string
+	return principal{
+		NumComponents: 0,
+		Realm:         "",
+		Components:    c,
+		NameType:      0,
+	}
+}
+
+// Load a Keytab file into a Keytab type.
+func Load(ktPath string) (*Keytab, error) {
+	kt := new(Keytab)
+	b, err := ioutil.ReadFile(ktPath)
+	if err != nil {
+		return kt, err
+	}
+	err = kt.Unmarshal(b)
+	return kt, err
+}
+
+// Marshal keytab into byte slice
+func (kt *Keytab) Marshal() ([]byte, error) {
+	b := []byte{keytabFirstByte, kt.version}
+	for _, e := range kt.Entries {
+		eb, err := e.marshal(int(kt.version))
+		if err != nil {
+			return b, err
+		}
+		b = append(b, eb...)
+	}
+	return b, nil
+}
+
+// Write the keytab bytes to io.Writer.
+// Returns the number of bytes written
+func (kt *Keytab) Write(w io.Writer) (int, error) {
+	b, err := kt.Marshal()
+	if err != nil {
+		return 0, fmt.Errorf("error marshaling keytab: %v", err)
+	}
+	return w.Write(b)
+}
+
+// Unmarshal byte slice of Keytab data into Keytab type.
+func (kt *Keytab) Unmarshal(b []byte) error {
+	if len(b) < 2 {
+		return fmt.Errorf("byte array is less than 2 bytes: %d", len(b))
+	}
+
+	//The first byte of the file always has the value 5
+	if b[0] != keytabFirstByte {
+		return errors.New("invalid keytab data. First byte does not equal 5")
+	}
+	//Get keytab version
+	//The 2nd byte contains the version number (1 or 2)
+	kt.version = b[1]
+	if kt.version != 1 && kt.version != 2 {
+		return errors.New("invalid keytab data. Keytab version is neither 1 nor 2")
+	}
+	//Version 1 of the file format uses native byte order for integer representations. Version 2 always uses big-endian byte order
+	var endian binary.ByteOrder
+	endian = binary.BigEndian
+	if kt.version == 1 && isNativeEndianLittle() {
+		endian = binary.LittleEndian
+	}
+	// n tracks position in the byte array
+	n := 2
+	l, err := readInt32(b, &n, &endian)
+	if err != nil {
+		return err
+	}
+	for l != 0 {
+		if l < 0 {
+			//Zero padded so skip over
+			l = l * -1
+			n = n + int(l)
+		} else {
+			if n < 0 {
+				return fmt.Errorf("%d can't be less than zero", n)
+			}
+			if n+int(l) > len(b) {
+				return fmt.Errorf("%s's length is less than %d", b, n+int(l))
+			}
+			eb := b[n : n+int(l)]
+			n = n + int(l)
+			ke := newEntry()
+			// p keeps track as to where we are in the byte stream
+			var p int
+			var err error
+			parsePrincipal(eb, &p, kt, &ke, &endian)
+			ke.Timestamp, err = readTimestamp(eb, &p, &endian)
+			if err != nil {
+				return err
+			}
+			rei8, err := readInt8(eb, &p, &endian)
+			if err != nil {
+				return err
+			}
+			ke.KVNO8 = uint8(rei8)
+			rei16, err := readInt16(eb, &p, &endian)
+			if err != nil {
+				return err
+			}
+			ke.Key.KeyType = int32(rei16)
+			rei16, err = readInt16(eb, &p, &endian)
+			if err != nil {
+				return err
+			}
+			kl := int(rei16)
+			ke.Key.KeyValue, err = readBytes(eb, &p, kl, &endian)
+			if err != nil {
+				return err
+			}
+			// The 32-bit key version overrides the 8-bit key version.
+			// If at least 4 bytes are left after the other fields are read and they are non-zero
+			// this indicates the 32-bit version is present.
+			if len(eb)-p >= 4 {
+				// The 32-bit key may be present
+				ri32, err := readInt32(eb, &p, &endian)
+				if err != nil {
+					return err
+				}
+				ke.KVNO = uint32(ri32)
+			}
+			if ke.KVNO == 0 {
+				// Covers the cases where the 32-bit value read was zero or absent; fall back to the 8-bit KVNO8 value.
+				ke.KVNO = uint32(ke.KVNO8)
+			}
+			// Add the entry to the keytab
+			kt.Entries = append(kt.Entries, ke)
+		}
+		// Check if there are still 4 bytes left to read
+		// Also check that n is greater than zero
+		if n < 0 || n > len(b) || len(b[n:]) < 4 {
+			break
+		}
+		// Read the size of the next entry
+		l, err = readInt32(b, &n, &endian)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e entry) marshal(v int) ([]byte, error) {
+	var b []byte
+	pb, err := e.Principal.marshal(v)
+	if err != nil {
+		return b, err
+	}
+	b = append(b, pb...)
+
+	var endian binary.ByteOrder
+	endian = binary.BigEndian
+	if v == 1 && isNativeEndianLittle() {
+		endian = binary.LittleEndian
+	}
+
+	t := make([]byte, 9)
+	endian.PutUint32(t[0:4], uint32(e.Timestamp.Unix()))
+	t[4] = e.KVNO8
+	endian.PutUint16(t[5:7], uint16(e.Key.KeyType))
+	endian.PutUint16(t[7:9], uint16(len(e.Key.KeyValue)))
+	b = append(b, t...)
+
+	buf := new(bytes.Buffer)
+	err = binary.Write(buf, endian, e.Key.KeyValue)
+	if err != nil {
+		return b, err
+	}
+	b = append(b, buf.Bytes()...)
+
+	t = make([]byte, 4)
+	endian.PutUint32(t, e.KVNO)
+	b = append(b, t...)
+
+	// Add the length header
+	t = make([]byte, 4)
+	endian.PutUint32(t, uint32(len(b)))
+	b = append(t, b...)
+	return b, nil
+}
+
+// Parse the Keytab bytes of a principal into a Keytab entry's principal.
+func parsePrincipal(b []byte, p *int, kt *Keytab, ke *entry, e *binary.ByteOrder) error {
+	var err error
+	ke.Principal.NumComponents, err = readInt16(b, p, e)
+	if err != nil {
+		return err
+	}
+	if kt.version == 1 {
+		//In version 1 the number of components includes the realm. Minus 1 to make consistent with version 2
+		ke.Principal.NumComponents--
+	}
+	lenRealm, err := readInt16(b, p, e)
+	if err != nil {
+		return err
+	}
+	realmB, err := readBytes(b, p, int(lenRealm), e)
+	if err != nil {
+		return err
+	}
+	ke.Principal.Realm = string(realmB)
+	for i := 0; i < int(ke.Principal.NumComponents); i++ {
+		l, err := readInt16(b, p, e)
+		if err != nil {
+			return err
+		}
+		compB, err := readBytes(b, p, int(l), e)
+		if err != nil {
+			return err
+		}
+		ke.Principal.Components = append(ke.Principal.Components, string(compB))
+	}
+	if kt.version != 1 {
+		//Name Type is omitted in version 1
+		ke.Principal.NameType, err = readInt32(b, p, e)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (p principal) marshal(v int) ([]byte, error) {
+	//var b []byte
+	b := make([]byte, 2)
+	var endian binary.ByteOrder
+	endian = binary.BigEndian
+	if v == 1 && isNativeEndianLittle() {
+		endian = binary.LittleEndian
+	}
+	endian.PutUint16(b[0:], uint16(p.NumComponents))
+	realm, err := marshalString(p.Realm, v)
+	if err != nil {
+		return b, err
+	}
+	b = append(b, realm...)
+	for _, c := range p.Components {
+		cb, err := marshalString(c, v)
+		if err != nil {
+			return b, err
+		}
+		b = append(b, cb...)
+	}
+	if v != 1 {
+		t := make([]byte, 4)
+		endian.PutUint32(t, uint32(p.NameType))
+		b = append(b, t...)
+	}
+	return b, nil
+}
+
+func marshalString(s string, v int) ([]byte, error) {
+	sb := []byte(s)
+	b := make([]byte, 2)
+	var endian binary.ByteOrder
+	endian = binary.BigEndian
+	if v == 1 && isNativeEndianLittle() {
+		endian = binary.LittleEndian
+	}
+	endian.PutUint16(b[0:], uint16(len(sb)))
+	buf := new(bytes.Buffer)
+	err := binary.Write(buf, endian, sb)
+	if err != nil {
+		return b, err
+	}
+	b = append(b, buf.Bytes()...)
+	return b, err
+}
+
+// Read bytes representing a timestamp.
+func readTimestamp(b []byte, p *int, e *binary.ByteOrder) (time.Time, error) {
+	i32, err := readInt32(b, p, e)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return time.Unix(int64(i32), 0), nil
+}
+
+// Read bytes representing an eight bit integer.
+func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8, err error) {
+	if *p < 0 {
+		return 0, fmt.Errorf("%d cannot be less than zero", *p)
+	}
+
+	if (*p + 1) > len(b) {
+		return 0, fmt.Errorf("%s's length is less than %d", b, *p+1)
+	}
+	buf := bytes.NewBuffer(b[*p : *p+1])
+	binary.Read(buf, *e, &i)
+	*p++
+	return
+}
+
+// Read bytes representing a sixteen bit integer.
+func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16, err error) {
+	if *p < 0 {
+		return 0, fmt.Errorf("%d cannot be less than zero", *p)
+	}
+
+	if (*p + 2) > len(b) {
+		return 0, fmt.Errorf("%s's length is less than %d", b, *p+2)
+	}
+
+	buf := bytes.NewBuffer(b[*p : *p+2])
+	binary.Read(buf, *e, &i)
+	*p += 2
+	return
+}
+
+// Read bytes representing a thirty two bit integer.
+func readInt32(b []byte, p *int, e *binary.ByteOrder) (i int32, err error) {
+	if *p < 0 {
+		return 0, fmt.Errorf("%d cannot be less than zero", *p)
+	}
+
+	if (*p + 4) > len(b) {
+		return 0, fmt.Errorf("%s's length is less than %d", b, *p+4)
+	}
+
+	buf := bytes.NewBuffer(b[*p : *p+4])
+	binary.Read(buf, *e, &i)
+	*p += 4
+	return
+}
+
+func readBytes(b []byte, p *int, s int, e *binary.ByteOrder) ([]byte, error) {
+	if s < 0 {
+		return nil, fmt.Errorf("%d cannot be less than zero", s)
+	}
+	i := *p + s
+	if i > len(b) {
+		return nil, fmt.Errorf("%s's length is less than %d", b, i)
+	}
+	buf := bytes.NewBuffer(b[*p:i])
+	r := make([]byte, s)
+	if err := binary.Read(buf, *e, &r); err != nil {
+		return nil, err
+	}
+	*p += s
+	return r, nil
+}
+
+func isNativeEndianLittle() bool {
+	var x = 0x012345678
+	var p = unsafe.Pointer(&x)
+	var bp = (*[4]byte)(p)
+
+	var endian bool
+	if 0x01 == bp[0] {
+		endian = false
+	} else if (0x78 & 0xff) == (bp[0] & 0xff) {
+		endian = true
+	} else {
+		// Default to big endian
+		endian = false
+	}
+	return endian
+}
+
+// JSON returns information about the keys held in the keytab in JSON format.
+func (kt *Keytab) JSON() (string, error) {
+	b, err := json.MarshalIndent(kt, "", "  ")
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
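
The keytab parsing above is easiest to follow with a small end-to-end driver. The sketch below is illustrative only: the keytab path is hypothetical, and it assumes the package's exported `New` constructor alongside the `Unmarshal` and `JSON` methods shown in this file.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/jcmturner/gokrb5/v8/keytab"
)

func main() {
	// Read raw keytab bytes from disk (path is hypothetical).
	b, err := os.ReadFile("/etc/krb5.keytab")
	if err != nil {
		log.Fatal(err)
	}
	// Unmarshal checks the version header and then walks the
	// length-prefixed entries, as in the loop above.
	kt := keytab.New()
	if err := kt.Unmarshal(b); err != nil {
		log.Fatal(err)
	}
	// JSON renders the parsed principals, KVNOs and key types.
	j, err := kt.JSON()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(j)
}
```

Each entry in the printed JSON corresponds to one length-prefixed record consumed by the loop in `Unmarshal`.
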
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/krberror/error.go b/vendor/github.com/jcmturner/gokrb5/v8/krberror/error.go
new file mode 100644
index 0000000..01c6d99
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/krberror/error.go
@@ -0,0 +1,67 @@
+// Package krberror provides error type and functions for gokrb5.
+package krberror
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Error type descriptions.
+const (
+	separator       = " < "
+	EncodingError   = "Encoding_Error"
+	NetworkingError = "Networking_Error"
+	DecryptingError = "Decrypting_Error"
+	EncryptingError = "Encrypting_Error"
+	ChksumError     = "Checksum_Error"
+	KRBMsgError     = "KRBMessage_Handling_Error"
+	ConfigError     = "Configuration_Error"
+	KDCError        = "KDC_Error"
+)
+
+// Krberror is an error type for gokrb5
+type Krberror struct {
+	RootCause string
+	EText     []string
+}
+
+// Error function to implement the error interface.
+func (e Krberror) Error() string {
+	return fmt.Sprintf("[Root cause: %s] ", e.RootCause) + strings.Join(e.EText, separator)
+}
+
+// Add another error statement to the error.
+func (e *Krberror) Add(et string, s string) {
+	e.EText = append([]string{fmt.Sprintf("%s: %s", et, s)}, e.EText...)
+}
+
+// New creates a new instance of Krberror.
+func New(et, s string) Krberror {
+	return Krberror{
+		RootCause: et,
+		EText:     []string{s},
+	}
+}
+
+// Errorf appends to or creates a new Krberror.
+func Errorf(err error, et, format string, a ...interface{}) Krberror {
+	if e, ok := err.(Krberror); ok {
+		e.Add(et, fmt.Sprintf(format, a...))
+		return e
+	}
+	return NewErrorf(et, format+": %s", append(a, err)...)
+}
+
+// NewErrorf creates a new Krberror from a formatted string.
+func NewErrorf(et, format string, a ...interface{}) Krberror {
+	var s string
+	if len(a) > 0 {
+		s = fmt.Sprintf("%s: %s", et, fmt.Sprintf(format, a...))
+	} else {
+		s = fmt.Sprintf("%s: %s", et, format)
+	}
+	return Krberror{
+		RootCause: et,
+		EText:     []string{s},
+	}
+}
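
A short sketch of how these helpers are intended to compose: wrap a low-level error once with `Errorf`, then keep adding context as it travels up the stack. The hostname and message text below are only examples.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/jcmturner/gokrb5/v8/krberror"
)

func main() {
	// Wrap a low-level error with a category and message.
	base := errors.New("connection reset")
	err := krberror.Errorf(base, krberror.NetworkingError, "sending AS_REQ to %s", "kdc.example.com")
	// Add further context as the error propagates; statements are joined
	// most-recent-first with " < " when printed.
	err = krberror.Errorf(err, krberror.KRBMsgError, "AS exchange failed")

	fmt.Println(err)
	// The original category remains available as the root cause.
	fmt.Println("root cause:", err.RootCause)
}
```
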
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/APRep.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/APRep.go
new file mode 100644
index 0000000..555fb80
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/APRep.go
@@ -0,0 +1,49 @@
+package messages
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/iana/asnAppTag"
+	"github.com/jcmturner/gokrb5/v8/iana/msgtype"
+	"github.com/jcmturner/gokrb5/v8/krberror"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// APRep implements RFC 4120 KRB_AP_REP: https://tools.ietf.org/html/rfc4120#section-5.5.2.
+type APRep struct {
+	PVNO    int                 `asn1:"explicit,tag:0"`
+	MsgType int                 `asn1:"explicit,tag:1"`
+	EncPart types.EncryptedData `asn1:"explicit,tag:2"`
+}
+
+// EncAPRepPart is the encrypted part of KRB_AP_REP.
+type EncAPRepPart struct {
+	CTime          time.Time           `asn1:"generalized,explicit,tag:0"`
+	Cusec          int                 `asn1:"explicit,tag:1"`
+	Subkey         types.EncryptionKey `asn1:"optional,explicit,tag:2"`
+	SequenceNumber int64               `asn1:"optional,explicit,tag:3"`
+}
+
+// Unmarshal bytes b into the APRep struct.
+func (a *APRep) Unmarshal(b []byte) error {
+	_, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.APREP))
+	if err != nil {
+		return processUnmarshalReplyError(b, err)
+	}
+	expectedMsgType := msgtype.KRB_AP_REP
+	if a.MsgType != expectedMsgType {
+		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_AP_REP. Expected: %v; Actual: %v", expectedMsgType, a.MsgType)
+	}
+	return nil
+}
+
+// Unmarshal bytes b into the APRep encrypted part struct.
+func (a *EncAPRepPart) Unmarshal(b []byte) error {
+	_, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncAPRepPart))
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "AP_REP unmarshal error")
+	}
+	return nil
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/APReq.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/APReq.go
new file mode 100644
index 0000000..1836079
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/APReq.go
@@ -0,0 +1,199 @@
+package messages
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/asn1tools"
+	"github.com/jcmturner/gokrb5/v8/crypto"
+	"github.com/jcmturner/gokrb5/v8/iana"
+	"github.com/jcmturner/gokrb5/v8/iana/asnAppTag"
+	"github.com/jcmturner/gokrb5/v8/iana/errorcode"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/iana/msgtype"
+	"github.com/jcmturner/gokrb5/v8/keytab"
+	"github.com/jcmturner/gokrb5/v8/krberror"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+type marshalAPReq struct {
+	PVNO      int            `asn1:"explicit,tag:0"`
+	MsgType   int            `asn1:"explicit,tag:1"`
+	APOptions asn1.BitString `asn1:"explicit,tag:2"`
+	// Ticket needs to be a raw value as it is wrapped in an APPLICATION tag
+	Ticket                 asn1.RawValue       `asn1:"explicit,tag:3"`
+	EncryptedAuthenticator types.EncryptedData `asn1:"explicit,tag:4"`
+}
+
+// APReq implements RFC 4120 KRB_AP_REQ: https://tools.ietf.org/html/rfc4120#section-5.5.1.
+type APReq struct {
+	PVNO                   int                 `asn1:"explicit,tag:0"`
+	MsgType                int                 `asn1:"explicit,tag:1"`
+	APOptions              asn1.BitString      `asn1:"explicit,tag:2"`
+	Ticket                 Ticket              `asn1:"explicit,tag:3"`
+	EncryptedAuthenticator types.EncryptedData `asn1:"explicit,tag:4"`
+	Authenticator          types.Authenticator `asn1:"optional"`
+}
+
+// NewAPReq generates a new KRB_AP_REQ struct.
+func NewAPReq(tkt Ticket, sessionKey types.EncryptionKey, auth types.Authenticator) (APReq, error) {
+	var a APReq
+	ed, err := encryptAuthenticator(auth, sessionKey, tkt)
+	if err != nil {
+		return a, krberror.Errorf(err, krberror.KRBMsgError, "error creating Authenticator for AP_REQ")
+	}
+	a = APReq{
+		PVNO:                   iana.PVNO,
+		MsgType:                msgtype.KRB_AP_REQ,
+		APOptions:              types.NewKrbFlags(),
+		Ticket:                 tkt,
+		EncryptedAuthenticator: ed,
+	}
+	return a, nil
+}
+
+// Encrypt Authenticator
+func encryptAuthenticator(a types.Authenticator, sessionKey types.EncryptionKey, tkt Ticket) (types.EncryptedData, error) {
+	var ed types.EncryptedData
+	m, err := a.Marshal()
+	if err != nil {
+		return ed, krberror.Errorf(err, krberror.EncodingError, "marshaling error of EncryptedData form of Authenticator")
+	}
+	usage := authenticatorKeyUsage(tkt.SName)
+	ed, err = crypto.GetEncryptedData(m, sessionKey, uint32(usage), tkt.EncPart.KVNO)
+	if err != nil {
+		return ed, krberror.Errorf(err, krberror.EncryptingError, "error encrypting Authenticator")
+	}
+	return ed, nil
+}
+
+// DecryptAuthenticator decrypts the Authenticator within the AP_REQ.
+// sessionKey may simply be the key within the decrypted EncPart of the ticket within the AP_REQ.
+func (a *APReq) DecryptAuthenticator(sessionKey types.EncryptionKey) error {
+	usage := authenticatorKeyUsage(a.Ticket.SName)
+	ab, e := crypto.DecryptEncPart(a.EncryptedAuthenticator, sessionKey, uint32(usage))
+	if e != nil {
+		return fmt.Errorf("error decrypting authenticator: %v", e)
+	}
+	err := a.Authenticator.Unmarshal(ab)
+	if err != nil {
+		return fmt.Errorf("error unmarshaling authenticator: %v", err)
+	}
+	return nil
+}
+
+func authenticatorKeyUsage(pn types.PrincipalName) int {
+	if pn.NameString[0] == "krbtgt" {
+		return keyusage.TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR
+	}
+	return keyusage.AP_REQ_AUTHENTICATOR
+}
+
+// Unmarshal bytes b into the APReq struct.
+func (a *APReq) Unmarshal(b []byte) error {
+	var m marshalAPReq
+	_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.APREQ))
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "unmarshal error of AP_REQ")
+	}
+	if m.MsgType != msgtype.KRB_AP_REQ {
+		return NewKRBError(types.PrincipalName{}, "", errorcode.KRB_AP_ERR_MSG_TYPE, errorcode.Lookup(errorcode.KRB_AP_ERR_MSG_TYPE))
+	}
+	a.PVNO = m.PVNO
+	a.MsgType = m.MsgType
+	a.APOptions = m.APOptions
+	a.EncryptedAuthenticator = m.EncryptedAuthenticator
+	a.Ticket, err = unmarshalTicket(m.Ticket.Bytes)
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "unmarshaling error of Ticket within AP_REQ")
+	}
+	return nil
+}
+
+// Marshal APReq struct.
+func (a *APReq) Marshal() ([]byte, error) {
+	m := marshalAPReq{
+		PVNO:                   a.PVNO,
+		MsgType:                a.MsgType,
+		APOptions:              a.APOptions,
+		EncryptedAuthenticator: a.EncryptedAuthenticator,
+	}
+	var b []byte
+	b, err := a.Ticket.Marshal()
+	if err != nil {
+		return b, err
+	}
+	m.Ticket = asn1.RawValue{
+		Class:      asn1.ClassContextSpecific,
+		IsCompound: true,
+		Tag:        3,
+		Bytes:      b,
+	}
+	mk, err := asn1.Marshal(m)
+	if err != nil {
+		return mk, krberror.Errorf(err, krberror.EncodingError, "marshaling error of AP_REQ")
+	}
+	mk = asn1tools.AddASNAppTag(mk, asnAppTag.APREQ)
+	return mk, nil
+}
+
+// Verify an AP_REQ using the service's keytab, SPN and maximum acceptable clock skew duration.
+// The service ticket encrypted part and authenticator will be decrypted as part of this operation.
+func (a *APReq) Verify(kt *keytab.Keytab, d time.Duration, cAddr types.HostAddress, snameOverride *types.PrincipalName) (bool, error) {
+	// Decrypt ticket's encrypted part with service key
+	//TODO decrypt with service's session key from its TGT if user-to-user. Need to figure out how to get TGT.
+	//if types.IsFlagSet(&a.APOptions, flags.APOptionUseSessionKey) {
+	//	err := a.Ticket.Decrypt(tgt.DecryptedEncPart.Key)
+	//	if err != nil {
+	//		return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of ticket provided using session key")
+	//	}
+	//} else {
+	//	err := a.Ticket.DecryptEncPart(*kt, &a.Ticket.SName)
+	//	if err != nil {
+	//		return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of service ticket provided")
+	//	}
+	//}
+	sname := &a.Ticket.SName
+	if snameOverride != nil {
+		sname = snameOverride
+	}
+	err := a.Ticket.DecryptEncPart(kt, sname)
+	if err != nil {
+		return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of service ticket provided")
+	}
+
+	// Check time validity of ticket
+	ok, err := a.Ticket.Valid(d)
+	if err != nil || !ok {
+		return ok, err
+	}
+
+	// Check client's address is listed in the client addresses in the ticket
+	if len(a.Ticket.DecryptedEncPart.CAddr) > 0 {
+		//If client addresses are present check if any of them match the source IP that sent the APReq
+		//If there is no match return KRB_AP_ERR_BADADDR error.
+		if !types.HostAddressesContains(a.Ticket.DecryptedEncPart.CAddr, cAddr) {
+			return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BADADDR, "client address not within the list contained in the service ticket")
+		}
+	}
+
+	// Decrypt authenticator with session key from ticket's encrypted part
+	err = a.DecryptAuthenticator(a.Ticket.DecryptedEncPart.Key)
+	if err != nil {
+		return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BAD_INTEGRITY, "could not decrypt authenticator")
+	}
+
+	// Check CName in authenticator is the same as that in the ticket
+	if !a.Authenticator.CName.Equal(a.Ticket.DecryptedEncPart.CName) {
+		return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BADMATCH, "CName in Authenticator does not match that in service ticket")
+	}
+
+	// Check the clock skew between the client and the service server
+	ct := a.Authenticator.CTime.Add(time.Duration(a.Authenticator.Cusec) * time.Microsecond)
+	t := time.Now().UTC()
+	if t.Sub(ct) > d || ct.Sub(t) > d {
+		return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_SKEW, fmt.Sprintf("clock skew with client too large. greater than %v seconds", d))
+	}
+	return true, nil
+}
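
For the accepting (service) side, the methods above combine roughly as follows. The sketch makes several assumptions: the keytab and client IP come from the caller, the 5-minute skew tolerance is arbitrary, and the client address is built with the `types.HostAddressesFromNetIPs` helper.

```go
package service

import (
	"fmt"
	"net"
	"time"

	"github.com/jcmturner/gokrb5/v8/keytab"
	"github.com/jcmturner/gokrb5/v8/messages"
	"github.com/jcmturner/gokrb5/v8/types"
)

// verifyAPReq decodes an AP_REQ from the wire and verifies it against the
// service keytab. The 5-minute skew and the client IP handling are
// illustrative choices, not requirements of the library.
func verifyAPReq(wire []byte, kt *keytab.Keytab, clientIP net.IP) error {
	var apReq messages.APReq
	if err := apReq.Unmarshal(wire); err != nil {
		return fmt.Errorf("could not decode AP_REQ: %v", err)
	}
	// Verify decrypts the service ticket and authenticator, then checks
	// ticket validity, client address, CName match and clock skew.
	cAddr := types.HostAddressesFromNetIPs([]net.IP{clientIP})[0]
	ok, err := apReq.Verify(kt, 5*time.Minute, cAddr, nil)
	if err != nil || !ok {
		return fmt.Errorf("AP_REQ verification failed: %v", err)
	}
	return nil
}
```
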
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCRep.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCRep.go
new file mode 100644
index 0000000..69df9f0
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCRep.go
@@ -0,0 +1,360 @@
+package messages
+
+// Reference: https://www.ietf.org/rfc/rfc4120.txt
+// Section: 5.4.2
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/asn1tools"
+	"github.com/jcmturner/gokrb5/v8/config"
+	"github.com/jcmturner/gokrb5/v8/credentials"
+	"github.com/jcmturner/gokrb5/v8/crypto"
+	"github.com/jcmturner/gokrb5/v8/iana/asnAppTag"
+	"github.com/jcmturner/gokrb5/v8/iana/flags"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/iana/msgtype"
+	"github.com/jcmturner/gokrb5/v8/iana/patype"
+	"github.com/jcmturner/gokrb5/v8/krberror"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+type marshalKDCRep struct {
+	PVNO    int                  `asn1:"explicit,tag:0"`
+	MsgType int                  `asn1:"explicit,tag:1"`
+	PAData  types.PADataSequence `asn1:"explicit,optional,tag:2"`
+	CRealm  string               `asn1:"generalstring,explicit,tag:3"`
+	CName   types.PrincipalName  `asn1:"explicit,tag:4"`
+	// Ticket needs to be a raw value as it is wrapped in an APPLICATION tag
+	Ticket  asn1.RawValue       `asn1:"explicit,tag:5"`
+	EncPart types.EncryptedData `asn1:"explicit,tag:6"`
+}
+
+// KDCRepFields represents the KRB_KDC_REP fields.
+type KDCRepFields struct {
+	PVNO             int
+	MsgType          int
+	PAData           []types.PAData
+	CRealm           string
+	CName            types.PrincipalName
+	Ticket           Ticket
+	EncPart          types.EncryptedData
+	DecryptedEncPart EncKDCRepPart
+}
+
+// ASRep implements RFC 4120 KRB_AS_REP: https://tools.ietf.org/html/rfc4120#section-5.4.2.
+type ASRep struct {
+	KDCRepFields
+}
+
+// TGSRep implements RFC 4120 KRB_TGS_REP: https://tools.ietf.org/html/rfc4120#section-5.4.2.
+type TGSRep struct {
+	KDCRepFields
+}
+
+// EncKDCRepPart is the encrypted part of KRB_KDC_REP.
+type EncKDCRepPart struct {
+	Key           types.EncryptionKey  `asn1:"explicit,tag:0"`
+	LastReqs      []LastReq            `asn1:"explicit,tag:1"`
+	Nonce         int                  `asn1:"explicit,tag:2"`
+	KeyExpiration time.Time            `asn1:"generalized,explicit,optional,tag:3"`
+	Flags         asn1.BitString       `asn1:"explicit,tag:4"`
+	AuthTime      time.Time            `asn1:"generalized,explicit,tag:5"`
+	StartTime     time.Time            `asn1:"generalized,explicit,optional,tag:6"`
+	EndTime       time.Time            `asn1:"generalized,explicit,tag:7"`
+	RenewTill     time.Time            `asn1:"generalized,explicit,optional,tag:8"`
+	SRealm        string               `asn1:"generalstring,explicit,tag:9"`
+	SName         types.PrincipalName  `asn1:"explicit,tag:10"`
+	CAddr         []types.HostAddress  `asn1:"explicit,optional,tag:11"`
+	EncPAData     types.PADataSequence `asn1:"explicit,optional,tag:12"`
+}
+
+// LastReq part of KRB_KDC_REP.
+type LastReq struct {
+	LRType  int32     `asn1:"explicit,tag:0"`
+	LRValue time.Time `asn1:"generalized,explicit,tag:1"`
+}
+
+// Unmarshal bytes b into the ASRep struct.
+func (k *ASRep) Unmarshal(b []byte) error {
+	var m marshalKDCRep
+	_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.ASREP))
+	if err != nil {
+		return processUnmarshalReplyError(b, err)
+	}
+	if m.MsgType != msgtype.KRB_AS_REP {
+		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an AS_REP. Expected: %v; Actual: %v", msgtype.KRB_AS_REP, m.MsgType)
+	}
+	//Process the raw ticket within
+	tkt, err := unmarshalTicket(m.Ticket.Bytes)
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling Ticket within AS_REP")
+	}
+	k.KDCRepFields = KDCRepFields{
+		PVNO:    m.PVNO,
+		MsgType: m.MsgType,
+		PAData:  m.PAData,
+		CRealm:  m.CRealm,
+		CName:   m.CName,
+		Ticket:  tkt,
+		EncPart: m.EncPart,
+	}
+	return nil
+}
+
+// Marshal ASRep struct.
+func (k *ASRep) Marshal() ([]byte, error) {
+	m := marshalKDCRep{
+		PVNO:    k.PVNO,
+		MsgType: k.MsgType,
+		PAData:  k.PAData,
+		CRealm:  k.CRealm,
+		CName:   k.CName,
+		EncPart: k.EncPart,
+	}
+	b, err := k.Ticket.Marshal()
+	if err != nil {
+		return []byte{}, err
+	}
+	m.Ticket = asn1.RawValue{
+		Class:      asn1.ClassContextSpecific,
+		IsCompound: true,
+		Tag:        5,
+		Bytes:      b,
+	}
+	mk, err := asn1.Marshal(m)
+	if err != nil {
+		return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REP")
+	}
+	mk = asn1tools.AddASNAppTag(mk, asnAppTag.ASREP)
+	return mk, nil
+}
+
+// Unmarshal bytes b into the TGSRep struct.
+func (k *TGSRep) Unmarshal(b []byte) error {
+	var m marshalKDCRep
+	_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.TGSREP))
+	if err != nil {
+		return processUnmarshalReplyError(b, err)
+	}
+	if m.MsgType != msgtype.KRB_TGS_REP {
+		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a TGS_REP. Expected: %v; Actual: %v", msgtype.KRB_TGS_REP, m.MsgType)
+	}
+	//Process the raw ticket within
+	tkt, err := unmarshalTicket(m.Ticket.Bytes)
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling Ticket within TGS_REP")
+	}
+	k.KDCRepFields = KDCRepFields{
+		PVNO:    m.PVNO,
+		MsgType: m.MsgType,
+		PAData:  m.PAData,
+		CRealm:  m.CRealm,
+		CName:   m.CName,
+		Ticket:  tkt,
+		EncPart: m.EncPart,
+	}
+	return nil
+}
+
+// Marshal TGSRep struct.
+func (k *TGSRep) Marshal() ([]byte, error) {
+	m := marshalKDCRep{
+		PVNO:    k.PVNO,
+		MsgType: k.MsgType,
+		PAData:  k.PAData,
+		CRealm:  k.CRealm,
+		CName:   k.CName,
+		EncPart: k.EncPart,
+	}
+	b, err := k.Ticket.Marshal()
+	if err != nil {
+		return []byte{}, err
+	}
+	m.Ticket = asn1.RawValue{
+		Class:      asn1.ClassContextSpecific,
+		IsCompound: true,
+		Tag:        5,
+		Bytes:      b,
+	}
+	mk, err := asn1.Marshal(m)
+	if err != nil {
+		return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling TGS_REP")
+	}
+	mk = asn1tools.AddASNAppTag(mk, asnAppTag.TGSREP)
+	return mk, nil
+}
+
+// Unmarshal bytes b into encrypted part of KRB_KDC_REP.
+func (e *EncKDCRepPart) Unmarshal(b []byte) error {
+	_, err := asn1.UnmarshalWithParams(b, e, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncASRepPart))
+	if err != nil {
+		// Try using tag 26
+		// Ref: RFC 4120 - mentions that some implementations use application tag number 26 whether the reply is
+		// an AS-REP or a TGS-REP.
+		_, err = asn1.UnmarshalWithParams(b, e, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncTGSRepPart))
+		if err != nil {
+			return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part within KDC_REP")
+		}
+	}
+	return nil
+}
+
+// Marshal encrypted part of KRB_KDC_REP.
+func (e *EncKDCRepPart) Marshal() ([]byte, error) {
+	b, err := asn1.Marshal(*e)
+	if err != nil {
+		return b, krberror.Errorf(err, krberror.EncodingError, "marshaling error of AS_REP encpart")
+	}
+	b = asn1tools.AddASNAppTag(b, asnAppTag.EncASRepPart)
+	return b, nil
+}
+
+// DecryptEncPart decrypts the encrypted part of an AS_REP.
+func (k *ASRep) DecryptEncPart(c *credentials.Credentials) (types.EncryptionKey, error) {
+	var key types.EncryptionKey
+	var err error
+	if c.HasKeytab() {
+		key, _, err = c.Keytab().GetEncryptionKey(k.CName, k.CRealm, k.EncPart.KVNO, k.EncPart.EType)
+		if err != nil {
+			return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part")
+		}
+	}
+	if c.HasPassword() {
+		key, _, err = crypto.GetKeyFromPassword(c.Password(), k.CName, k.CRealm, k.EncPart.EType, k.PAData)
+		if err != nil {
+			return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part")
+		}
+	}
+	if !c.HasKeytab() && !c.HasPassword() {
+		return key, krberror.NewErrorf(krberror.DecryptingError, "no secret available in credentials to perform decryption of AS_REP encrypted part")
+	}
+	b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.AS_REP_ENCPART)
+	if err != nil {
+		return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part")
+	}
+	var denc EncKDCRepPart
+	err = denc.Unmarshal(b)
+	if err != nil {
+		return key, krberror.Errorf(err, krberror.EncodingError, "error unmarshaling decrypted encpart of AS_REP")
+	}
+	k.DecryptedEncPart = denc
+	return key, nil
+}
+
+// Verify checks the validity of AS_REP message.
+func (k *ASRep) Verify(cfg *config.Config, creds *credentials.Credentials, asReq ASReq) (bool, error) {
+	//Ref RFC 4120 Section 3.1.5
+	if !k.CName.Equal(asReq.ReqBody.CName) {
+		return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", asReq.ReqBody.CName, k.CName)
+	}
+	if k.CRealm != asReq.ReqBody.Realm {
+		return false, krberror.NewErrorf(krberror.KRBMsgError, "CRealm in response does not match what was requested. Requested: %s; Reply: %s", asReq.ReqBody.Realm, k.CRealm)
+	}
+	key, err := k.DecryptEncPart(creds)
+	if err != nil {
+		return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting EncPart of AS_REP")
+	}
+	if k.DecryptedEncPart.Nonce != asReq.ReqBody.Nonce {
+		return false, krberror.NewErrorf(krberror.KRBMsgError, "possible replay attack, nonce in response does not match that in request")
+	}
+	if !k.DecryptedEncPart.SName.Equal(asReq.ReqBody.SName) {
+		return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %v; Reply: %v", asReq.ReqBody.SName, k.DecryptedEncPart.SName)
+	}
+	if k.DecryptedEncPart.SRealm != asReq.ReqBody.Realm {
+		return false, krberror.NewErrorf(krberror.KRBMsgError, "SRealm in response does not match what was requested. Requested: %s; Reply: %s", asReq.ReqBody.Realm, k.DecryptedEncPart.SRealm)
+	}
+	if len(asReq.ReqBody.Addresses) > 0 {
+		if !types.HostAddressesEqual(k.DecryptedEncPart.CAddr, asReq.ReqBody.Addresses) {
+			return false, krberror.NewErrorf(krberror.KRBMsgError, "addresses listed in the AS_REP do not match those listed in the AS_REQ")
+		}
+	}
+	t := time.Now().UTC()
+	if t.Sub(k.DecryptedEncPart.AuthTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.AuthTime.Sub(t) > cfg.LibDefaults.Clockskew {
+		return false, krberror.NewErrorf(krberror.KRBMsgError, "clock skew with KDC too large. Greater than %v seconds", cfg.LibDefaults.Clockskew.Seconds())
+	}
+	// RFC 6806 https://tools.ietf.org/html/rfc6806.html#section-11
+	if asReq.PAData.Contains(patype.PA_REQ_ENC_PA_REP) && types.IsFlagSet(&k.DecryptedEncPart.Flags, flags.EncPARep) {
+		if len(k.DecryptedEncPart.EncPAData) < 2 || !k.DecryptedEncPart.EncPAData.Contains(patype.PA_FX_FAST) {
+			return false, krberror.NewErrorf(krberror.KRBMsgError, "KDC did not respond appropriately to FAST negotiation")
+		}
+		for _, pa := range k.DecryptedEncPart.EncPAData {
+			if pa.PADataType == patype.PA_REQ_ENC_PA_REP {
+				var pafast types.PAReqEncPARep
+				err := pafast.Unmarshal(pa.PADataValue)
+				if err != nil {
+					return false, krberror.Errorf(err, krberror.EncodingError, "KDC FAST negotiation response error, could not unmarshal PA_REQ_ENC_PA_REP")
+				}
+				etype, err := crypto.GetChksumEtype(pafast.ChksumType)
+				if err != nil {
+					return false, krberror.Errorf(err, krberror.ChksumError, "KDC FAST negotiation response error")
+				}
+				ab, _ := asReq.Marshal()
+				if !etype.VerifyChecksum(key.KeyValue, ab, pafast.Chksum, keyusage.KEY_USAGE_AS_REQ) {
+					return false, krberror.Errorf(err, krberror.ChksumError, "KDC FAST negotiation response checksum invalid")
+				}
+			}
+		}
+	}
+	return true, nil
+}
+
+// DecryptEncPart decrypts the encrypted part of an TGS_REP.
+func (k *TGSRep) DecryptEncPart(key types.EncryptionKey) error {
+	b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.TGS_REP_ENCPART_SESSION_KEY)
+	if err != nil {
+		return krberror.Errorf(err, krberror.DecryptingError, "error decrypting TGS_REP EncPart")
+	}
+	var denc EncKDCRepPart
+	err = denc.Unmarshal(b)
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part")
+	}
+	k.DecryptedEncPart = denc
+	return nil
+}
+
+// Verify checks the validity of the TGS_REP message.
+func (k *TGSRep) Verify(cfg *config.Config, tgsReq TGSReq) (bool, error) {
+	if !k.CName.Equal(tgsReq.ReqBody.CName) {
+		return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.CName, k.CName)
+	}
+	if k.Ticket.Realm != tgsReq.ReqBody.Realm {
+		return false, krberror.NewErrorf(krberror.KRBMsgError, "realm in response ticket does not match what was requested. Requested: %s; Reply: %s", tgsReq.ReqBody.Realm, k.Ticket.Realm)
+	}
+	if k.DecryptedEncPart.Nonce != tgsReq.ReqBody.Nonce {
+		return false, krberror.NewErrorf(krberror.KRBMsgError, "possible replay attack, nonce in response does not match that in request")
+	}
+	//if k.Ticket.SName.NameType != tgsReq.ReqBody.SName.NameType || k.Ticket.SName.NameString == nil {
+	//	return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response ticket does not match what was requested. Requested: %v; Reply: %v", tgsReq.ReqBody.SName, k.Ticket.SName)
+	//}
+	//for i := range k.Ticket.SName.NameString {
+	//	if k.Ticket.SName.NameString[i] != tgsReq.ReqBody.SName.NameString[i] {
+	//		return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response ticket does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.SName, k.Ticket.SName)
+	//	}
+	//}
+	//if k.DecryptedEncPart.SName.NameType != tgsReq.ReqBody.SName.NameType || k.DecryptedEncPart.SName.NameString == nil {
+	//	return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %v; Reply: %v", tgsReq.ReqBody.SName, k.DecryptedEncPart.SName)
+	//}
+	//for i := range k.DecryptedEncPart.SName.NameString {
+	//	if k.DecryptedEncPart.SName.NameString[i] != tgsReq.ReqBody.SName.NameString[i] {
+	//		return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.SName, k.DecryptedEncPart.SName)
+	//	}
+	//}
+	if k.DecryptedEncPart.SRealm != tgsReq.ReqBody.Realm {
+		return false, krberror.NewErrorf(krberror.KRBMsgError, "SRealm in response does not match what was requested. Requested: %s; Reply: %s", tgsReq.ReqBody.Realm, k.DecryptedEncPart.SRealm)
+	}
+	if len(k.DecryptedEncPart.CAddr) > 0 {
+		if !types.HostAddressesEqual(k.DecryptedEncPart.CAddr, tgsReq.ReqBody.Addresses) {
+			return false, krberror.NewErrorf(krberror.KRBMsgError, "addresses listed in the TGS_REP do not match those listed in the TGS_REQ")
+		}
+	}
+	if time.Since(k.DecryptedEncPart.StartTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.StartTime.Sub(time.Now().UTC()) > cfg.LibDefaults.Clockskew {
+		if time.Since(k.DecryptedEncPart.AuthTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.AuthTime.Sub(time.Now().UTC()) > cfg.LibDefaults.Clockskew {
+			return false, krberror.NewErrorf(krberror.KRBMsgError, "clock skew with KDC too large. Greater than %v seconds.", cfg.LibDefaults.Clockskew.Seconds())
+		}
+	}
+	return true, nil
+}
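
On the client side, an AS reply is typically decoded and checked with the `Unmarshal` and `Verify` methods above. In the sketch below, the configuration and credentials are assumed to have been built elsewhere (they are not part of this file), and the original `ASReq` is passed in so `Verify` can compare nonces, names and addresses.

```go
package client

import (
	"fmt"

	"github.com/jcmturner/gokrb5/v8/config"
	"github.com/jcmturner/gokrb5/v8/credentials"
	"github.com/jcmturner/gokrb5/v8/messages"
)

// checkASRep decodes the KDC's reply to an AS_REQ and validates it against
// the request that was sent. cfg and creds are assumed to have been built
// elsewhere (e.g. from krb5.conf and a keytab or password).
func checkASRep(wire []byte, cfg *config.Config, creds *credentials.Credentials, asReq messages.ASReq) (messages.ASRep, error) {
	var asRep messages.ASRep
	if err := asRep.Unmarshal(wire); err != nil {
		// A KRB_ERROR reply from the KDC may be returned here as a
		// messages.KRBError value.
		return asRep, err
	}
	// Verify decrypts the encrypted part with the client's long-term key and
	// checks nonce, names, realms, addresses and clock skew (RFC 4120 3.1.5).
	if ok, err := asRep.Verify(cfg, creds, asReq); err != nil || !ok {
		return asRep, fmt.Errorf("AS_REP validation failed: %v", err)
	}
	return asRep, nil
}
```
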
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCReq.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCReq.go
new file mode 100644
index 0000000..3745afe
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KDCReq.go
@@ -0,0 +1,432 @@
+package messages
+
+// Reference: https://www.ietf.org/rfc/rfc4120.txt
+// Section: 5.4.1
+
+import (
+	"crypto/rand"
+	"fmt"
+	"math"
+	"math/big"
+	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/asn1tools"
+	"github.com/jcmturner/gokrb5/v8/config"
+	"github.com/jcmturner/gokrb5/v8/crypto"
+	"github.com/jcmturner/gokrb5/v8/iana"
+	"github.com/jcmturner/gokrb5/v8/iana/asnAppTag"
+	"github.com/jcmturner/gokrb5/v8/iana/flags"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/iana/msgtype"
+	"github.com/jcmturner/gokrb5/v8/iana/nametype"
+	"github.com/jcmturner/gokrb5/v8/iana/patype"
+	"github.com/jcmturner/gokrb5/v8/krberror"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+type marshalKDCReq struct {
+	PVNO    int                  `asn1:"explicit,tag:1"`
+	MsgType int                  `asn1:"explicit,tag:2"`
+	PAData  types.PADataSequence `asn1:"explicit,optional,tag:3"`
+	ReqBody asn1.RawValue        `asn1:"explicit,tag:4"`
+}
+
+// KDCReqFields represents the KRB_KDC_REQ fields.
+type KDCReqFields struct {
+	PVNO    int
+	MsgType int
+	PAData  types.PADataSequence
+	ReqBody KDCReqBody
+	Renewal bool
+}
+
+// ASReq implements RFC 4120 KRB_AS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1.
+type ASReq struct {
+	KDCReqFields
+}
+
+// TGSReq implements RFC 4120 KRB_TGS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1.
+type TGSReq struct {
+	KDCReqFields
+}
+
+type marshalKDCReqBody struct {
+	KDCOptions  asn1.BitString      `asn1:"explicit,tag:0"`
+	CName       types.PrincipalName `asn1:"explicit,optional,tag:1"`
+	Realm       string              `asn1:"generalstring,explicit,tag:2"`
+	SName       types.PrincipalName `asn1:"explicit,optional,tag:3"`
+	From        time.Time           `asn1:"generalized,explicit,optional,tag:4"`
+	Till        time.Time           `asn1:"generalized,explicit,tag:5"`
+	RTime       time.Time           `asn1:"generalized,explicit,optional,tag:6"`
+	Nonce       int                 `asn1:"explicit,tag:7"`
+	EType       []int32             `asn1:"explicit,tag:8"`
+	Addresses   []types.HostAddress `asn1:"explicit,optional,tag:9"`
+	EncAuthData types.EncryptedData `asn1:"explicit,optional,tag:10"`
+	// Ticket needs to be a raw value as it is wrapped in an APPLICATION tag
+	AdditionalTickets asn1.RawValue `asn1:"explicit,optional,tag:11"`
+}
+
+// KDCReqBody implements the KRB_KDC_REQ request body.
+type KDCReqBody struct {
+	KDCOptions        asn1.BitString      `asn1:"explicit,tag:0"`
+	CName             types.PrincipalName `asn1:"explicit,optional,tag:1"`
+	Realm             string              `asn1:"generalstring,explicit,tag:2"`
+	SName             types.PrincipalName `asn1:"explicit,optional,tag:3"`
+	From              time.Time           `asn1:"generalized,explicit,optional,tag:4"`
+	Till              time.Time           `asn1:"generalized,explicit,tag:5"`
+	RTime             time.Time           `asn1:"generalized,explicit,optional,tag:6"`
+	Nonce             int                 `asn1:"explicit,tag:7"`
+	EType             []int32             `asn1:"explicit,tag:8"`
+	Addresses         []types.HostAddress `asn1:"explicit,optional,tag:9"`
+	EncAuthData       types.EncryptedData `asn1:"explicit,optional,tag:10"`
+	AdditionalTickets []Ticket            `asn1:"explicit,optional,tag:11"`
+}
+
+// NewASReqForTGT generates a new KRB_AS_REQ struct for a TGT request.
+func NewASReqForTGT(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) {
+	sname := types.PrincipalName{
+		NameType:   nametype.KRB_NT_SRV_INST,
+		NameString: []string{"krbtgt", realm},
+	}
+	return NewASReq(realm, c, cname, sname)
+}
+
+// NewASReqForChgPasswd generates a new KRB_AS_REQ struct for a change password request.
+func NewASReqForChgPasswd(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) {
+	sname := types.PrincipalName{
+		NameType:   nametype.KRB_NT_PRINCIPAL,
+		NameString: []string{"kadmin", "changepw"},
+	}
+	return NewASReq(realm, c, cname, sname)
+}
+
+// NewASReq generates a new KRB_AS_REQ struct for a given SNAME.
+func NewASReq(realm string, c *config.Config, cname, sname types.PrincipalName) (ASReq, error) {
+	nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32))
+	if err != nil {
+		return ASReq{}, err
+	}
+	t := time.Now().UTC()
+	// Copy the default options to make this thread safe
+	kopts := types.NewKrbFlags()
+	copy(kopts.Bytes, c.LibDefaults.KDCDefaultOptions.Bytes)
+	kopts.BitLength = c.LibDefaults.KDCDefaultOptions.BitLength
+	a := ASReq{
+		KDCReqFields{
+			PVNO:    iana.PVNO,
+			MsgType: msgtype.KRB_AS_REQ,
+			PAData:  types.PADataSequence{},
+			ReqBody: KDCReqBody{
+				KDCOptions: kopts,
+				Realm:      realm,
+				CName:      cname,
+				SName:      sname,
+				Till:       t.Add(c.LibDefaults.TicketLifetime),
+				Nonce:      int(nonce.Int64()),
+				EType:      c.LibDefaults.DefaultTktEnctypeIDs,
+			},
+		},
+	}
+	if c.LibDefaults.Forwardable {
+		types.SetFlag(&a.ReqBody.KDCOptions, flags.Forwardable)
+	}
+	if c.LibDefaults.Canonicalize {
+		types.SetFlag(&a.ReqBody.KDCOptions, flags.Canonicalize)
+	}
+	if c.LibDefaults.Proxiable {
+		types.SetFlag(&a.ReqBody.KDCOptions, flags.Proxiable)
+	}
+	if c.LibDefaults.RenewLifetime != 0 {
+		types.SetFlag(&a.ReqBody.KDCOptions, flags.Renewable)
+		a.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime)
+		a.ReqBody.RTime = t.Add(time.Duration(48) * time.Hour)
+	}
+	if !c.LibDefaults.NoAddresses {
+		ha, err := types.LocalHostAddresses()
+		if err != nil {
+			return a, fmt.Errorf("could not get local addresses: %v", err)
+		}
+		ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...)
+		a.ReqBody.Addresses = ha
+	}
+	return a, nil
+}
+
+// NewTGSReq generates a new KRB_TGS_REQ struct.
+func NewTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, tgt Ticket, sessionKey types.EncryptionKey, sname types.PrincipalName, renewal bool) (TGSReq, error) {
+	a, err := tgsReq(cname, sname, kdcRealm, renewal, c)
+	if err != nil {
+		return a, err
+	}
+	err = a.setPAData(tgt, sessionKey)
+	return a, err
+}
+
+// NewUser2UserTGSReq returns a TGS-REQ suitable for user-to-user authentication (https://tools.ietf.org/html/rfc4120#section-3.7)
+func NewUser2UserTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, clientTGT Ticket, sessionKey types.EncryptionKey, sname types.PrincipalName, renewal bool, verifyingTGT Ticket) (TGSReq, error) {
+	a, err := tgsReq(cname, sname, kdcRealm, renewal, c)
+	if err != nil {
+		return a, err
+	}
+	a.ReqBody.AdditionalTickets = []Ticket{verifyingTGT}
+	types.SetFlag(&a.ReqBody.KDCOptions, flags.EncTktInSkey)
+	err = a.setPAData(clientTGT, sessionKey)
+	return a, err
+}
+
+// tgsReq populates the fields for a TGS_REQ
+func tgsReq(cname, sname types.PrincipalName, kdcRealm string, renewal bool, c *config.Config) (TGSReq, error) {
+	nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32))
+	if err != nil {
+		return TGSReq{}, err
+	}
+	t := time.Now().UTC()
+	k := KDCReqFields{
+		PVNO:    iana.PVNO,
+		MsgType: msgtype.KRB_TGS_REQ,
+		ReqBody: KDCReqBody{
+			KDCOptions: types.NewKrbFlags(),
+			Realm:      kdcRealm,
+			CName:      cname, // Add the CName to make validation of the reply easier
+			SName:      sname,
+			Till:       t.Add(c.LibDefaults.TicketLifetime),
+			Nonce:      int(nonce.Int64()),
+			EType:      c.LibDefaults.DefaultTGSEnctypeIDs,
+		},
+		Renewal: renewal,
+	}
+	if c.LibDefaults.Forwardable {
+		types.SetFlag(&k.ReqBody.KDCOptions, flags.Forwardable)
+	}
+	if c.LibDefaults.Canonicalize {
+		types.SetFlag(&k.ReqBody.KDCOptions, flags.Canonicalize)
+	}
+	if c.LibDefaults.Proxiable {
+		types.SetFlag(&k.ReqBody.KDCOptions, flags.Proxiable)
+	}
+	if c.LibDefaults.RenewLifetime > time.Duration(0) {
+		types.SetFlag(&k.ReqBody.KDCOptions, flags.Renewable)
+		k.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime)
+	}
+	if !c.LibDefaults.NoAddresses {
+		ha, err := types.LocalHostAddresses()
+		if err != nil {
+			return TGSReq{}, fmt.Errorf("could not get local addresses: %v", err)
+		}
+		ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...)
+		k.ReqBody.Addresses = ha
+	}
+	if renewal {
+		types.SetFlag(&k.ReqBody.KDCOptions, flags.Renew)
+		types.SetFlag(&k.ReqBody.KDCOptions, flags.Renewable)
+	}
+	return TGSReq{
+		k,
+	}, nil
+}
+
+func (k *TGSReq) setPAData(tgt Ticket, sessionKey types.EncryptionKey) error {
+	// Marshal the request and calculate checksum
+	b, err := k.ReqBody.Marshal()
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "error marshaling TGS_REQ body")
+	}
+	etype, err := crypto.GetEtype(sessionKey.KeyType)
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncryptingError, "error getting etype to encrypt authenticator")
+	}
+	cb, err := etype.GetChecksumHash(sessionKey.KeyValue, b, keyusage.TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR_CHKSUM)
+	if err != nil {
+		return krberror.Errorf(err, krberror.ChksumError, "error getting etype checksum hash")
+	}
+
+	// Form PAData for TGS_REQ
+	// Create authenticator
+	auth, err := types.NewAuthenticator(tgt.Realm, k.ReqBody.CName)
+	if err != nil {
+		return krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator")
+	}
+	auth.Cksum = types.Checksum{
+		CksumType: etype.GetHashID(),
+		Checksum:  cb,
+	}
+	// Create AP_REQ
+	apReq, err := NewAPReq(tgt, sessionKey, auth)
+	if err != nil {
+		return krberror.Errorf(err, krberror.KRBMsgError, "error generating new AP_REQ")
+	}
+	apb, err := apReq.Marshal()
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "error marshaling AP_REQ for pre-authentication data")
+	}
+	k.PAData = types.PADataSequence{
+		types.PAData{
+			PADataType:  patype.PA_TGS_REQ,
+			PADataValue: apb,
+		},
+	}
+	return nil
+}
+
+// Unmarshal bytes b into the ASReq struct.
+func (k *ASReq) Unmarshal(b []byte) error {
+	var m marshalKDCReq
+	_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.ASREQ))
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling AS_REQ")
+	}
+	expectedMsgType := msgtype.KRB_AS_REQ
+	if m.MsgType != expectedMsgType {
+		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an AS_REQ. Expected: %v; Actual: %v", expectedMsgType, m.MsgType)
+	}
+	var reqb KDCReqBody
+	err = reqb.Unmarshal(m.ReqBody.Bytes)
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "error processing AS_REQ body")
+	}
+	k.MsgType = m.MsgType
+	k.PAData = m.PAData
+	k.PVNO = m.PVNO
+	k.ReqBody = reqb
+	return nil
+}
+
+// Unmarshal bytes b into the TGSReq struct.
+func (k *TGSReq) Unmarshal(b []byte) error {
+	var m marshalKDCReq
+	_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.TGSREQ))
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling TGS_REQ")
+	}
+	expectedMsgType := msgtype.KRB_TGS_REQ
+	if m.MsgType != expectedMsgType {
+		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a TGS_REQ. Expected: %v; Actual: %v", expectedMsgType, m.MsgType)
+	}
+	var reqb KDCReqBody
+	err = reqb.Unmarshal(m.ReqBody.Bytes)
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "error processing TGS_REQ body")
+	}
+	k.MsgType = m.MsgType
+	k.PAData = m.PAData
+	k.PVNO = m.PVNO
+	k.ReqBody = reqb
+	return nil
+}
+
+// Unmarshal bytes b into the KRB_KDC_REQ body struct.
+func (k *KDCReqBody) Unmarshal(b []byte) error {
+	var m marshalKDCReqBody
+	_, err := asn1.Unmarshal(b, &m)
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling KDC_REQ body")
+	}
+	k.KDCOptions = m.KDCOptions
+	if len(k.KDCOptions.Bytes) < 4 {
+		tb := make([]byte, 4-len(k.KDCOptions.Bytes))
+		k.KDCOptions.Bytes = append(tb, k.KDCOptions.Bytes...)
+		k.KDCOptions.BitLength = len(k.KDCOptions.Bytes) * 8
+	}
+	k.CName = m.CName
+	k.Realm = m.Realm
+	k.SName = m.SName
+	k.From = m.From
+	k.Till = m.Till
+	k.RTime = m.RTime
+	k.Nonce = m.Nonce
+	k.EType = m.EType
+	k.Addresses = m.Addresses
+	k.EncAuthData = m.EncAuthData
+	if len(m.AdditionalTickets.Bytes) > 0 {
+		k.AdditionalTickets, err = unmarshalTicketsSequence(m.AdditionalTickets)
+		if err != nil {
+			return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling additional tickets")
+		}
+	}
+	return nil
+}
+
+// Marshal ASReq struct.
+func (k *ASReq) Marshal() ([]byte, error) {
+	m := marshalKDCReq{
+		PVNO:    k.PVNO,
+		MsgType: k.MsgType,
+		PAData:  k.PAData,
+	}
+	b, err := k.ReqBody.Marshal()
+	if err != nil {
+		var mk []byte
+		return mk, err
+	}
+	m.ReqBody = asn1.RawValue{
+		Class:      asn1.ClassContextSpecific,
+		IsCompound: true,
+		Tag:        4,
+		Bytes:      b,
+	}
+	mk, err := asn1.Marshal(m)
+	if err != nil {
+		return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REQ")
+	}
+	mk = asn1tools.AddASNAppTag(mk, asnAppTag.ASREQ)
+	return mk, nil
+}
+
+// Marshal TGSReq struct.
+func (k *TGSReq) Marshal() ([]byte, error) {
+	m := marshalKDCReq{
+		PVNO:    k.PVNO,
+		MsgType: k.MsgType,
+		PAData:  k.PAData,
+	}
+	b, err := k.ReqBody.Marshal()
+	if err != nil {
+		var mk []byte
+		return mk, err
+	}
+	m.ReqBody = asn1.RawValue{
+		Class:      asn1.ClassContextSpecific,
+		IsCompound: true,
+		Tag:        4,
+		Bytes:      b,
+	}
+	mk, err := asn1.Marshal(m)
+	if err != nil {
+		return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling TGS_REQ")
+	}
+	mk = asn1tools.AddASNAppTag(mk, asnAppTag.TGSREQ)
+	return mk, nil
+}
+
+// Marshal KRB_KDC_REQ body struct.
+func (k *KDCReqBody) Marshal() ([]byte, error) {
+	var b []byte
+	m := marshalKDCReqBody{
+		KDCOptions:  k.KDCOptions,
+		CName:       k.CName,
+		Realm:       k.Realm,
+		SName:       k.SName,
+		From:        k.From,
+		Till:        k.Till,
+		RTime:       k.RTime,
+		Nonce:       k.Nonce,
+		EType:       k.EType,
+		Addresses:   k.Addresses,
+		EncAuthData: k.EncAuthData,
+	}
+	rawtkts, err := MarshalTicketSequence(k.AdditionalTickets)
+	if err != nil {
+		return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body additional tickets")
+	}
+	//The asn1.rawValue needs the tag setting on it for where it is in the KDCReqBody
+	rawtkts.Tag = 11
+	if len(rawtkts.Bytes) > 0 {
+		m.AdditionalTickets = rawtkts
+	}
+	b, err = asn1.Marshal(m)
+	if err != nil {
+		return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body")
+	}
+	return b, nil
+}
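
Building and serialising a TGT request with the constructors above looks roughly like this. The realm and principal name are placeholders, and the `*config.Config` is assumed to have been loaded elsewhere.

```go
package client

import (
	"fmt"

	"github.com/jcmturner/gokrb5/v8/config"
	"github.com/jcmturner/gokrb5/v8/iana/nametype"
	"github.com/jcmturner/gokrb5/v8/messages"
	"github.com/jcmturner/gokrb5/v8/types"
)

// buildASReq constructs and serialises a TGT request for a client principal.
// The realm and principal name are hypothetical; cfg is a parsed krb5
// configuration obtained elsewhere.
func buildASReq(cfg *config.Config) ([]byte, error) {
	cname := types.PrincipalName{
		NameType:   nametype.KRB_NT_PRINCIPAL,
		NameString: []string{"alice"},
	}
	asReq, err := messages.NewASReqForTGT("EXAMPLE.COM", cfg, cname)
	if err != nil {
		return nil, fmt.Errorf("could not build AS_REQ: %v", err)
	}
	// Marshal wraps the request body with the AS_REQ application tag,
	// ready to send to the KDC over UDP or TCP.
	return asReq.Marshal()
}
```
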
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBCred.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBCred.go
new file mode 100644
index 0000000..536fdb9
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBCred.go
@@ -0,0 +1,102 @@
+package messages
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/crypto"
+	"github.com/jcmturner/gokrb5/v8/iana/asnAppTag"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/iana/msgtype"
+	"github.com/jcmturner/gokrb5/v8/krberror"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+type marshalKRBCred struct {
+	PVNO    int                 `asn1:"explicit,tag:0"`
+	MsgType int                 `asn1:"explicit,tag:1"`
+	Tickets asn1.RawValue       `asn1:"explicit,tag:2"`
+	EncPart types.EncryptedData `asn1:"explicit,tag:3"`
+}
+
+// KRBCred implements RFC 4120 KRB_CRED: https://tools.ietf.org/html/rfc4120#section-5.8.1.
+type KRBCred struct {
+	PVNO             int
+	MsgType          int
+	Tickets          []Ticket
+	EncPart          types.EncryptedData
+	DecryptedEncPart EncKrbCredPart
+}
+
+// EncKrbCredPart is the encrypted part of KRB_CRED.
+type EncKrbCredPart struct {
+	TicketInfo []KrbCredInfo     `asn1:"explicit,tag:0"`
+	Nouce      int               `asn1:"optional,explicit,tag:1"`
+	Timestamp  time.Time         `asn1:"generalized,optional,explicit,tag:2"`
+	Usec       int               `asn1:"optional,explicit,tag:3"`
+	SAddress   types.HostAddress `asn1:"optional,explicit,tag:4"`
+	RAddress   types.HostAddress `asn1:"optional,explicit,tag:5"`
+}
+
+// KrbCredInfo is the KRB_CRED_INFO part of KRB_CRED.
+type KrbCredInfo struct {
+	Key       types.EncryptionKey `asn1:"explicit,tag:0"`
+	PRealm    string              `asn1:"generalstring,optional,explicit,tag:1"`
+	PName     types.PrincipalName `asn1:"optional,explicit,tag:2"`
+	Flags     asn1.BitString      `asn1:"optional,explicit,tag:3"`
+	AuthTime  time.Time           `asn1:"generalized,optional,explicit,tag:4"`
+	StartTime time.Time           `asn1:"generalized,optional,explicit,tag:5"`
+	EndTime   time.Time           `asn1:"generalized,optional,explicit,tag:6"`
+	RenewTill time.Time           `asn1:"generalized,optional,explicit,tag:7"`
+	SRealm    string              `asn1:"optional,explicit,ia5,tag:8"`
+	SName     types.PrincipalName `asn1:"optional,explicit,tag:9"`
+	CAddr     types.HostAddresses `asn1:"optional,explicit,tag:10"`
+}
+
+// Unmarshal bytes b into the KRBCred struct.
+func (k *KRBCred) Unmarshal(b []byte) error {
+	var m marshalKRBCred
+	_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBCred))
+	if err != nil {
+		return processUnmarshalReplyError(b, err)
+	}
+	expectedMsgType := msgtype.KRB_CRED
+	if m.MsgType != expectedMsgType {
+		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_CRED. Expected: %v; Actual: %v", expectedMsgType, m.MsgType)
+	}
+	k.PVNO = m.PVNO
+	k.MsgType = m.MsgType
+	k.EncPart = m.EncPart
+	if len(m.Tickets.Bytes) > 0 {
+		k.Tickets, err = unmarshalTicketsSequence(m.Tickets)
+		if err != nil {
+			return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling tickets within KRB_CRED")
+		}
+	}
+	return nil
+}
+
+// DecryptEncPart decrypts the encrypted part of a KRB_CRED.
+func (k *KRBCred) DecryptEncPart(key types.EncryptionKey) error {
+	b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.KRB_CRED_ENCPART)
+	if err != nil {
+		return krberror.Errorf(err, krberror.DecryptingError, "error decrypting KRB_CRED EncPart")
+	}
+	var denc EncKrbCredPart
+	err = denc.Unmarshal(b)
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part of KRB_CRED")
+	}
+	k.DecryptedEncPart = denc
+	return nil
+}
+
+// Unmarshal bytes b into the encrypted part of KRB_CRED.
+func (k *EncKrbCredPart) Unmarshal(b []byte) error {
+	_, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncKrbCredPart))
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling EncKrbCredPart")
+	}
+	return nil
+}
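
A KRB_CRED is usually received when credentials are forwarded; the sketch below decodes one and decrypts its ticket information. The decryption key is assumed to come from the surrounding exchange (for example the subkey established during an AP exchange).

```go
package client

import (
	"github.com/jcmturner/gokrb5/v8/messages"
	"github.com/jcmturner/gokrb5/v8/types"
)

// extractForwardedTickets decodes a KRB_CRED and decrypts its ticket
// information with a key supplied by the caller.
func extractForwardedTickets(wire []byte, key types.EncryptionKey) ([]messages.Ticket, []messages.KrbCredInfo, error) {
	var cred messages.KRBCred
	if err := cred.Unmarshal(wire); err != nil {
		return nil, nil, err
	}
	if err := cred.DecryptEncPart(key); err != nil {
		return nil, nil, err
	}
	// One KrbCredInfo entry (key, flags, times, names) accompanies each
	// ticket carried in cred.Tickets.
	return cred.Tickets, cred.DecryptedEncPart.TicketInfo, nil
}
```
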
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBError.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBError.go
new file mode 100644
index 0000000..d2cf32d
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBError.go
@@ -0,0 +1,94 @@
+// Package messages implements Kerberos 5 message types and methods.
+package messages
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/asn1tools"
+	"github.com/jcmturner/gokrb5/v8/iana"
+	"github.com/jcmturner/gokrb5/v8/iana/asnAppTag"
+	"github.com/jcmturner/gokrb5/v8/iana/errorcode"
+	"github.com/jcmturner/gokrb5/v8/iana/msgtype"
+	"github.com/jcmturner/gokrb5/v8/krberror"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// KRBError implements RFC 4120 KRB_ERROR: https://tools.ietf.org/html/rfc4120#section-5.9.1.
+type KRBError struct {
+	PVNO      int                 `asn1:"explicit,tag:0"`
+	MsgType   int                 `asn1:"explicit,tag:1"`
+	CTime     time.Time           `asn1:"generalized,optional,explicit,tag:2"`
+	Cusec     int                 `asn1:"optional,explicit,tag:3"`
+	STime     time.Time           `asn1:"generalized,explicit,tag:4"`
+	Susec     int                 `asn1:"explicit,tag:5"`
+	ErrorCode int32               `asn1:"explicit,tag:6"`
+	CRealm    string              `asn1:"generalstring,optional,explicit,tag:7"`
+	CName     types.PrincipalName `asn1:"optional,explicit,tag:8"`
+	Realm     string              `asn1:"generalstring,explicit,tag:9"`
+	SName     types.PrincipalName `asn1:"explicit,tag:10"`
+	EText     string              `asn1:"generalstring,optional,explicit,tag:11"`
+	EData     []byte              `asn1:"optional,explicit,tag:12"`
+}
+
+// NewKRBError creates a new KRBError.
+func NewKRBError(sname types.PrincipalName, realm string, code int32, etext string) KRBError {
+	t := time.Now().UTC()
+	return KRBError{
+		PVNO:      iana.PVNO,
+		MsgType:   msgtype.KRB_ERROR,
+		STime:     t,
+		Susec:     int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)),
+		ErrorCode: code,
+		SName:     sname,
+		Realm:     realm,
+		EText:     etext,
+	}
+}
+
+// Unmarshal bytes b into the KRBError struct.
+func (k *KRBError) Unmarshal(b []byte) error {
+	_, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBError))
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "KRB_ERROR unmarshal error")
+	}
+	expectedMsgType := msgtype.KRB_ERROR
+	if k.MsgType != expectedMsgType {
+		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_ERROR. Expected: %v; Actual: %v", expectedMsgType, k.MsgType)
+	}
+	return nil
+}
+
+// Marshal a KRBError into bytes.
+func (k *KRBError) Marshal() ([]byte, error) {
+	b, err := asn1.Marshal(*k)
+	if err != nil {
+		return b, krberror.Errorf(err, krberror.EncodingError, "error marshaling KRBError")
+	}
+	b = asn1tools.AddASNAppTag(b, asnAppTag.KRBError)
+	return b, nil
+}
+
+// Error method implementing error interface on KRBError struct.
+func (k KRBError) Error() string {
+	etxt := fmt.Sprintf("KRB Error: %s", errorcode.Lookup(k.ErrorCode))
+	if k.EText != "" {
+		etxt = fmt.Sprintf("%s - %s", etxt, k.EText)
+	}
+	return etxt
+}
+
+func processUnmarshalReplyError(b []byte, err error) error {
+	switch err.(type) {
+	case asn1.StructuralError:
+		var krberr KRBError
+		tmperr := krberr.Unmarshal(b)
+		if tmperr != nil {
+			return krberror.Errorf(err, krberror.EncodingError, "failed to unmarshal KDC's reply")
+		}
+		return krberr
+	default:
+		return krberror.Errorf(err, krberror.EncodingError, "failed to unmarshal KDC's reply")
+	}
+}
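
Because `KRBError` implements the `error` interface and `processUnmarshalReplyError` can return it directly, callers can branch on the KDC's error code. A sketch, with the clock-skew case singled out purely as an example:

```go
package client

import (
	"errors"
	"fmt"

	"github.com/jcmturner/gokrb5/v8/iana/errorcode"
	"github.com/jcmturner/gokrb5/v8/messages"
)

// describeKDCError inspects an error returned by an unmarshal or exchange
// call and reports KRB_ERROR details when present.
func describeKDCError(err error) string {
	var krbErr messages.KRBError
	if errors.As(err, &krbErr) {
		// Error() already includes the looked-up error code description.
		if krbErr.ErrorCode == errorcode.KRB_AP_ERR_SKEW {
			return fmt.Sprintf("rejected by realm %s for clock skew: %v", krbErr.Realm, krbErr)
		}
		return krbErr.Error()
	}
	return err.Error()
}
```
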
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBPriv.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBPriv.go
new file mode 100644
index 0000000..0ca6149
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBPriv.go
@@ -0,0 +1,108 @@
+package messages
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/asn1tools"
+	"github.com/jcmturner/gokrb5/v8/crypto"
+	"github.com/jcmturner/gokrb5/v8/iana"
+	"github.com/jcmturner/gokrb5/v8/iana/asnAppTag"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/iana/msgtype"
+	"github.com/jcmturner/gokrb5/v8/krberror"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// KRBPriv implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.7.1.
+type KRBPriv struct {
+	PVNO             int                 `asn1:"explicit,tag:0"`
+	MsgType          int                 `asn1:"explicit,tag:1"`
+	EncPart          types.EncryptedData `asn1:"explicit,tag:3"`
+	DecryptedEncPart EncKrbPrivPart      `asn1:"optional,omitempty"` // Not part of the ASN.1 bytes; marked optional so unmarshalling works
+}
+
+// EncKrbPrivPart is the encrypted part of KRB_PRIV.
+type EncKrbPrivPart struct {
+	UserData       []byte            `asn1:"explicit,tag:0"`
+	Timestamp      time.Time         `asn1:"generalized,optional,explicit,tag:1"`
+	Usec           int               `asn1:"optional,explicit,tag:2"`
+	SequenceNumber int64             `asn1:"optional,explicit,tag:3"`
+	SAddress       types.HostAddress `asn1:"explicit,tag:4"`
+	RAddress       types.HostAddress `asn1:"optional,explicit,tag:5"`
+}
+
+// NewKRBPriv returns a new KRBPriv type.
+func NewKRBPriv(part EncKrbPrivPart) KRBPriv {
+	return KRBPriv{
+		PVNO:             iana.PVNO,
+		MsgType:          msgtype.KRB_PRIV,
+		DecryptedEncPart: part,
+	}
+}
+
+// Unmarshal bytes b into the KRBPriv struct.
+func (k *KRBPriv) Unmarshal(b []byte) error {
+	_, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBPriv))
+	if err != nil {
+		return processUnmarshalReplyError(b, err)
+	}
+	expectedMsgType := msgtype.KRB_PRIV
+	if k.MsgType != expectedMsgType {
+		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_PRIV. Expected: %v; Actual: %v", expectedMsgType, k.MsgType)
+	}
+	return nil
+}
+
+// Unmarshal bytes b into the EncKrbPrivPart struct.
+func (k *EncKrbPrivPart) Unmarshal(b []byte) error {
+	_, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncKrbPrivPart))
+	if err != nil {
+		return krberror.Errorf(err, krberror.EncodingError, "KRB_PRIV unmarshal error")
+	}
+	return nil
+}
+
+// Marshal the KRBPriv.
+func (k *KRBPriv) Marshal() ([]byte, error) {
+	tk := KRBPriv{
+		PVNO:    k.PVNO,
+		MsgType: k.MsgType,
+		EncPart: k.EncPart,
+	}
+	b, err := asn1.Marshal(tk)
+	if err != nil {
+		return []byte{}, err
+	}
+	b = asn1tools.AddASNAppTag(b, asnAppTag.KRBPriv)
+	return b, nil
+}
+
+// EncryptEncPart encrypts the DecryptedEncPart within the KRBPriv.
+// Use to prepare for marshaling.
+func (k *KRBPriv) EncryptEncPart(key types.EncryptionKey) error {
+	b, err := asn1.Marshal(k.DecryptedEncPart)
+	if err != nil {
+		return err
+	}
+	b = asn1tools.AddASNAppTag(b, asnAppTag.EncKrbPrivPart)
+	k.EncPart, err = crypto.GetEncryptedData(b, key, keyusage.KRB_PRIV_ENCPART, 1)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// DecryptEncPart decrypts the encrypted part of the KRBPriv message.
+func (k *KRBPriv) DecryptEncPart(key types.EncryptionKey) error {
+	b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.KRB_PRIV_ENCPART)
+	if err != nil {
+		return fmt.Errorf("error decrypting KRBPriv EncPart: %v", err)
+	}
+	err = k.DecryptedEncPart.Unmarshal(b)
+	if err != nil {
+		return fmt.Errorf("error unmarshaling encrypted part: %v", err)
+	}
+	return nil
+}
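
A hedged round-trip sketch of the KRB_PRIV API above (illustrative usage only; the etype ID 18 for aes256-cts-hmac-sha1-96, the user data, and the loopback address are assumptions): build a message, encrypt and marshal it, then unmarshal and decrypt it with the same key.

```go
package main

import (
	"fmt"
	"net"
	"time"

	"github.com/jcmturner/gokrb5/v8/crypto"
	"github.com/jcmturner/gokrb5/v8/messages"
	"github.com/jcmturner/gokrb5/v8/types"
)

func main() {
	// Generate a throwaway session key (etype 18 assumed to be aes256-cts-hmac-sha1-96).
	et, err := crypto.GetEtype(18)
	if err != nil {
		panic(err)
	}
	key, err := types.GenerateEncryptionKey(et)
	if err != nil {
		panic(err)
	}

	// Build the plaintext part and wrap it in a KRB_PRIV message.
	part := messages.EncKrbPrivPart{
		UserData:  []byte("hello"),
		Timestamp: time.Now().UTC(),
		SAddress:  types.HostAddressFromNetIP(net.ParseIP("127.0.0.1")),
	}
	kp := messages.NewKRBPriv(part)
	if err := kp.EncryptEncPart(key); err != nil { // encrypts DecryptedEncPart into EncPart
		panic(err)
	}
	wire, err := kp.Marshal()
	if err != nil {
		panic(err)
	}

	// Reverse the process on the receiving side.
	var rx messages.KRBPriv
	if err := rx.Unmarshal(wire); err != nil {
		panic(err)
	}
	if err := rx.DecryptEncPart(key); err != nil {
		panic(err)
	}
	fmt.Printf("user data: %s\n", rx.DecryptedEncPart.UserData)
}
```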
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBSafe.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBSafe.go
new file mode 100644
index 0000000..52cd284
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/KRBSafe.go
@@ -0,0 +1,43 @@
+package messages
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/iana/asnAppTag"
+	"github.com/jcmturner/gokrb5/v8/iana/msgtype"
+	"github.com/jcmturner/gokrb5/v8/krberror"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// KRBSafe implements RFC 4120 KRB_SAFE: https://tools.ietf.org/html/rfc4120#section-5.6.1.
+type KRBSafe struct {
+	PVNO     int            `asn1:"explicit,tag:0"`
+	MsgType  int            `asn1:"explicit,tag:1"`
+	SafeBody KRBSafeBody    `asn1:"explicit,tag:2"`
+	Cksum    types.Checksum `asn1:"explicit,tag:3"`
+}
+
+// KRBSafeBody implements the KRB_SAFE_BODY of KRB_SAFE.
+type KRBSafeBody struct {
+	UserData       []byte            `asn1:"explicit,tag:0"`
+	Timestamp      time.Time         `asn1:"generalized,optional,explicit,tag:1"`
+	Usec           int               `asn1:"optional,explicit,tag:2"`
+	SequenceNumber int64             `asn1:"optional,explicit,tag:3"`
+	SAddress       types.HostAddress `asn1:"explicit,tag:4"`
+	RAddress       types.HostAddress `asn1:"optional,explicit,tag:5"`
+}
+
+// Unmarshal bytes b into the KRBSafe struct.
+func (s *KRBSafe) Unmarshal(b []byte) error {
+	_, err := asn1.UnmarshalWithParams(b, s, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBSafe))
+	if err != nil {
+		return processUnmarshalReplyError(b, err)
+	}
+	expectedMsgType := msgtype.KRB_SAFE
+	if s.MsgType != expectedMsgType {
+		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_SAFE. Expected: %v; Actual: %v", expectedMsgType, s.MsgType)
+	}
+	return nil
+}
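
A small assumed-usage sketch of KRBSafe: decode raw DER bytes received from a peer and read the user data; Unmarshal itself rejects messages whose type is not KRB_SAFE. The package name and function are hypothetical.

```go
package krbsketch

import (
	"fmt"

	"github.com/jcmturner/gokrb5/v8/messages"
)

// ReadSafeUserData decodes a KRB_SAFE message and returns its user data.
// safeBytes is a placeholder for bytes received from a peer.
func ReadSafeUserData(safeBytes []byte) ([]byte, error) {
	var s messages.KRBSafe
	if err := s.Unmarshal(safeBytes); err != nil {
		return nil, fmt.Errorf("not a valid KRB_SAFE message: %v", err)
	}
	return s.SafeBody.UserData, nil
}
```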
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/messages/Ticket.go b/vendor/github.com/jcmturner/gokrb5/v8/messages/Ticket.go
new file mode 100644
index 0000000..11efad6
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/messages/Ticket.go
@@ -0,0 +1,262 @@
+package messages
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/asn1tools"
+	"github.com/jcmturner/gokrb5/v8/crypto"
+	"github.com/jcmturner/gokrb5/v8/iana"
+	"github.com/jcmturner/gokrb5/v8/iana/adtype"
+	"github.com/jcmturner/gokrb5/v8/iana/asnAppTag"
+	"github.com/jcmturner/gokrb5/v8/iana/errorcode"
+	"github.com/jcmturner/gokrb5/v8/iana/flags"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/keytab"
+	"github.com/jcmturner/gokrb5/v8/krberror"
+	"github.com/jcmturner/gokrb5/v8/pac"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+// Reference: https://www.ietf.org/rfc/rfc4120.txt
+// Section: 5.3
+
+// Ticket implements the Kerberos ticket.
+type Ticket struct {
+	TktVNO           int                 `asn1:"explicit,tag:0"`
+	Realm            string              `asn1:"generalstring,explicit,tag:1"`
+	SName            types.PrincipalName `asn1:"explicit,tag:2"`
+	EncPart          types.EncryptedData `asn1:"explicit,tag:3"`
+	DecryptedEncPart EncTicketPart       `asn1:"optional"` // Not part of the ASN1 bytes; marked optional so that unmarshalling works
+}
+
+// EncTicketPart is the encrypted part of the Ticket.
+type EncTicketPart struct {
+	Flags             asn1.BitString          `asn1:"explicit,tag:0"`
+	Key               types.EncryptionKey     `asn1:"explicit,tag:1"`
+	CRealm            string                  `asn1:"generalstring,explicit,tag:2"`
+	CName             types.PrincipalName     `asn1:"explicit,tag:3"`
+	Transited         TransitedEncoding       `asn1:"explicit,tag:4"`
+	AuthTime          time.Time               `asn1:"generalized,explicit,tag:5"`
+	StartTime         time.Time               `asn1:"generalized,explicit,optional,tag:6"`
+	EndTime           time.Time               `asn1:"generalized,explicit,tag:7"`
+	RenewTill         time.Time               `asn1:"generalized,explicit,optional,tag:8"`
+	CAddr             types.HostAddresses     `asn1:"explicit,optional,tag:9"`
+	AuthorizationData types.AuthorizationData `asn1:"explicit,optional,tag:10"`
+}
+
+// TransitedEncoding part of the ticket's encrypted part.
+type TransitedEncoding struct {
+	TRType   int32  `asn1:"explicit,tag:0"`
+	Contents []byte `asn1:"explicit,tag:1"`
+}
+
+// NewTicket creates a new Ticket instance.
+func NewTicket(cname types.PrincipalName, crealm string, sname types.PrincipalName, srealm string, flags asn1.BitString, sktab *keytab.Keytab, eTypeID int32, kvno int, authTime, startTime, endTime, renewTill time.Time) (Ticket, types.EncryptionKey, error) {
+	etype, err := crypto.GetEtype(eTypeID)
+	if err != nil {
+		return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error getting etype for new ticket")
+	}
+	sessionKey, err := types.GenerateEncryptionKey(etype)
+	if err != nil {
+		return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error generating session key")
+	}
+
+	etp := EncTicketPart{
+		Flags:     flags,
+		Key:       sessionKey,
+		CRealm:    crealm,
+		CName:     cname,
+		Transited: TransitedEncoding{},
+		AuthTime:  authTime,
+		StartTime: startTime,
+		EndTime:   endTime,
+		RenewTill: renewTill,
+	}
+	b, err := asn1.Marshal(etp)
+	if err != nil {
+		return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncodingError, "error marshalling ticket encpart")
+	}
+	b = asn1tools.AddASNAppTag(b, asnAppTag.EncTicketPart)
+	skey, _, err := sktab.GetEncryptionKey(sname, srealm, kvno, eTypeID)
+	if err != nil {
+		return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error getting encryption key for new ticket")
+	}
+	ed, err := crypto.GetEncryptedData(b, skey, keyusage.KDC_REP_TICKET, kvno)
+	if err != nil {
+		return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error encrypting ticket encpart")
+	}
+	tkt := Ticket{
+		TktVNO:  iana.PVNO,
+		Realm:   srealm,
+		SName:   sname,
+		EncPart: ed,
+	}
+	return tkt, sessionKey, nil
+}
+
+// Unmarshal bytes b into a Ticket struct.
+func (t *Ticket) Unmarshal(b []byte) error {
+	_, err := asn1.UnmarshalWithParams(b, t, fmt.Sprintf("application,explicit,tag:%d", asnAppTag.Ticket))
+	return err
+}
+
+// Marshal the Ticket.
+func (t *Ticket) Marshal() ([]byte, error) {
+	b, err := asn1.Marshal(*t)
+	if err != nil {
+		return nil, err
+	}
+	b = asn1tools.AddASNAppTag(b, asnAppTag.Ticket)
+	return b, nil
+}
+
+// Unmarshal bytes b into the EncTicketPart struct.
+func (t *EncTicketPart) Unmarshal(b []byte) error {
+	_, err := asn1.UnmarshalWithParams(b, t, fmt.Sprintf("application,explicit,tag:%d", asnAppTag.EncTicketPart))
+	return err
+}
+
+// unmarshalTicket returns a ticket from the bytes provided.
+func unmarshalTicket(b []byte) (t Ticket, err error) {
+	err = t.Unmarshal(b)
+	return
+}
+
+// unmarshalTicketsSequence returns a slice of Tickets from a raw ASN1 value.
+func unmarshalTicketsSequence(in asn1.RawValue) ([]Ticket, error) {
+	// This is a workaround for an asn1 decoding issue in Go (https://github.com/golang/go/issues/17321).
+	// We pull raw values out of the larger raw value (the data of the sequence of raw values) and track our position as we move along the data.
+	b := in.Bytes
+	// Skip the head of the asn1 stream (1 byte for the tag plus the length bytes); it only indicates a sequence, which we handle ourselves.
+	p := 1 + asn1tools.GetNumberBytesInLengthHeader(in.Bytes)
+	var tkts []Ticket
+	var raw asn1.RawValue
+	for p < (len(b)) {
+		_, err := asn1.UnmarshalWithParams(b[p:], &raw, fmt.Sprintf("application,tag:%d", asnAppTag.Ticket))
+		if err != nil {
+			return nil, fmt.Errorf("unmarshaling sequence of tickets failed getting length of ticket: %v", err)
+		}
+		t, err := unmarshalTicket(b[p:])
+		if err != nil {
+			return nil, fmt.Errorf("unmarshaling sequence of tickets failed: %v", err)
+		}
+		p += len(raw.FullBytes)
+		tkts = append(tkts, t)
+	}
+	MarshalTicketSequence(tkts)
+	return tkts, nil
+}
+
+// MarshalTicketSequence marshals a slice of Tickets returning an ASN1 raw value containing the ticket sequence.
+func MarshalTicketSequence(tkts []Ticket) (asn1.RawValue, error) {
+	raw := asn1.RawValue{
+		Class:      2,
+		IsCompound: true,
+	}
+	if len(tkts) < 1 {
+		// There are no tickets to marshal
+		return raw, nil
+	}
+	var btkts []byte
+	for i, t := range tkts {
+		b, err := t.Marshal()
+		if err != nil {
+			return raw, fmt.Errorf("error marshaling ticket number %d in sequence of tickets", i+1)
+		}
+		btkts = append(btkts, b...)
+	}
+	// The ASN1 wrapping prepended here consists of two parts:
+	//   identifier octet -> a constructed SEQUENCE tag (the constructed bit, 32, plus asn1.TagSequence).
+	//   length octets    -> encode the length of the concatenated ticket bytes.
+	btkts = append(asn1tools.MarshalLengthBytes(len(btkts)), btkts...)
+	btkts = append([]byte{byte(32 + asn1.TagSequence)}, btkts...)
+	raw.Bytes = btkts
+	// If we needed to create the full bytes, the identifier octet would be "context-specific" (128) + "constructed" (32) + the wrapping explicit tag (11).
+	//fmt.Fprintf(os.Stderr, "mRaw fb: %v\n", raw.FullBytes)
+	return raw, nil
+}
+
+// DecryptEncPart decrypts the encrypted part of the ticket.
+// The sname argument can be used to specify which service principal's key should be used to decrypt the ticket.
+// If nil is passed as the sname then the service principal specified within the ticket is used.
+func (t *Ticket) DecryptEncPart(keytab *keytab.Keytab, sname *types.PrincipalName) error {
+	if sname == nil {
+		sname = &t.SName
+	}
+	key, _, err := keytab.GetEncryptionKey(*sname, t.Realm, t.EncPart.KVNO, t.EncPart.EType)
+	if err != nil {
+		return NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_NOKEY, fmt.Sprintf("Could not get key from keytab: %v", err))
+	}
+	return t.Decrypt(key)
+}
+
+// Decrypt decrypts the encrypted part of the ticket using the key provided.
+func (t *Ticket) Decrypt(key types.EncryptionKey) error {
+	b, err := crypto.DecryptEncPart(t.EncPart, key, keyusage.KDC_REP_TICKET)
+	if err != nil {
+		return fmt.Errorf("error decrypting Ticket EncPart: %v", err)
+	}
+	var denc EncTicketPart
+	err = denc.Unmarshal(b)
+	if err != nil {
+		return fmt.Errorf("error unmarshaling encrypted part: %v", err)
+	}
+	t.DecryptedEncPart = denc
+	return nil
+}
+
+// GetPACType returns a Microsoft PAC that has been extracted from the ticket and processed.
+func (t *Ticket) GetPACType(keytab *keytab.Keytab, sname *types.PrincipalName, l *log.Logger) (bool, pac.PACType, error) {
+	var isPAC bool
+	for _, ad := range t.DecryptedEncPart.AuthorizationData {
+		if ad.ADType == adtype.ADIfRelevant {
+			var ad2 types.AuthorizationData
+			err := ad2.Unmarshal(ad.ADData)
+			if err != nil {
+				l.Printf("PAC authorization data could not be unmarshaled: %v", err)
+				continue
+			}
+			if ad2[0].ADType == adtype.ADWin2KPAC {
+				isPAC = true
+				var p pac.PACType
+				err = p.Unmarshal(ad2[0].ADData)
+				if err != nil {
+					return isPAC, p, fmt.Errorf("error unmarshaling PAC: %v", err)
+				}
+				if sname == nil {
+					sname = &t.SName
+				}
+				key, _, err := keytab.GetEncryptionKey(*sname, t.Realm, t.EncPart.KVNO, t.EncPart.EType)
+				if err != nil {
+					return isPAC, p, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_NOKEY, fmt.Sprintf("Could not get key from keytab: %v", err))
+				}
+				err = p.ProcessPACInfoBuffers(key, l)
+				return isPAC, p, err
+			}
+		}
+	}
+	return isPAC, pac.PACType{}, nil
+}
+
+// Valid checks if the ticket is currently valid. The maximum allowed duration past the end time is passed in as an argument.
+func (t *Ticket) Valid(d time.Duration) (bool, error) {
+	// Check for future tickets or invalid tickets
+	now := time.Now().UTC()
+	if t.DecryptedEncPart.StartTime.Sub(now) > d || types.IsFlagSet(&t.DecryptedEncPart.Flags, flags.Invalid) {
+		return false, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_TKT_NYV, "service ticket provided is not yet valid")
+	}
+
+	// Check for expired ticket
+	if now.Sub(t.DecryptedEncPart.EndTime) > d {
+		return false, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_TKT_EXPIRED, "service ticket provided has expired")
+	}
+
+	return true, nil
+}
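
A hedged service-side sketch tying the Ticket methods above together (assumed usage; the five-minute clock-skew allowance is an assumption, and the keytab is expected to have been loaded elsewhere): decrypt the ticket with the service keytab, check its validity window, then pull out the PAC.

```go
package krbsketch

import (
	"log"
	"os"
	"time"

	"github.com/jcmturner/gokrb5/v8/keytab"
	"github.com/jcmturner/gokrb5/v8/messages"
)

// ProcessServiceTicket decrypts, validates and extracts the PAC from a ticket
// using the service's keytab.
func ProcessServiceTicket(tkt *messages.Ticket, kt *keytab.Keytab) error {
	// Passing nil for sname means the SName carried in the ticket is used.
	if err := tkt.DecryptEncPart(kt, nil); err != nil {
		return err
	}
	if ok, err := tkt.Valid(5 * time.Minute); !ok {
		return err
	}
	l := log.New(os.Stderr, "pac: ", log.LstdFlags)
	isPAC, p, err := tkt.GetPACType(kt, nil, l)
	if err != nil {
		return err
	}
	if isPAC {
		l.Printf("PAC client name: %s", p.ClientInfo.Name)
	}
	return nil
}
```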
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/client_claims.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/client_claims.go
new file mode 100644
index 0000000..36871e0
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/client_claims.go
@@ -0,0 +1,34 @@
+package pac
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/jcmturner/rpc/v2/mstypes"
+	"github.com/jcmturner/rpc/v2/ndr"
+)
+
+// Claims reference: https://msdn.microsoft.com/en-us/library/hh553895.aspx
+
+// ClientClaimsInfo implements https://msdn.microsoft.com/en-us/library/hh536365.aspx
+type ClientClaimsInfo struct {
+	ClaimsSetMetadata mstypes.ClaimsSetMetadata
+	ClaimsSet         mstypes.ClaimsSet
+}
+
+// Unmarshal bytes into the ClientClaimsInfo struct
+func (k *ClientClaimsInfo) Unmarshal(b []byte) (err error) {
+	dec := ndr.NewDecoder(bytes.NewReader(b))
+	m := new(mstypes.ClaimsSetMetadata)
+	err = dec.Decode(m)
+	if err != nil {
+		err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSetMetadata: %v", err)
+		return
+	}
+	k.ClaimsSetMetadata = *m
+	k.ClaimsSet, err = k.ClaimsSetMetadata.ClaimsSet()
+	if err != nil {
+		err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSet: %v", err)
+	}
+	return
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/client_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/client_info.go
new file mode 100644
index 0000000..ddd9578
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/client_info.go
@@ -0,0 +1,31 @@
+package pac
+
+import (
+	"bytes"
+
+	"github.com/jcmturner/rpc/v2/mstypes"
+)
+
+// ClientInfo implements https://msdn.microsoft.com/en-us/library/cc237951.aspx
+type ClientInfo struct {
+	ClientID   mstypes.FileTime // A FILETIME structure in little-endian format that contains the Kerberos initial ticket-granting ticket TGT authentication time
+	NameLength uint16           // An unsigned 16-bit integer in little-endian format that specifies the length, in bytes, of the Name field.
+	Name       string           // An array of 16-bit Unicode characters in little-endian format that contains the client's account name.
+}
+
+// Unmarshal bytes into the ClientInfo struct
+func (k *ClientInfo) Unmarshal(b []byte) (err error) {
+	//The PAC_CLIENT_INFO structure is a simple structure that is not NDR-encoded.
+	r := mstypes.NewReader(bytes.NewReader(b))
+
+	k.ClientID, err = r.FileTime()
+	if err != nil {
+		return
+	}
+	k.NameLength, err = r.Uint16()
+	if err != nil {
+		return
+	}
+	k.Name, err = r.UTF16String(int(k.NameLength))
+	return
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/credentials_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/credentials_info.go
new file mode 100644
index 0000000..0c7ccd4
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/credentials_info.go
@@ -0,0 +1,86 @@
+package pac
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+
+	"github.com/jcmturner/gokrb5/v8/crypto"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/types"
+	"github.com/jcmturner/rpc/v2/mstypes"
+	"github.com/jcmturner/rpc/v2/ndr"
+)
+
+// https://msdn.microsoft.com/en-us/library/cc237931.aspx
+
+// CredentialsInfo implements https://msdn.microsoft.com/en-us/library/cc237953.aspx
+type CredentialsInfo struct {
+	Version                    uint32 // A 32-bit unsigned integer in little-endian format that defines the version. MUST be 0x00000000.
+	EType                      uint32
+	PACCredentialDataEncrypted []byte // Key usage number for encryption: KERB_NON_KERB_SALT (16)
+	PACCredentialData          CredentialData
+}
+
+// Unmarshal bytes into the CredentialsInfo struct
+func (c *CredentialsInfo) Unmarshal(b []byte, k types.EncryptionKey) (err error) {
+	//The CredentialsInfo structure is a simple structure that is not NDR-encoded.
+	r := mstypes.NewReader(bytes.NewReader(b))
+
+	c.Version, err = r.Uint32()
+	if err != nil {
+		return
+	}
+	if c.Version != 0 {
+		err = errors.New("credentials info version is not zero")
+		return
+	}
+	c.EType, err = r.Uint32()
+	if err != nil {
+		return
+	}
+	c.PACCredentialDataEncrypted, err = r.ReadBytes(len(b) - 8)
+	if err != nil {
+		err = fmt.Errorf("error reading PAC Credetials Data: %v", err)
+		return
+	}
+
+	err = c.DecryptEncPart(k)
+	if err != nil {
+		err = fmt.Errorf("error decrypting PAC Credentials Data: %v", err)
+		return
+	}
+	return
+}
+
+// DecryptEncPart decrypts the encrypted part of the CredentialsInfo.
+func (c *CredentialsInfo) DecryptEncPart(k types.EncryptionKey) error {
+	if k.KeyType != int32(c.EType) {
+		return fmt.Errorf("key provided is not the correct type. Type needed: %d, type provided: %d", c.EType, k.KeyType)
+	}
+	pt, err := crypto.DecryptMessage(c.PACCredentialDataEncrypted, k, keyusage.KERB_NON_KERB_SALT)
+	if err != nil {
+		return err
+	}
+	err = c.PACCredentialData.Unmarshal(pt)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// CredentialData implements https://msdn.microsoft.com/en-us/library/cc237952.aspx
+type CredentialData struct {
+	CredentialCount uint32
+	Credentials     []SECPKGSupplementalCred // Size is the value of CredentialCount
+}
+
+// Unmarshal converts the bytes provided into a CredentialData type.
+func (c *CredentialData) Unmarshal(b []byte) (err error) {
+	dec := ndr.NewDecoder(bytes.NewReader(b))
+	err = dec.Decode(c)
+	if err != nil {
+		err = fmt.Errorf("error unmarshaling KerbValidationInfo: %v", err)
+	}
+	return
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/device_claims.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/device_claims.go
new file mode 100644
index 0000000..6eb2926
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/device_claims.go
@@ -0,0 +1,34 @@
+package pac
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/jcmturner/rpc/v2/mstypes"
+	"github.com/jcmturner/rpc/v2/ndr"
+)
+
+// Claims reference: https://msdn.microsoft.com/en-us/library/hh553895.aspx
+
+// DeviceClaimsInfo implements https://msdn.microsoft.com/en-us/library/hh554226.aspx
+type DeviceClaimsInfo struct {
+	ClaimsSetMetadata mstypes.ClaimsSetMetadata
+	ClaimsSet         mstypes.ClaimsSet
+}
+
+// Unmarshal bytes into the DeviceClaimsInfo struct
+func (k *DeviceClaimsInfo) Unmarshal(b []byte) (err error) {
+	dec := ndr.NewDecoder(bytes.NewReader(b))
+	m := new(mstypes.ClaimsSetMetadata)
+	err = dec.Decode(m)
+	if err != nil {
+		err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSetMetadata: %v", err)
+		return
+	}
+	k.ClaimsSetMetadata = *m
+	k.ClaimsSet, err = k.ClaimsSetMetadata.ClaimsSet()
+	if err != nil {
+		err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSet: %v", err)
+	}
+	return
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/device_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/device_info.go
new file mode 100644
index 0000000..ce82daa
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/device_info.go
@@ -0,0 +1,32 @@
+package pac
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/jcmturner/rpc/v2/mstypes"
+	"github.com/jcmturner/rpc/v2/ndr"
+)
+
+// DeviceInfo implements https://msdn.microsoft.com/en-us/library/hh536402.aspx
+type DeviceInfo struct {
+	UserID            uint32                          // A 32-bit unsigned integer that contains the RID of the account. If the UserId member equals 0x00000000, the first group SID in this member is the SID for this account.
+	PrimaryGroupID    uint32                          // A 32-bit unsigned integer that contains the RID for the primary group to which this account belongs.
+	AccountDomainID   mstypes.RPCSID                  `ndr:"pointer"` // A SID structure that contains the SID for the domain of the account.This member is used in conjunction with the UserId, and GroupIds members to create the user and group SIDs for the client.
+	AccountGroupCount uint32                          // A 32-bit unsigned integer that contains the number of groups within the account domain to which the account belongs
+	AccountGroupIDs   []mstypes.GroupMembership       `ndr:"pointer,conformant"` // A pointer to a list of GROUP_MEMBERSHIP (section 2.2.2) structures that contains the groups to which the account belongs in the account domain. The number of groups in this list MUST be equal to GroupCount.
+	SIDCount          uint32                          // A 32-bit unsigned integer that contains the total number of SIDs present in the ExtraSids member.
+	ExtraSIDs         []mstypes.KerbSidAndAttributes  `ndr:"pointer,conformant"` // A pointer to a list of KERB_SID_AND_ATTRIBUTES structures that contain a list of SIDs corresponding to groups not in domains. If the UserId member equals 0x00000000, the first group SID in this member is the SID for this account.
+	DomainGroupCount  uint32                          // A 32-bit unsigned integer that contains the number of domains with groups to which the account belongs.
+	DomainGroup       []mstypes.DomainGroupMembership `ndr:"pointer,conformant"` // A pointer to a list of DOMAIN_GROUP_MEMBERSHIP structures (section 2.2.3) that contains the domains to which the account belongs to a group. The number of sets in this list MUST be equal to DomainCount.
+}
+
+// Unmarshal bytes into the DeviceInfo struct
+func (k *DeviceInfo) Unmarshal(b []byte) (err error) {
+	dec := ndr.NewDecoder(bytes.NewReader(b))
+	err = dec.Decode(k)
+	if err != nil {
+		err = fmt.Errorf("error unmarshaling DeviceInfo: %v", err)
+	}
+	return
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/kerb_validation_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/kerb_validation_info.go
new file mode 100644
index 0000000..dde7861
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/kerb_validation_info.go
@@ -0,0 +1,110 @@
+// Package pac implements Microsoft Privilege Attribute Certificate (PAC) processing.
+package pac
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/jcmturner/rpc/v2/mstypes"
+	"github.com/jcmturner/rpc/v2/ndr"
+)
+
+// KERB_VALIDATION_INFO flags.
+const (
+	USERFLAG_GUEST                                    = 31 // Authentication was done via the GUEST account; no password was used.
+	USERFLAG_NO_ENCRYPTION_AVAILABLE                  = 30 // No encryption is available.
+	USERFLAG_LAN_MANAGER_KEY                          = 28 // LAN Manager key was used for authentication.
+	USERFLAG_SUB_AUTH                                 = 25 // Sub-authentication used; session key came from the sub-authentication package.
+	USERFLAG_EXTRA_SIDS                               = 26 // Indicates that the ExtraSids field is populated and contains additional SIDs.
+	USERFLAG_MACHINE_ACCOUNT                          = 24 // Indicates that the account is a machine account.
+	USERFLAG_DC_NTLM2                                 = 23 // Indicates that the domain controller understands NTLMv2.
+	USERFLAG_RESOURCE_GROUPIDS                        = 22 // Indicates that the ResourceGroupIds field is populated.
+	USERFLAG_PROFILEPATH                              = 21 // Indicates that ProfilePath is populated.
+	USERFLAG_NTLM2_NTCHALLENGERESP                    = 20 // The NTLMv2 response from the NtChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and session key generation.
+	USERFLAG_LM2_LMCHALLENGERESP                      = 19 // The LMv2 response from the LmChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and session key generation.
+	USERFLAG_AUTH_LMCHALLENGERESP_KEY_NTCHALLENGERESP = 18 // The LMv2 response from the LmChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and the NTLMv2 response from the NtChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for session key generation.
+)
+
+// KerbValidationInfo implements https://msdn.microsoft.com/en-us/library/cc237948.aspx
+type KerbValidationInfo struct {
+	LogOnTime              mstypes.FileTime
+	LogOffTime             mstypes.FileTime
+	KickOffTime            mstypes.FileTime
+	PasswordLastSet        mstypes.FileTime
+	PasswordCanChange      mstypes.FileTime
+	PasswordMustChange     mstypes.FileTime
+	EffectiveName          mstypes.RPCUnicodeString
+	FullName               mstypes.RPCUnicodeString
+	LogonScript            mstypes.RPCUnicodeString
+	ProfilePath            mstypes.RPCUnicodeString
+	HomeDirectory          mstypes.RPCUnicodeString
+	HomeDirectoryDrive     mstypes.RPCUnicodeString
+	LogonCount             uint16
+	BadPasswordCount       uint16
+	UserID                 uint32
+	PrimaryGroupID         uint32
+	GroupCount             uint32
+	GroupIDs               []mstypes.GroupMembership `ndr:"pointer,conformant"`
+	UserFlags              uint32
+	UserSessionKey         mstypes.UserSessionKey
+	LogonServer            mstypes.RPCUnicodeString
+	LogonDomainName        mstypes.RPCUnicodeString
+	LogonDomainID          mstypes.RPCSID `ndr:"pointer"`
+	Reserved1              [2]uint32      // Has 2 elements
+	UserAccountControl     uint32
+	SubAuthStatus          uint32
+	LastSuccessfulILogon   mstypes.FileTime
+	LastFailedILogon       mstypes.FileTime
+	FailedILogonCount      uint32
+	Reserved3              uint32
+	SIDCount               uint32
+	ExtraSIDs              []mstypes.KerbSidAndAttributes `ndr:"pointer,conformant"`
+	ResourceGroupDomainSID mstypes.RPCSID                 `ndr:"pointer"`
+	ResourceGroupCount     uint32
+	ResourceGroupIDs       []mstypes.GroupMembership `ndr:"pointer,conformant"`
+}
+
+// Unmarshal bytes into the KerbValidationInfo struct
+func (k *KerbValidationInfo) Unmarshal(b []byte) (err error) {
+	dec := ndr.NewDecoder(bytes.NewReader(b))
+	err = dec.Decode(k)
+	if err != nil {
+		err = fmt.Errorf("error unmarshaling KerbValidationInfo: %v", err)
+	}
+	return
+}
+
+// GetGroupMembershipSIDs returns a slice of strings containing the group membership SIDs found in the PAC.
+func (k *KerbValidationInfo) GetGroupMembershipSIDs() []string {
+	var g []string
+	lSID := k.LogonDomainID.String()
+	for i := range k.GroupIDs {
+		g = append(g, fmt.Sprintf("%s-%d", lSID, k.GroupIDs[i].RelativeID))
+	}
+	for _, s := range k.ExtraSIDs {
+		var exists = false
+		for _, es := range g {
+			if es == s.SID.String() {
+				exists = true
+				break
+			}
+		}
+		if !exists {
+			g = append(g, s.SID.String())
+		}
+	}
+	for _, r := range k.ResourceGroupIDs {
+		var exists = false
+		s := fmt.Sprintf("%s-%d", k.ResourceGroupDomainSID.String(), r.RelativeID)
+		for _, es := range g {
+			if es == s {
+				exists = true
+				break
+			}
+		}
+		if !exists {
+			g = append(g, s)
+		}
+	}
+	return g
+}
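
A short assumed-usage sketch: once a PAC has been decoded (for example via Ticket.GetPACType above), GetGroupMembershipSIDs gives the flattened group SID list. The package and function names are hypothetical.

```go
package pacsketch

import (
	"fmt"

	"github.com/jcmturner/gokrb5/v8/pac"
)

// PrintGroups lists the group membership SIDs found in a processed PAC.
func PrintGroups(p pac.PACType) {
	if p.KerbValidationInfo == nil {
		fmt.Println("PAC has no KerbValidationInfo buffer")
		return
	}
	for _, sid := range p.KerbValidationInfo.GetGroupMembershipSIDs() {
		fmt.Println("member of:", sid)
	}
}
```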
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/pac_type.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/pac_type.go
new file mode 100644
index 0000000..fab2ad7
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/pac_type.go
@@ -0,0 +1,251 @@
+package pac
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"log"
+
+	"github.com/jcmturner/gokrb5/v8/crypto"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/types"
+	"github.com/jcmturner/rpc/v2/mstypes"
+)
+
+const (
+	infoTypeKerbValidationInfo     uint32 = 1
+	infoTypeCredentials            uint32 = 2
+	infoTypePACServerSignatureData uint32 = 6
+	infoTypePACKDCSignatureData    uint32 = 7
+	infoTypePACClientInfo          uint32 = 10
+	infoTypeS4UDelegationInfo      uint32 = 11
+	infoTypeUPNDNSInfo             uint32 = 12
+	infoTypePACClientClaimsInfo    uint32 = 13
+	infoTypePACDeviceInfo          uint32 = 14
+	infoTypePACDeviceClaimsInfo    uint32 = 15
+)
+
+// PACType implements: https://msdn.microsoft.com/en-us/library/cc237950.aspx
+type PACType struct {
+	CBuffers           uint32
+	Version            uint32
+	Buffers            []InfoBuffer
+	Data               []byte
+	KerbValidationInfo *KerbValidationInfo
+	CredentialsInfo    *CredentialsInfo
+	ServerChecksum     *SignatureData
+	KDCChecksum        *SignatureData
+	ClientInfo         *ClientInfo
+	S4UDelegationInfo  *S4UDelegationInfo
+	UPNDNSInfo         *UPNDNSInfo
+	ClientClaimsInfo   *ClientClaimsInfo
+	DeviceInfo         *DeviceInfo
+	DeviceClaimsInfo   *DeviceClaimsInfo
+	ZeroSigData        []byte
+}
+
+// InfoBuffer implements the PAC Info Buffer: https://msdn.microsoft.com/en-us/library/cc237954.aspx
+type InfoBuffer struct {
+	ULType       uint32 // A 32-bit unsigned integer in little-endian format that describes the type of data present in the buffer contained at Offset.
+	CBBufferSize uint32 // A 32-bit unsigned integer in little-endian format that contains the size, in bytes, of the buffer in the PAC located at Offset.
+	Offset       uint64 // A 64-bit unsigned integer in little-endian format that contains the offset to the beginning of the buffer, in bytes, from the beginning of the PACTYPE structure. The data offset MUST be a multiple of eight. The following sections specify the format of each type of element.
+}
+
+// Unmarshal bytes into the PACType struct
+func (pac *PACType) Unmarshal(b []byte) (err error) {
+	pac.Data = b
+	zb := make([]byte, len(b), len(b))
+	copy(zb, b)
+	pac.ZeroSigData = zb
+	r := mstypes.NewReader(bytes.NewReader(b))
+	pac.CBuffers, err = r.Uint32()
+	if err != nil {
+		return
+	}
+	pac.Version, err = r.Uint32()
+	if err != nil {
+		return
+	}
+	buf := make([]InfoBuffer, pac.CBuffers, pac.CBuffers)
+	for i := range buf {
+		buf[i].ULType, err = r.Uint32()
+		if err != nil {
+			return
+		}
+		buf[i].CBBufferSize, err = r.Uint32()
+		if err != nil {
+			return
+		}
+		buf[i].Offset, err = r.Uint64()
+		if err != nil {
+			return
+		}
+	}
+	pac.Buffers = buf
+	return nil
+}
+
+// ProcessPACInfoBuffers processes the PAC Info Buffers.
+// https://msdn.microsoft.com/en-us/library/cc237954.aspx
+func (pac *PACType) ProcessPACInfoBuffers(key types.EncryptionKey, l *log.Logger) error {
+	for _, buf := range pac.Buffers {
+		p := make([]byte, buf.CBBufferSize, buf.CBBufferSize)
+		copy(p, pac.Data[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)])
+		switch buf.ULType {
+		case infoTypeKerbValidationInfo:
+			if pac.KerbValidationInfo != nil {
+				//Must ignore subsequent buffers of this type
+				continue
+			}
+			var k KerbValidationInfo
+			err := k.Unmarshal(p)
+			if err != nil {
+				return fmt.Errorf("error processing KerbValidationInfo: %v", err)
+			}
+			pac.KerbValidationInfo = &k
+		case infoTypeCredentials:
+			// Currently PAC parsing is only useful on the service side in gokrb5.
+			// CredentialsInfo is only useful once gokrb5 implements RFC 4556, and then only on the client side.
+			// Skipping CredentialsInfo - it will be revisited as part of the RFC 4556 implementation.
+			continue
+			//if pac.CredentialsInfo != nil {
+			//	//Must ignore subsequent buffers of this type
+			//	continue
+			//}
+			//var k CredentialsInfo
+			//err := k.Unmarshal(p, key) // The encryption key used is the AS reply key only available to the client.
+			//if err != nil {
+			//	return fmt.Errorf("error processing CredentialsInfo: %v", err)
+			//}
+			//pac.CredentialsInfo = &k
+		case infoTypePACServerSignatureData:
+			if pac.ServerChecksum != nil {
+				//Must ignore subsequent buffers of this type
+				continue
+			}
+			var k SignatureData
+			zb, err := k.Unmarshal(p)
+			copy(pac.ZeroSigData[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)], zb)
+			if err != nil {
+				return fmt.Errorf("error processing ServerChecksum: %v", err)
+			}
+			pac.ServerChecksum = &k
+		case infoTypePACKDCSignatureData:
+			if pac.KDCChecksum != nil {
+				//Must ignore subsequent buffers of this type
+				continue
+			}
+			var k SignatureData
+			zb, err := k.Unmarshal(p)
+			copy(pac.ZeroSigData[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)], zb)
+			if err != nil {
+				return fmt.Errorf("error processing KDCChecksum: %v", err)
+			}
+			pac.KDCChecksum = &k
+		case infoTypePACClientInfo:
+			if pac.ClientInfo != nil {
+				//Must ignore subsequent buffers of this type
+				continue
+			}
+			var k ClientInfo
+			err := k.Unmarshal(p)
+			if err != nil {
+				return fmt.Errorf("error processing ClientInfo: %v", err)
+			}
+			pac.ClientInfo = &k
+		case infoTypeS4UDelegationInfo:
+			if pac.S4UDelegationInfo != nil {
+				//Must ignore subsequent buffers of this type
+				continue
+			}
+			var k S4UDelegationInfo
+			err := k.Unmarshal(p)
+			if err != nil {
+				l.Printf("could not process S4U_DelegationInfo: %v", err)
+				continue
+			}
+			pac.S4UDelegationInfo = &k
+		case infoTypeUPNDNSInfo:
+			if pac.UPNDNSInfo != nil {
+				//Must ignore subsequent buffers of this type
+				continue
+			}
+			var k UPNDNSInfo
+			err := k.Unmarshal(p)
+			if err != nil {
+				l.Printf("could not process UPN_DNSInfo: %v", err)
+				continue
+			}
+			pac.UPNDNSInfo = &k
+		case infoTypePACClientClaimsInfo:
+			if pac.ClientClaimsInfo != nil || len(p) < 1 {
+				//Must ignore subsequent buffers of this type
+				continue
+			}
+			var k ClientClaimsInfo
+			err := k.Unmarshal(p)
+			if err != nil {
+				l.Printf("could not process ClientClaimsInfo: %v", err)
+				continue
+			}
+			pac.ClientClaimsInfo = &k
+		case infoTypePACDeviceInfo:
+			if pac.DeviceInfo != nil {
+				//Must ignore subsequent buffers of this type
+				continue
+			}
+			var k DeviceInfo
+			err := k.Unmarshal(p)
+			if err != nil {
+				l.Printf("could not process DeviceInfo: %v", err)
+				continue
+			}
+			pac.DeviceInfo = &k
+		case infoTypePACDeviceClaimsInfo:
+			if pac.DeviceClaimsInfo != nil {
+				//Must ignore subsequent buffers of this type
+				continue
+			}
+			var k DeviceClaimsInfo
+			err := k.Unmarshal(p)
+			if err != nil {
+				l.Printf("could not process DeviceClaimsInfo: %v", err)
+				continue
+			}
+			pac.DeviceClaimsInfo = &k
+		}
+	}
+
+	if ok, err := pac.verify(key); !ok {
+		return err
+	}
+
+	return nil
+}
+
+func (pac *PACType) verify(key types.EncryptionKey) (bool, error) {
+	if pac.KerbValidationInfo == nil {
+		return false, errors.New("PAC Info Buffers does not contain a KerbValidationInfo")
+	}
+	if pac.ServerChecksum == nil {
+		return false, errors.New("PAC Info Buffers does not contain a ServerChecksum")
+	}
+	if pac.KDCChecksum == nil {
+		return false, errors.New("PAC Info Buffers does not contain a KDCChecksum")
+	}
+	if pac.ClientInfo == nil {
+		return false, errors.New("PAC Info Buffers does not contain a ClientInfo")
+	}
+	etype, err := crypto.GetChksumEtype(int32(pac.ServerChecksum.SignatureType))
+	if err != nil {
+		return false, err
+	}
+	if ok := etype.VerifyChecksum(key.KeyValue,
+		pac.ZeroSigData,
+		pac.ServerChecksum.Signature,
+		keyusage.KERB_NON_KERB_CKSUM_SALT); !ok {
+		return false, errors.New("PAC service checksum verification failed")
+	}
+
+	return true, nil
+}
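
A hedged sketch of the low-level flow that Ticket.GetPACType drives (assumed usage; pacBytes and serviceKey are placeholders supplied by the caller): decode the PACTYPE header, then process and verify the info buffers with the service key.

```go
package pacsketch

import (
	"log"
	"os"

	"github.com/jcmturner/gokrb5/v8/pac"
	"github.com/jcmturner/gokrb5/v8/types"
)

// DecodePAC unmarshals a raw PAC and verifies its buffers with the service key.
func DecodePAC(pacBytes []byte, serviceKey types.EncryptionKey) (*pac.PACType, error) {
	var p pac.PACType
	if err := p.Unmarshal(pacBytes); err != nil {
		return nil, err
	}
	l := log.New(os.Stderr, "pac: ", log.LstdFlags)
	// ProcessPACInfoBuffers also verifies the server checksum against serviceKey.
	if err := p.ProcessPACInfoBuffers(serviceKey, l); err != nil {
		return nil, err
	}
	return &p, nil
}
```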
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/s4u_delegation_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/s4u_delegation_info.go
new file mode 100644
index 0000000..da837d4
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/s4u_delegation_info.go
@@ -0,0 +1,26 @@
+package pac
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/jcmturner/rpc/v2/mstypes"
+	"github.com/jcmturner/rpc/v2/ndr"
+)
+
+// S4UDelegationInfo implements https://msdn.microsoft.com/en-us/library/cc237944.aspx
+type S4UDelegationInfo struct {
+	S4U2proxyTarget      mstypes.RPCUnicodeString // The name of the principal to whom the application can forward the ticket.
+	TransitedListSize    uint32
+	S4UTransitedServices []mstypes.RPCUnicodeString `ndr:"pointer,conformant"` // List of all services that have been delegated through by this client and subsequent services or servers. Size is the value of TransitedListSize.
+}
+
+// Unmarshal bytes into the S4UDelegationInfo struct
+func (k *S4UDelegationInfo) Unmarshal(b []byte) (err error) {
+	dec := ndr.NewDecoder(bytes.NewReader(b))
+	err = dec.Decode(k)
+	if err != nil {
+		err = fmt.Errorf("error unmarshaling S4UDelegationInfo: %v", err)
+	}
+	return
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/signature_data.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/signature_data.go
new file mode 100644
index 0000000..8f6aa58
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/signature_data.go
@@ -0,0 +1,67 @@
+package pac
+
+import (
+	"bytes"
+
+	"github.com/jcmturner/gokrb5/v8/iana/chksumtype"
+	"github.com/jcmturner/rpc/v2/mstypes"
+)
+
+/*
+https://msdn.microsoft.com/en-us/library/cc237955.aspx
+
+The Key Usage Value MUST be KERB_NON_KERB_CKSUM_SALT (17) [MS-KILE] (section 3.1.5.9).
+
+Server Signature (SignatureType = 0x00000006)
+https://msdn.microsoft.com/en-us/library/cc237957.aspx
+
+KDC Signature (SignatureType = 0x00000007)
+https://msdn.microsoft.com/en-us/library/dd357117.aspx
+*/
+
+// SignatureData implements https://msdn.microsoft.com/en-us/library/cc237955.aspx
+type SignatureData struct {
+	SignatureType  uint32 // A 32-bit unsigned integer value in little-endian format that defines the cryptographic system used to calculate the checksum. This MUST be one of the following checksum types: KERB_CHECKSUM_HMAC_MD5 (signature size = 16), HMAC_SHA1_96_AES128 (signature size = 12), HMAC_SHA1_96_AES256 (signature size = 12).
+	Signature      []byte // Size depends on the type. See comment above.
+	RODCIdentifier uint16 // A 16-bit unsigned integer value in little-endian format that contains the first 16 bits of the key version number ([MS-KILE] section 3.1.5.8) when the KDC is an RODC. When the KDC is not an RODC, this field does not exist.
+}
+
+// Unmarshal bytes into the SignatureData struct
+func (k *SignatureData) Unmarshal(b []byte) (rb []byte, err error) {
+	r := mstypes.NewReader(bytes.NewReader(b))
+
+	k.SignatureType, err = r.Uint32()
+	if err != nil {
+		return
+	}
+
+	var c int
+	switch k.SignatureType {
+	case chksumtype.KERB_CHECKSUM_HMAC_MD5_UNSIGNED:
+		c = 16
+	case uint32(chksumtype.HMAC_SHA1_96_AES128):
+		c = 12
+	case uint32(chksumtype.HMAC_SHA1_96_AES256):
+		c = 12
+	}
+	k.Signature, err = r.ReadBytes(c)
+	if err != nil {
+		return
+	}
+
+	// When the KDC is not a Read Only Domain Controller (RODC), this field does not exist.
+	if len(b) >= 4+c+2 {
+		k.RODCIdentifier, err = r.Uint16()
+		if err != nil {
+			return
+		}
+	}
+
+	// Create bytes with zeroed signature needed for checksum verification
+	rb = make([]byte, len(b), len(b))
+	copy(rb, b)
+	z := make([]byte, len(b), len(b))
+	copy(rb[4:4+c], z)
+
+	return
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/supplemental_cred.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/supplemental_cred.go
new file mode 100644
index 0000000..d40679d
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/supplemental_cred.go
@@ -0,0 +1,87 @@
+package pac
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/jcmturner/rpc/v2/mstypes"
+	"github.com/jcmturner/rpc/v2/ndr"
+)
+
+const (
+	// NTLMSupCredLMOWF indicates that the LM OWF member is present and valid.
+	NTLMSupCredLMOWF uint32 = 31
+	// NTLMSupCredNTOWF indicates that the NT OWF member is present and valid.
+	NTLMSupCredNTOWF uint32 = 30
+)
+
+// NTLMSupplementalCred implements https://msdn.microsoft.com/en-us/library/cc237949.aspx
+type NTLMSupplementalCred struct {
+	Version    uint32 // A 32-bit unsigned integer that defines the credential version.This field MUST be 0x00000000.
+	Flags      uint32
+	LMPassword []byte // A 16-element array of unsigned 8-bit integers that define the LM OWF. The LMPassword member MUST be ignored if the L flag is not set in the Flags member.
+	NTPassword []byte // A 16-element array of unsigned 8-bit integers that define the NT OWF. The NTPassword member MUST be ignored if the N flag is not set in the Flags member.
+}
+
+// Unmarshal converts the bytes provided into a NTLMSupplementalCred.
+func (c *NTLMSupplementalCred) Unmarshal(b []byte) (err error) {
+	r := mstypes.NewReader(bytes.NewReader(b))
+	c.Version, err = r.Uint32()
+	if err != nil {
+		return
+	}
+	if c.Version != 0 {
+		err = errors.New("NTLMSupplementalCred version is not zero")
+		return
+	}
+	c.Flags, err = r.Uint32()
+	if err != nil {
+		return
+	}
+	if isFlagSet(c.Flags, NTLMSupCredLMOWF) {
+		c.LMPassword, err = r.ReadBytes(16)
+		if err != nil {
+			return
+		}
+	}
+	if isFlagSet(c.Flags, NTLMSupCredNTOWF) {
+		c.NTPassword, err = r.ReadBytes(16)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// isFlagSet tests if the flag at bit position i is set in the little-endian uint32 flags value.
+func isFlagSet(f uint32, i uint32) bool {
+	//Which byte?
+	b := int(i / 8)
+	//Which bit in byte
+	p := uint(7 - (int(i) - 8*b))
+	fb := make([]byte, 4)
+	binary.LittleEndian.PutUint32(fb, f)
+	if fb[b]&(1<<p) != 0 {
+		return true
+	}
+	return false
+}
+
+// SECPKGSupplementalCred implements https://msdn.microsoft.com/en-us/library/cc237956.aspx
+type SECPKGSupplementalCred struct {
+	PackageName    mstypes.RPCUnicodeString
+	CredentialSize uint32
+	Credentials    []uint8 `ndr:"pointer,conformant"` // Is a ptr. Size is the value of CredentialSize
+}
+
+// Unmarshal converts the bytes provided into a SECPKGSupplementalCred.
+func (c *SECPKGSupplementalCred) Unmarshal(b []byte) (err error) {
+	dec := ndr.NewDecoder(bytes.NewReader(b))
+	err = dec.Decode(c)
+	if err != nil {
+		err = fmt.Errorf("error unmarshaling SECPKGSupplementalCred: %v", err)
+	}
+	return
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/pac/upn_dns_info.go b/vendor/github.com/jcmturner/gokrb5/v8/pac/upn_dns_info.go
new file mode 100644
index 0000000..d374b96
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/pac/upn_dns_info.go
@@ -0,0 +1,73 @@
+package pac
+
+import (
+	"bytes"
+
+	"github.com/jcmturner/rpc/v2/mstypes"
+)
+
+// UPNDNSInfo implements https://msdn.microsoft.com/en-us/library/dd240468.aspx
+type UPNDNSInfo struct {
+	UPNLength           uint16 // An unsigned 16-bit integer in little-endian format that specifies the length, in bytes, of the UPN field.
+	UPNOffset           uint16 // An unsigned 16-bit integer in little-endian format that contains the offset to the beginning of the buffer, in bytes, from the beginning of the UPN_DNS_INFO structure.
+	DNSDomainNameLength uint16
+	DNSDomainNameOffset uint16
+	Flags               uint32
+	UPN                 string
+	DNSDomain           string
+}
+
+const (
+	upnNoUPNAttr = 31 // The user account object does not have the userPrincipalName attribute ([MS-ADA3] section 2.349) set. A UPN constructed by concatenating the user name with the DNS domain name of the account domain is provided.
+)
+
+// Unmarshal bytes into the UPN_DNSInfo struct
+func (k *UPNDNSInfo) Unmarshal(b []byte) (err error) {
+	//The UPN_DNS_INFO structure is a simple structure that is not NDR-encoded.
+	r := mstypes.NewReader(bytes.NewReader(b))
+	k.UPNLength, err = r.Uint16()
+	if err != nil {
+		return
+	}
+	k.UPNOffset, err = r.Uint16()
+	if err != nil {
+		return
+	}
+	k.DNSDomainNameLength, err = r.Uint16()
+	if err != nil {
+		return
+	}
+	k.DNSDomainNameOffset, err = r.Uint16()
+	if err != nil {
+		return
+	}
+	k.Flags, err = r.Uint32()
+	if err != nil {
+		return
+	}
+	ub := mstypes.NewReader(bytes.NewReader(b[k.UPNOffset : k.UPNOffset+k.UPNLength]))
+	db := mstypes.NewReader(bytes.NewReader(b[k.DNSDomainNameOffset : k.DNSDomainNameOffset+k.DNSDomainNameLength]))
+
+	u := make([]rune, k.UPNLength/2, k.UPNLength/2)
+	for i := 0; i < len(u); i++ {
+		var r uint16
+		r, err = ub.Uint16()
+		if err != nil {
+			return
+		}
+		u[i] = rune(r)
+	}
+	k.UPN = string(u)
+	d := make([]rune, k.DNSDomainNameLength/2, k.DNSDomainNameLength/2)
+	for i := 0; i < len(d); i++ {
+		var r uint16
+		r, err = db.Uint16()
+		if err != nil {
+			return
+		}
+		d[i] = rune(r)
+	}
+	k.DNSDomain = string(d)
+
+	return
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go b/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go
new file mode 100644
index 0000000..1fdba78
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go
@@ -0,0 +1,81 @@
+// Package types provides Kerberos 5 data types.
+package types
+
+import (
+	"crypto/rand"
+	"fmt"
+	"math"
+	"math/big"
+	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/asn1tools"
+	"github.com/jcmturner/gokrb5/v8/iana"
+	"github.com/jcmturner/gokrb5/v8/iana/asnAppTag"
+)
+
+// Authenticator - A record containing information that can be shown to have been recently generated using the session
+// key known only by the client and server.
+// https://tools.ietf.org/html/rfc4120#section-5.5.1
+type Authenticator struct {
+	AVNO              int               `asn1:"explicit,tag:0"`
+	CRealm            string            `asn1:"generalstring,explicit,tag:1"`
+	CName             PrincipalName     `asn1:"explicit,tag:2"`
+	Cksum             Checksum          `asn1:"explicit,optional,tag:3"`
+	Cusec             int               `asn1:"explicit,tag:4"`
+	CTime             time.Time         `asn1:"generalized,explicit,tag:5"`
+	SubKey            EncryptionKey     `asn1:"explicit,optional,tag:6"`
+	SeqNumber         int64             `asn1:"explicit,optional,tag:7"`
+	AuthorizationData AuthorizationData `asn1:"explicit,optional,tag:8"`
+}
+
+// NewAuthenticator creates a new Authenticator.
+func NewAuthenticator(realm string, cname PrincipalName) (Authenticator, error) {
+	seq, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
+	if err != nil {
+		return Authenticator{}, err
+	}
+	t := time.Now().UTC()
+	return Authenticator{
+		AVNO:      iana.PVNO,
+		CRealm:    realm,
+		CName:     cname,
+		Cksum:     Checksum{},
+		Cusec:     int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)),
+		CTime:     t,
+		SeqNumber: seq.Int64(),
+	}, nil
+}
+
+// GenerateSeqNumberAndSubKey sets the Authenticator's sequence number and subkey.
+func (a *Authenticator) GenerateSeqNumberAndSubKey(keyType int32, keySize int) error {
+	seq, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
+	if err != nil {
+		return err
+	}
+	a.SeqNumber = seq.Int64()
+	// Generate the subkey value.
+	sk := make([]byte, keySize)
+	if _, err := rand.Read(sk); err != nil {
+		return err
+	}
+	a.SubKey = EncryptionKey{
+		KeyType:  keyType,
+		KeyValue: sk,
+	}
+	return nil
+}
+
+// Unmarshal bytes into the Authenticator.
+func (a *Authenticator) Unmarshal(b []byte) error {
+	_, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.Authenticator))
+	return err
+}
+
+// Marshal the Authenticator.
+func (a *Authenticator) Marshal() ([]byte, error) {
+	b, err := asn1.Marshal(*a)
+	if err != nil {
+		return nil, err
+	}
+	b = asn1tools.AddASNAppTag(b, asnAppTag.Authenticator)
+	return b, nil
+}
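
A minimal client-side sketch of the Authenticator API above (assumed usage; the realm, the principal name fields, name type 1 taken to be KRB_NT_PRINCIPAL, and the AES-256 parameters of etype 18 with a 32-byte key are illustrative assumptions).

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gokrb5/v8/types"
)

func main() {
	// Assumed principal construction; NameType 1 is taken to be KRB_NT_PRINCIPAL.
	cname := types.PrincipalName{NameType: 1, NameString: []string{"user1"}}
	a, err := types.NewAuthenticator("EXAMPLE.COM", cname)
	if err != nil {
		panic(err)
	}
	// Attach a fresh sequence number and a 32-byte sub-session key (etype 18 assumed).
	if err := a.GenerateSeqNumberAndSubKey(18, 32); err != nil {
		panic(err)
	}
	b, err := a.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("marshaled authenticator: %d bytes\n", len(b))
}
```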
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/AuthorizationData.go b/vendor/github.com/jcmturner/gokrb5/v8/types/AuthorizationData.go
new file mode 100644
index 0000000..80c477c
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/types/AuthorizationData.go
@@ -0,0 +1,55 @@
+package types
+
+import (
+	"github.com/jcmturner/gofork/encoding/asn1"
+)
+
+// Reference: https://www.ietf.org/rfc/rfc4120.txt
+// Section: 5.2.6
+
+// AuthorizationData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6
+type AuthorizationData []AuthorizationDataEntry
+
+// AuthorizationDataEntry implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6
+type AuthorizationDataEntry struct {
+	ADType int32  `asn1:"explicit,tag:0"`
+	ADData []byte `asn1:"explicit,tag:1"`
+}
+
+// ADIfRelevant implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.1
+type ADIfRelevant AuthorizationData
+
+// ADKDCIssued implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.2
+type ADKDCIssued struct {
+	ADChecksum Checksum          `asn1:"explicit,tag:0"`
+	IRealm     string            `asn1:"optional,generalstring,explicit,tag:1"`
+	Isname     PrincipalName     `asn1:"optional,explicit,tag:2"`
+	Elements   AuthorizationData `asn1:"explicit,tag:3"`
+}
+
+// ADAndOr implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.3
+type ADAndOr struct {
+	ConditionCount int32             `asn1:"explicit,tag:0"`
+	Elements       AuthorizationData `asn1:"explicit,tag:1"`
+}
+
+// ADMandatoryForKDC implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.4
+type ADMandatoryForKDC AuthorizationData
+
+// Unmarshal bytes into the ADKDCIssued.
+func (a *ADKDCIssued) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, a)
+	return err
+}
+
+// Unmarshal bytes into the AuthorizationData.
+func (a *AuthorizationData) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, a)
+	return err
+}
+
+// Unmarshal bytes into the AuthorizationDataEntry.
+func (a *AuthorizationDataEntry) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, a)
+	return err
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/Cryptosystem.go b/vendor/github.com/jcmturner/gokrb5/v8/types/Cryptosystem.go
new file mode 100644
index 0000000..2f354ea
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/types/Cryptosystem.go
@@ -0,0 +1,72 @@
+package types
+
+import (
+	"crypto/rand"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/crypto/etype"
+)
+
+// Reference: https://www.ietf.org/rfc/rfc4120.txt
+// Section: 5.2.9
+
+// EncryptedData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.9
+type EncryptedData struct {
+	EType  int32  `asn1:"explicit,tag:0"`
+	KVNO   int    `asn1:"explicit,optional,tag:1"`
+	Cipher []byte `asn1:"explicit,tag:2"`
+}
+
+// EncryptionKey implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.9
+// AKA KeyBlock
+type EncryptionKey struct {
+	KeyType  int32  `asn1:"explicit,tag:0"`
+	KeyValue []byte `asn1:"explicit,tag:1" json:"-"`
+}
+
+// Checksum implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.9
+type Checksum struct {
+	CksumType int32  `asn1:"explicit,tag:0"`
+	Checksum  []byte `asn1:"explicit,tag:1"`
+}
+
+// Unmarshal bytes into the EncryptedData.
+func (a *EncryptedData) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, a)
+	return err
+}
+
+// Marshal the EncryptedData.
+func (a *EncryptedData) Marshal() ([]byte, error) {
+	edb, err := asn1.Marshal(*a)
+	if err != nil {
+		return edb, err
+	}
+	return edb, nil
+}
+
+// Unmarshal bytes into the EncryptionKey.
+func (a *EncryptionKey) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, a)
+	return err
+}
+
+// Unmarshal bytes into the Checksum.
+func (a *Checksum) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, a)
+	return err
+}
+
+// GenerateEncryptionKey creates a new EncryptionKey with a random key value.
+func GenerateEncryptionKey(etype etype.EType) (EncryptionKey, error) {
+	k := EncryptionKey{
+		KeyType: etype.GetETypeID(),
+	}
+	b := make([]byte, etype.GetKeyByteSize(), etype.GetKeyByteSize())
+	_, err := rand.Read(b)
+	if err != nil {
+		return k, err
+	}
+	k.KeyValue = b
+	return k, nil
+}
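
A brief assumed-usage sketch pairing GenerateEncryptionKey with crypto.GetEtype; the etype ID 18 (aes256-cts-hmac-sha1-96) is an assumption for illustration.

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gokrb5/v8/crypto"
	"github.com/jcmturner/gokrb5/v8/types"
)

func main() {
	et, err := crypto.GetEtype(18) // assumed etype ID
	if err != nil {
		panic(err)
	}
	key, err := types.GenerateEncryptionKey(et)
	if err != nil {
		panic(err)
	}
	fmt.Printf("generated key: type %d, %d bytes\n", key.KeyType, len(key.KeyValue))
}
```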
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/HostAddress.go b/vendor/github.com/jcmturner/gokrb5/v8/types/HostAddress.go
new file mode 100644
index 0000000..895fe80
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/types/HostAddress.go
@@ -0,0 +1,180 @@
+package types
+
+// Reference: https://www.ietf.org/rfc/rfc4120.txt
+// Section: 5.2.5
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/iana/addrtype"
+)
+
+// HostAddresses implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.5
+type HostAddresses []HostAddress
+
+// HostAddress implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.5
+type HostAddress struct {
+	AddrType int32  `asn1:"explicit,tag:0"`
+	Address  []byte `asn1:"explicit,tag:1"`
+}
+
+// GetHostAddress returns a HostAddress struct from a string in the format <hostname>:<port>
+func GetHostAddress(s string) (HostAddress, error) {
+	var h HostAddress
+	cAddr, _, err := net.SplitHostPort(s)
+	if err != nil {
+		return h, fmt.Errorf("invalid format of client address: %v", err)
+	}
+	ip := net.ParseIP(cAddr)
+	var ht int32
+	if ip.To4() != nil {
+		ht = addrtype.IPv4
+		ip = ip.To4()
+	} else if ip.To16() != nil {
+		ht = addrtype.IPv6
+		ip = ip.To16()
+	} else {
+		return h, fmt.Errorf("could not determine client's address types: %v", err)
+	}
+	h = HostAddress{
+		AddrType: ht,
+		Address:  ip,
+	}
+	return h, nil
+}
+
+// GetAddress returns a string representation of the HostAddress.
+func (h *HostAddress) GetAddress() (string, error) {
+	var b []byte
+	_, err := asn1.Unmarshal(h.Address, &b)
+	return string(b), err
+}
+
+// LocalHostAddresses returns a HostAddresses struct for the local machine's interface IP addresses.
+func LocalHostAddresses() (ha HostAddresses, err error) {
+	ifs, err := net.Interfaces()
+	if err != nil {
+		return
+	}
+	for _, iface := range ifs {
+		if iface.Flags&net.FlagLoopback != 0 || iface.Flags&net.FlagUp == 0 {
+			// Interface is either loopback or not up
+			continue
+		}
+		addrs, err := iface.Addrs()
+		if err != nil {
+			continue
+		}
+		for _, addr := range addrs {
+			var ip net.IP
+			switch v := addr.(type) {
+			case *net.IPNet:
+				ip = v.IP
+			case *net.IPAddr:
+				ip = v.IP
+			}
+			var a HostAddress
+			if ip.To16() == nil {
+				// neither IPv4 nor IPv6
+				continue
+			}
+			if ip.To4() != nil {
+				//Is IPv4
+				a.AddrType = addrtype.IPv4
+				a.Address = ip.To4()
+			} else {
+				a.AddrType = addrtype.IPv6
+				a.Address = ip.To16()
+			}
+			ha = append(ha, a)
+		}
+	}
+	return ha, nil
+}
+
+// HostAddressesFromNetIPs returns a HostAddresses type from a slice of net.IP
+func HostAddressesFromNetIPs(ips []net.IP) (ha HostAddresses) {
+	for _, ip := range ips {
+		ha = append(ha, HostAddressFromNetIP(ip))
+	}
+	return ha
+}
+
+// HostAddressFromNetIP returns a HostAddress type from a net.IP
+func HostAddressFromNetIP(ip net.IP) HostAddress {
+	if ip.To4() != nil {
+		//Is IPv4
+		return HostAddress{
+			AddrType: addrtype.IPv4,
+			Address:  ip.To4(),
+		}
+	}
+	return HostAddress{
+		AddrType: addrtype.IPv6,
+		Address:  ip.To16(),
+	}
+}
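+
+// exampleHostAddressesFromIPs is an illustrative sketch (editorial addition, not
+// upstream gokrb5 code). The two addresses are documentation-range placeholders.
+func exampleHostAddressesFromIPs() HostAddresses {
+	// IPv4 and IPv6 inputs are mapped to addrtype.IPv4 and addrtype.IPv6 entries.
+	return HostAddressesFromNetIPs([]net.IP{
+		net.ParseIP("192.0.2.1"),
+		net.ParseIP("2001:db8::1"),
+	})
+}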
+
+// HostAddressesEqual tests if two HostAddress slices are equal.
+func HostAddressesEqual(h, a []HostAddress) bool {
+	if len(h) != len(a) {
+		return false
+	}
+	for _, e := range a {
+		var found bool
+		for _, i := range h {
+			if e.Equal(i) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return false
+		}
+	}
+	return true
+}
+
+// HostAddressesContains tests if a HostAddress is contained in a HostAddress slice.
+func HostAddressesContains(h []HostAddress, a HostAddress) bool {
+	for _, e := range h {
+		if e.Equal(a) {
+			return true
+		}
+	}
+	return false
+}
+
+// Equal tests if the HostAddress is equal to another HostAddress provided.
+func (h *HostAddress) Equal(a HostAddress) bool {
+	if h.AddrType != a.AddrType {
+		return false
+	}
+	return bytes.Equal(h.Address, a.Address)
+}
+
+// Contains tests if a HostAddress is contained within the HostAddresses struct.
+func (h *HostAddresses) Contains(a HostAddress) bool {
+	for _, e := range *h {
+		if e.Equal(a) {
+			return true
+		}
+	}
+	return false
+}
+
+// Equal tests if a HostAddress slice is equal to the HostAddresses struct.
+func (h *HostAddresses) Equal(a []HostAddress) bool {
+	if len(*h) != len(a) {
+		return false
+	}
+	for _, e := range a {
+		if !h.Contains(e) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go b/vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go
new file mode 100644
index 0000000..0f20383
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/types/KerberosFlags.go
@@ -0,0 +1,68 @@
+package types
+
+// Reference: https://www.ietf.org/rfc/rfc4120.txt
+// Section: 5.2.8
+
+import (
+	"github.com/jcmturner/gofork/encoding/asn1"
+)
+
+// NewKrbFlags returns an ASN1 BitString struct of the right size for KrbFlags.
+func NewKrbFlags() asn1.BitString {
+	f := asn1.BitString{}
+	f.Bytes = make([]byte, 4)
+	f.BitLength = len(f.Bytes) * 8
+	return f
+}
+
+// SetFlags sets the flags of an ASN1 BitString.
+func SetFlags(f *asn1.BitString, j []int) {
+	for _, i := range j {
+		SetFlag(f, i)
+	}
+}
+
+// SetFlag sets a flag in an ASN1 BitString.
+func SetFlag(f *asn1.BitString, i int) {
+	for l := len(f.Bytes); l < 4; l++ {
+		(*f).Bytes = append((*f).Bytes, byte(0))
+		(*f).BitLength = len((*f).Bytes) * 8
+	}
+	//Which byte?
+	b := i / 8
+	//Which bit in byte
+	p := uint(7 - (i - 8*b))
+	(*f).Bytes[b] = (*f).Bytes[b] | (1 << p)
+}
+
+// UnsetFlags unsets flags in an ASN1 BitString.
+func UnsetFlags(f *asn1.BitString, j []int) {
+	for _, i := range j {
+		UnsetFlag(f, i)
+	}
+}
+
+// UnsetFlag unsets a flag in an ASN1 BitString.
+func UnsetFlag(f *asn1.BitString, i int) {
+	for l := len(f.Bytes); l < 4; l++ {
+		(*f).Bytes = append((*f).Bytes, byte(0))
+		(*f).BitLength = len((*f).Bytes) * 8
+	}
+	//Which byte?
+	b := i / 8
+	//Which bit in byte
+	p := uint(7 - (i - 8*b))
+	(*f).Bytes[b] = (*f).Bytes[b] &^ (1 << p)
+}
+
+// IsFlagSet tests if a flag is set in the ASN1 BitString.
+func IsFlagSet(f *asn1.BitString, i int) bool {
+	//Which byte?
+	b := i / 8
+	//Which bit in byte
+	p := uint(7 - (i - 8*b))
+	if (*f).Bytes[b]&(1<<p) != 0 {
+		return true
+	}
+	return false
+}
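+
+// exampleKrbFlags is an illustrative sketch (editorial addition, not upstream
+// gokrb5 code). The flag position 1 ("forwardable" in RFC 4120) is used purely as
+// an example value.
+func exampleKrbFlags() bool {
+	f := NewKrbFlags() // 32-bit zeroed flag field
+	SetFlag(&f, 1)     // set bit 1
+	return IsFlagSet(&f, 1)
+}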
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/PAData.go b/vendor/github.com/jcmturner/gokrb5/v8/types/PAData.go
new file mode 100644
index 0000000..41645ec
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/types/PAData.go
@@ -0,0 +1,155 @@
+package types
+
+// Reference: https://www.ietf.org/rfc/rfc4120.txt
+// Section: 5.2.7
+import (
+	"fmt"
+	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/iana/patype"
+)
+
+// PAData implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7
+type PAData struct {
+	PADataType  int32  `asn1:"explicit,tag:1"`
+	PADataValue []byte `asn1:"explicit,tag:2"`
+}
+
+// PADataSequence implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7
+type PADataSequence []PAData
+
+// MethodData implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.9.1
+type MethodData []PAData
+
+// PAEncTimestamp implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7.2
+type PAEncTimestamp EncryptedData
+
+// PAEncTSEnc implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7.2
+type PAEncTSEnc struct {
+	PATimestamp time.Time `asn1:"generalized,explicit,tag:0"`
+	PAUSec      int       `asn1:"explicit,optional,tag:1"`
+}
+
+// Contains tests if a PADataSequence contains PA Data of a certain type.
+func (pas *PADataSequence) Contains(patype int32) bool {
+	for _, pa := range *pas {
+		if pa.PADataType == patype {
+			return true
+		}
+	}
+	return false
+}
+
+// GetPAEncTSEncAsnMarshalled returns the bytes of a PAEncTSEnc.
+func GetPAEncTSEncAsnMarshalled() ([]byte, error) {
+	t := time.Now().UTC()
+	p := PAEncTSEnc{
+		PATimestamp: t,
+		PAUSec:      int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)),
+	}
+	b, err := asn1.Marshal(p)
+	if err != nil {
+		return b, fmt.Errorf("error marshaling PAEncTSEnc: %v", err)
+	}
+	return b, nil
+}
+
+// ETypeInfoEntry implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7.4
+type ETypeInfoEntry struct {
+	EType int32  `asn1:"explicit,tag:0"`
+	Salt  []byte `asn1:"explicit,optional,tag:1"`
+}
+
+// ETypeInfo implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7.4
+type ETypeInfo []ETypeInfoEntry
+
+// ETypeInfo2Entry implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7.5
+type ETypeInfo2Entry struct {
+	EType     int32  `asn1:"explicit,tag:0"`
+	Salt      string `asn1:"explicit,optional,generalstring,tag:1"`
+	S2KParams []byte `asn1:"explicit,optional,tag:2"`
+}
+
+// ETypeInfo2 implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7.5
+type ETypeInfo2 []ETypeInfo2Entry
+
+// PAReqEncPARep PA Data Type
+type PAReqEncPARep struct {
+	ChksumType int32  `asn1:"explicit,tag:0"`
+	Chksum     []byte `asn1:"explicit,tag:1"`
+}
+
+// Unmarshal bytes into the PAData
+func (pa *PAData) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, pa)
+	return err
+}
+
+// Unmarshal bytes into the PADataSequence
+func (pas *PADataSequence) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, pas)
+	return err
+}
+
+// Unmarshal bytes into the PAReqEncPARep
+func (pa *PAReqEncPARep) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, pa)
+	return err
+}
+
+// Unmarshal bytes into the PAEncTimestamp
+func (pa *PAEncTimestamp) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, pa)
+	return err
+}
+
+// Unmarshal bytes into the PAEncTSEnc
+func (pa *PAEncTSEnc) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, pa)
+	return err
+}
+
+// Unmarshal bytes into the ETypeInfo
+func (a *ETypeInfo) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, a)
+	return err
+}
+
+// Unmarshal bytes into the ETypeInfoEntry
+func (a *ETypeInfoEntry) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, a)
+	return err
+}
+
+// Unmarshal bytes into the ETypeInfo2
+func (a *ETypeInfo2) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, a)
+	return err
+}
+
+// Unmarshal bytes into the ETypeInfo2Entry
+func (a *ETypeInfo2Entry) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, a)
+	return err
+}
+
+// GetETypeInfo returns an ETypeInfo from the PAData.
+func (pa *PAData) GetETypeInfo() (d ETypeInfo, err error) {
+	if pa.PADataType != patype.PA_ETYPE_INFO {
+		err = fmt.Errorf("PAData does not contain PA EType Info data. TypeID Expected: %v; Actual: %v", patype.PA_ETYPE_INFO, pa.PADataType)
+		return
+	}
+	_, err = asn1.Unmarshal(pa.PADataValue, &d)
+	return
+}
+
+// GetETypeInfo2 returns an ETypeInfo2 from the PAData.
+func (pa *PAData) GetETypeInfo2() (d ETypeInfo2, err error) {
+	if pa.PADataType != patype.PA_ETYPE_INFO2 {
+		err = fmt.Errorf("PAData does not contain PA EType Info 2 data. TypeID Expected: %v; Actual: %v", patype.PA_ETYPE_INFO2, pa.PADataType)
+		return
+	}
+	_, err = asn1.Unmarshal(pa.PADataValue, &d)
+	return
+}
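+
+// examplePAETypeInfo2 is an illustrative sketch (editorial addition, not upstream
+// gokrb5 code). Given pre-authentication data from a KDC reply or error, it returns
+// the first PA-ETYPE-INFO2 payload, which carries the salt and s2kparams used for
+// key derivation.
+func examplePAETypeInfo2(pas PADataSequence) (ETypeInfo2, error) {
+	for _, pa := range pas {
+		if pa.PADataType == patype.PA_ETYPE_INFO2 {
+			return pa.GetETypeInfo2()
+		}
+	}
+	return nil, fmt.Errorf("no PA-ETYPE-INFO2 element in the sequence")
+}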
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/PrincipalName.go b/vendor/github.com/jcmturner/gokrb5/v8/types/PrincipalName.go
new file mode 100644
index 0000000..d48d2c1
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/types/PrincipalName.go
@@ -0,0 +1,67 @@
+package types
+
+import (
+	"strings"
+
+	"github.com/jcmturner/gokrb5/v8/iana/nametype"
+)
+
+// Reference: https://www.ietf.org/rfc/rfc4120.txt
+// Section: 5.2.2
+
+// PrincipalName implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.2
+type PrincipalName struct {
+	NameType   int32    `asn1:"explicit,tag:0"`
+	NameString []string `asn1:"generalstring,explicit,tag:1"`
+}
+
+// NewPrincipalName creates a new PrincipalName from the name type int32 and name string provided.
+func NewPrincipalName(ntype int32, spn string) PrincipalName {
+	return PrincipalName{
+		NameType:   ntype,
+		NameString: strings.Split(spn, "/"),
+	}
+}
+
+// GetSalt returns a salt derived from the PrincipalName.
+func (pn PrincipalName) GetSalt(realm string) string {
+	var sb []byte
+	sb = append(sb, realm...)
+	for _, n := range pn.NameString {
+		sb = append(sb, n...)
+	}
+	return string(sb)
+}
+
+// Equal tests if the PrincipalName is equal to the one provided.
+func (pn PrincipalName) Equal(n PrincipalName) bool {
+	if len(pn.NameString) != len(n.NameString) {
+		return false
+	}
+	//https://tools.ietf.org/html/rfc4120#section-6.2 - the name type is not significant when checking for equivalence
+	for i, s := range pn.NameString {
+		if n.NameString[i] != s {
+			return false
+		}
+	}
+	return true
+}
+
+// PrincipalNameString returns the PrincipalName in string form.
+func (pn PrincipalName) PrincipalNameString() string {
+	return strings.Join(pn.NameString, "/")
+}
+
+// ParseSPNString will parse a string in the format <service>/<name>@<realm>.
+// A PrincipalName with the name type set to KRB_NT_PRINCIPAL(1) is returned,
+// along with the realm as a string. If the "@<realm>" suffix is not included
+// in the SPN then the realm string returned will be "".
+func ParseSPNString(spn string) (pn PrincipalName, realm string) {
+	if strings.Contains(spn, "@") {
+		s := strings.Split(spn, "@")
+		realm = s[len(s)-1]
+		spn = strings.TrimSuffix(spn, "@"+realm)
+	}
+	pn = NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn)
+	return
+}
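+
+// exampleParseSPN is an illustrative sketch (editorial addition, not upstream
+// gokrb5 code). The SPN string is a placeholder.
+func exampleParseSPN() (string, string) {
+	pn, realm := ParseSPNString("HTTP/host.example.com@EXAMPLE.COM")
+	// pn.PrincipalNameString() == "HTTP/host.example.com", realm == "EXAMPLE.COM"
+	return pn.PrincipalNameString(), realm
+}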
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/TypedData.go b/vendor/github.com/jcmturner/gokrb5/v8/types/TypedData.go
new file mode 100644
index 0000000..19e9f49
--- /dev/null
+++ b/vendor/github.com/jcmturner/gokrb5/v8/types/TypedData.go
@@ -0,0 +1,18 @@
+package types
+
+import "github.com/jcmturner/gofork/encoding/asn1"
+
+// TypedData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.9.1
+type TypedData struct {
+	DataType  int32  `asn1:"explicit,tag:0"`
+	DataValue []byte `asn1:"optional,explicit,tag:1"`
+}
+
+// TypedDataSequence implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.9.1
+type TypedDataSequence []TypedData
+
+// Unmarshal bytes into the TypedDataSequence.
+func (a *TypedDataSequence) Unmarshal(b []byte) error {
+	_, err := asn1.Unmarshal(b, a)
+	return err
+}
diff --git a/vendor/github.com/jcmturner/rpc/v2/LICENSE b/vendor/github.com/jcmturner/rpc/v2/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/claims.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/claims.go
new file mode 100644
index 0000000..b9f535f
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/claims.go
@@ -0,0 +1,152 @@
+package mstypes
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"fmt"
+
+	"github.com/jcmturner/rpc/v2/ndr"
+	"golang.org/x/net/http2/hpack"
+)
+
+// Compression format assigned numbers. https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-xca/a8b7cb0a-92a6-4187-a23b-5e14273b96f8
+const (
+	CompressionFormatNone       uint16 = 0
+	CompressionFormatLZNT1      uint16 = 2 // LZNT1 aka ntfs compression
+	CompressionFormatXPress     uint16 = 3 // plain LZ77
+	CompressionFormatXPressHuff uint16 = 4 // LZ77+Huffman - The Huffman variant of the XPRESS compression format uses LZ77-style dictionary compression combined with Huffman coding.
+)
+
+// ClaimsSourceTypeAD https://msdn.microsoft.com/en-us/library/hh553809.aspx
+const ClaimsSourceTypeAD uint16 = 1
+
+// Claim Type assigned numbers
+const (
+	ClaimTypeIDInt64    uint16 = 1
+	ClaimTypeIDUInt64   uint16 = 2
+	ClaimTypeIDString   uint16 = 3
+	ClaimsTypeIDBoolean uint16 = 6
+)
+
+// ClaimsBlob implements https://msdn.microsoft.com/en-us/library/hh554119.aspx
+type ClaimsBlob struct {
+	Size        uint32
+	EncodedBlob EncodedBlob
+}
+
+// EncodedBlob are the bytes of the encoded Claims
+type EncodedBlob []byte
+
+// Size returns the size of the bytes of the encoded Claims
+func (b EncodedBlob) Size(c interface{}) int {
+	cb := c.(ClaimsBlob)
+	return int(cb.Size)
+}
+
+// ClaimsSetMetadata implements https://msdn.microsoft.com/en-us/library/hh554073.aspx
+type ClaimsSetMetadata struct {
+	ClaimsSetSize             uint32
+	ClaimsSetBytes            []byte `ndr:"pointer,conformant"`
+	CompressionFormat         uint16 // Enum see constants for options
+	UncompressedClaimsSetSize uint32
+	ReservedType              uint16
+	ReservedFieldSize         uint32
+	ReservedField             []byte `ndr:"pointer,conformant"`
+}
+
+// ClaimsSet reads the ClaimsSet type from the NDR encoded ClaimsSetBytes in the ClaimsSetMetadata
+func (m *ClaimsSetMetadata) ClaimsSet() (c ClaimsSet, err error) {
+	if len(m.ClaimsSetBytes) < 1 {
+		err = errors.New("no bytes available for ClaimsSet")
+		return
+	}
+	// TODO switch statement to decompress ClaimsSetBytes
+	switch m.CompressionFormat {
+	case CompressionFormatLZNT1:
+		s := hex.EncodeToString(m.ClaimsSetBytes)
+		err = fmt.Errorf("ClaimsSet compressed, format LZNT1 not currently supported: %s", s)
+		return
+	case CompressionFormatXPress:
+		s := hex.EncodeToString(m.ClaimsSetBytes)
+		err = fmt.Errorf("ClaimsSet compressed, format XPress not currently supported: %s", s)
+		return
+	case CompressionFormatXPressHuff:
+		var b []byte
+		buff := bytes.NewBuffer(b)
+		_, e := hpack.HuffmanDecode(buff, m.ClaimsSetBytes)
+		if e != nil {
+			err = fmt.Errorf("error deflating: %v", e)
+			return
+		}
+		m.ClaimsSetBytes = buff.Bytes()
+	}
+	dec := ndr.NewDecoder(bytes.NewReader(m.ClaimsSetBytes))
+	err = dec.Decode(&c)
+	return
+}
+
+// ClaimsSet implements https://msdn.microsoft.com/en-us/library/hh554122.aspx
+type ClaimsSet struct {
+	ClaimsArrayCount  uint32
+	ClaimsArrays      []ClaimsArray `ndr:"pointer,conformant"`
+	ReservedType      uint16
+	ReservedFieldSize uint32
+	ReservedField     []byte `ndr:"pointer,conformant"`
+}
+
+// ClaimsArray implements https://msdn.microsoft.com/en-us/library/hh536458.aspx
+type ClaimsArray struct {
+	ClaimsSourceType uint16
+	ClaimsCount      uint32
+	ClaimEntries     []ClaimEntry `ndr:"pointer,conformant"`
+}
+
+// ClaimEntry is a NDR union that implements https://msdn.microsoft.com/en-us/library/hh536374.aspx
+type ClaimEntry struct {
+	ID         string           `ndr:"pointer,conformant,varying"`
+	Type       uint16           `ndr:"unionTag"`
+	TypeInt64  ClaimTypeInt64   `ndr:"unionField"`
+	TypeUInt64 ClaimTypeUInt64  `ndr:"unionField"`
+	TypeString ClaimTypeString  `ndr:"unionField"`
+	TypeBool   ClaimTypeBoolean `ndr:"unionField"`
+}
+
+// SwitchFunc is the ClaimEntry union field selection function
+func (u ClaimEntry) SwitchFunc(_ interface{}) string {
+	switch u.Type {
+	case ClaimTypeIDInt64:
+		return "TypeInt64"
+	case ClaimTypeIDUInt64:
+		return "TypeUInt64"
+	case ClaimTypeIDString:
+		return "TypeString"
+	case ClaimsTypeIDBoolean:
+		return "TypeBool"
+	}
+	return ""
+}
+
+// ClaimTypeInt64 is a claim of type int64
+type ClaimTypeInt64 struct {
+	ValueCount uint32
+	Value      []int64 `ndr:"pointer,conformant"`
+}
+
+// ClaimTypeUInt64 is a claim of type uint64
+type ClaimTypeUInt64 struct {
+	ValueCount uint32
+	Value      []uint64 `ndr:"pointer,conformant"`
+}
+
+// ClaimTypeString is a claim of type string
+type ClaimTypeString struct {
+	ValueCount uint32
+	Value      []LPWSTR `ndr:"pointer,conformant"`
+}
+
+// ClaimTypeBoolean is a claim of type bool
+type ClaimTypeBoolean struct {
+	ValueCount uint32
+	Value      []bool `ndr:"pointer,conformant"`
+}
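+
+// exampleStringClaims is an illustrative sketch (editorial addition, not upstream
+// code). It walks a decoded ClaimsSet and collects the string-typed claims; the
+// active union field of each ClaimEntry is selected by its Type discriminant.
+func exampleStringClaims(cs ClaimsSet) map[string][]string {
+	out := make(map[string][]string)
+	for _, arr := range cs.ClaimsArrays {
+		for _, e := range arr.ClaimEntries {
+			if e.Type != ClaimTypeIDString {
+				continue
+			}
+			var vals []string
+			for _, v := range e.TypeString.Value {
+				vals = append(vals, v.Value)
+			}
+			out[e.ID] = vals
+		}
+	}
+	return out
+}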
diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/common.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/common.go
new file mode 100644
index 0000000..fb6510d
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/common.go
@@ -0,0 +1,12 @@
+// Package mstypes provides implementations of some Microsoft data types [MS-DTYP] https://msdn.microsoft.com/en-us/library/cc230283.aspx
+package mstypes
+
+// LPWSTR implements https://msdn.microsoft.com/en-us/library/cc230355.aspx
+type LPWSTR struct {
+	Value string `ndr:"pointer,conformant,varying"`
+}
+
+// String returns the string representation of LPWSTR data type.
+func (s *LPWSTR) String() string {
+	return s.Value
+}
diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/filetime.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/filetime.go
new file mode 100644
index 0000000..5cc952f
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/filetime.go
@@ -0,0 +1,52 @@
+// Package mstypes implements representations of Microsoft types
+package mstypes
+
+import (
+	"time"
+)
+
+/*
+FILETIME is a windows data structure.
+Ref: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724284%28v=vs.85%29.aspx
+It contains two parts that are 32bit integers:
+	dwLowDateTime
+	dwHighDateTime
+We need to combine these two into one 64-bit integer.
+This gives the number of 100-nanosecond periods since January 1, 1601, Coordinated Universal Time (UTC).
+*/
+
+const unixEpochDiff = 116444736000000000
+
+// FileTime implements the Microsoft FILETIME type https://msdn.microsoft.com/en-us/library/cc230324.aspx
+type FileTime struct {
+	LowDateTime  uint32
+	HighDateTime uint32
+}
+
+// Time returns a Go time.Time from the FileTime
+func (ft FileTime) Time() time.Time {
+	ns := (ft.MSEpoch() - unixEpochDiff) * 100
+	return time.Unix(0, int64(ns)).UTC()
+}
+
+// MSEpoch returns the FileTime as a Microsoft epoch, the number of 100 nano second periods elapsed from January 1, 1601 UTC.
+func (ft FileTime) MSEpoch() int64 {
+	return (int64(ft.HighDateTime) << 32) + int64(ft.LowDateTime)
+}
+
+// Unix returns the FileTime as a Unix time, the number of seconds elapsed since January 1, 1970 UTC.
+func (ft FileTime) Unix() int64 {
+	return (ft.MSEpoch() - unixEpochDiff) / 10000000
+}
+
+// GetFileTime returns a FileTime type from the provided Golang Time type.
+func GetFileTime(t time.Time) FileTime {
+	ns := t.UnixNano()
+	fp := (ns / 100) + unixEpochDiff
+	hd := fp >> 32
+	ld := fp - (hd << 32)
+	return FileTime{
+		LowDateTime:  uint32(ld),
+		HighDateTime: uint32(hd),
+	}
+}
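+
+// exampleFileTimeRoundTrip is an illustrative sketch (editorial addition, not
+// upstream code): converting a Go time to FILETIME and back loses only sub-100ns
+// precision, as FILETIME counts 100-nanosecond intervals since 1601-01-01 UTC.
+func exampleFileTimeRoundTrip(t time.Time) time.Time {
+	ft := GetFileTime(t)
+	return ft.Time()
+}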
diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/group_membership.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/group_membership.go
new file mode 100644
index 0000000..7915137
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/group_membership.go
@@ -0,0 +1,19 @@
+package mstypes
+
+// GroupMembership implements https://msdn.microsoft.com/en-us/library/cc237945.aspx
+// RelativeID : A 32-bit unsigned integer that contains the RID of a particular group.
+// The possible values for the Attributes flags are identical to those specified in KERB_SID_AND_ATTRIBUTES
+type GroupMembership struct {
+	RelativeID uint32
+	Attributes uint32
+}
+
+// DomainGroupMembership implements https://msdn.microsoft.com/en-us/library/hh536344.aspx
+// DomainId: A SID structure that contains the SID for the domain. This member is used in conjunction with the GroupIds member to create group SIDs for the device.
+// GroupCount: A 32-bit unsigned integer that contains the number of groups within the domain to which the account belongs.
+// GroupIds: A pointer to a list of GROUP_MEMBERSHIP structures that contain the groups to which the account belongs in the domain. The number of groups in this list MUST be equal to GroupCount.
+type DomainGroupMembership struct {
+	DomainID   RPCSID `ndr:"pointer"`
+	GroupCount uint32
+	GroupIDs   []GroupMembership `ndr:"pointer,conformant"` // Size is value of GroupCount
+}
diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/kerb_sid_and_attributes.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/kerb_sid_and_attributes.go
new file mode 100644
index 0000000..61ac39b
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/kerb_sid_and_attributes.go
@@ -0,0 +1,23 @@
+package mstypes
+
+// Attributes of a security group membership; these can be combined by using the bitwise OR operation.
+// They are used by an access check mechanism to specify whether the membership is to be used in an access check decision.
+const (
+	SEGroupMandatory        = 31
+	SEGroupEnabledByDefault = 30
+	SEGroupEnabled          = 29
+	SEGroupOwner            = 28
+	SEGroupResource         = 2
+	// All other bits MUST be set to zero and MUST be ignored on receipt.
+)
+
+// KerbSidAndAttributes implements https://msdn.microsoft.com/en-us/library/cc237947.aspx
+type KerbSidAndAttributes struct {
+	SID        RPCSID `ndr:"pointer"` // A pointer to an RPC_SID structure.
+	Attributes uint32
+}
+
+// SetFlag sets a flag in a uint32 attribute value.
+func SetFlag(a *uint32, i uint) {
+	*a = *a | (1 << (31 - i))
+}
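+
+// exampleGroupAttributes is an illustrative sketch (editorial addition, not
+// upstream code): it sets the mandatory, enabled-by-default and enabled bits,
+// giving the attribute value 0x00000007 commonly seen on group memberships.
+func exampleGroupAttributes() uint32 {
+	var a uint32
+	SetFlag(&a, SEGroupMandatory)
+	SetFlag(&a, SEGroupEnabledByDefault)
+	SetFlag(&a, SEGroupEnabled)
+	return a
+}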
diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/reader.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/reader.go
new file mode 100644
index 0000000..24495bc
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/reader.go
@@ -0,0 +1,109 @@
+package mstypes
+
+import (
+	"bufio"
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+// Byte sizes of primitive types
+const (
+	SizeBool   = 1
+	SizeChar   = 1
+	SizeUint8  = 1
+	SizeUint16 = 2
+	SizeUint32 = 4
+	SizeUint64 = 8
+	SizeEnum   = 2
+	SizeSingle = 4
+	SizeDouble = 8
+	SizePtr    = 4
+)
+
+// Reader reads simple byte stream data into Go representations
+type Reader struct {
+	r *bufio.Reader // source of the data
+}
+
+// NewReader creates a new instance of a simple Reader.
+func NewReader(r io.Reader) *Reader {
+	reader := new(Reader)
+	reader.r = bufio.NewReader(r)
+	return reader
+}
+
+func (r *Reader) Read(p []byte) (n int, err error) {
+	return r.r.Read(p)
+}
+
+func (r *Reader) Uint8() (uint8, error) {
+	b, err := r.r.ReadByte()
+	if err != nil {
+		return uint8(0), err
+	}
+	return uint8(b), nil
+}
+
+func (r *Reader) Uint16() (uint16, error) {
+	b, err := r.ReadBytes(SizeUint16)
+	if err != nil {
+		return uint16(0), err
+	}
+	return binary.LittleEndian.Uint16(b), nil
+}
+
+func (r *Reader) Uint32() (uint32, error) {
+	b, err := r.ReadBytes(SizeUint32)
+	if err != nil {
+		return uint32(0), err
+	}
+	return binary.LittleEndian.Uint32(b), nil
+}
+
+func (r *Reader) Uint64() (uint64, error) {
+	b, err := r.ReadBytes(SizeUint64)
+	if err != nil {
+		return uint64(0), err
+	}
+	return binary.LittleEndian.Uint64(b), nil
+}
+
+func (r *Reader) FileTime() (f FileTime, err error) {
+	f.LowDateTime, err = r.Uint32()
+	if err != nil {
+		return
+	}
+	f.HighDateTime, err = r.Uint32()
+	if err != nil {
+		return
+	}
+	return
+}
+
+// UTF16String returns a string that is UTF16 encoded in a byte slice. n is the number of bytes representing the string
+func (r *Reader) UTF16String(n int) (str string, err error) {
+	// Length divided by 2 as each UTF-16 code unit is 16 bits = 2 bytes
+	s := make([]rune, n/2, n/2)
+	for i := 0; i < len(s); i++ {
+		var u uint16
+		u, err = r.Uint16()
+		if err != nil {
+			return
+		}
+		s[i] = rune(u)
+	}
+	str = string(s)
+	return
+}
+
+// ReadBytes returns n bytes from the NDR byte stream.
+func (r *Reader) ReadBytes(n int) ([]byte, error) {
+	//TODO make this take an int64 as input to allow for larger values on all systems?
+	b := make([]byte, n, n)
+	m, err := r.r.Read(b)
+	if err != nil || m != n {
+		return b, fmt.Errorf("error reading bytes from stream: %v", err)
+	}
+	return b, nil
+}
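+
+// exampleReadFileTime is an illustrative sketch (editorial addition, not upstream
+// code). All multi-byte reads on this Reader are little-endian, so a FILETIME is
+// read as its low 32 bits followed by its high 32 bits.
+func exampleReadFileTime(src io.Reader) (FileTime, error) {
+	return NewReader(src).FileTime()
+}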
diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/rpc_unicode_string.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/rpc_unicode_string.go
new file mode 100644
index 0000000..4bf02e0
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/rpc_unicode_string.go
@@ -0,0 +1,13 @@
+package mstypes
+
+// RPCUnicodeString implements https://msdn.microsoft.com/en-us/library/cc230365.aspx
+type RPCUnicodeString struct {
+	Length        uint16 // The length, in bytes, of the string pointed to by the Buffer member, not including the terminating null character if any. The length MUST be a multiple of 2. The length SHOULD equal the entire size of the Buffer, in which case there is no terminating null character. Any method that accesses this structure MUST use the Length specified instead of relying on the presence or absence of a null character.
+	MaximumLength uint16 // The maximum size, in bytes, of the string pointed to by Buffer. The size MUST be a multiple of 2. If not, the size MUST be decremented by 1 prior to use. This value MUST not be less than Length.
+	Value         string `ndr:"pointer,conformant,varying"`
+}
+
+// String returns the RPCUnicodeString string value
+func (r *RPCUnicodeString) String() string {
+	return r.Value
+}
diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/sid.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/sid.go
new file mode 100644
index 0000000..8e34705
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/sid.go
@@ -0,0 +1,36 @@
+package mstypes
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+	"math"
+	"strings"
+)
+
+// RPCSID implements https://msdn.microsoft.com/en-us/library/cc230364.aspx
+type RPCSID struct {
+	Revision            uint8    // An 8-bit unsigned integer that specifies the revision level of the SID. This value MUST be set to 0x01.
+	SubAuthorityCount   uint8    // An 8-bit unsigned integer that specifies the number of elements in the SubAuthority array. The maximum number of elements allowed is 15.
+	IdentifierAuthority [6]byte  // An RPC_SID_IDENTIFIER_AUTHORITY structure that indicates the authority under which the SID was created. It describes the entity that created the SID. The Identifier Authority value {0,0,0,0,0,5} denotes SIDs created by the NT SID authority.
+	SubAuthority        []uint32 `ndr:"conformant"` // A variable length array of unsigned 32-bit integers that uniquely identifies a principal relative to the IdentifierAuthority. Its length is determined by SubAuthorityCount.
+}
+
+// String returns the string representation of the RPC_SID.
+func (s *RPCSID) String() string {
+	var strb strings.Builder
+	strb.WriteString("S-1-")
+
+	b := append(make([]byte, 2, 2), s.IdentifierAuthority[:]...)
+	// For a strange reason this is read big endian: https://msdn.microsoft.com/en-us/library/dd302645.aspx
+	i := binary.BigEndian.Uint64(b)
+	if i > math.MaxUint32 {
+		fmt.Fprintf(&strb, "0x%s", hex.EncodeToString(s.IdentifierAuthority[:]))
+	} else {
+		fmt.Fprintf(&strb, "%d", i)
+	}
+	for _, sub := range s.SubAuthority {
+		fmt.Fprintf(&strb, "-%d", sub)
+	}
+	return strb.String()
+}
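+
+// exampleSIDString is an illustrative sketch (editorial addition, not upstream
+// code). The sub-authority values are placeholders; the NT authority {0,0,0,0,0,5}
+// produces the familiar "S-1-5-..." prefix.
+func exampleSIDString() string {
+	sid := RPCSID{
+		Revision:            1,
+		SubAuthorityCount:   5,
+		IdentifierAuthority: [6]byte{0, 0, 0, 0, 0, 5},
+		SubAuthority:        []uint32{21, 721, 722, 723, 1104},
+	}
+	return sid.String() // "S-1-5-21-721-722-723-1104"
+}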
diff --git a/vendor/github.com/jcmturner/rpc/v2/mstypes/user_session_key.go b/vendor/github.com/jcmturner/rpc/v2/mstypes/user_session_key.go
new file mode 100644
index 0000000..fcf0a5d
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/mstypes/user_session_key.go
@@ -0,0 +1,11 @@
+package mstypes
+
+// CypherBlock implements https://msdn.microsoft.com/en-us/library/cc237040.aspx
+type CypherBlock struct {
+	Data [8]byte // size = 8
+}
+
+// UserSessionKey implements https://msdn.microsoft.com/en-us/library/cc237080.aspx
+type UserSessionKey struct {
+	CypherBlock [2]CypherBlock // size = 2
+}
diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/arrays.go b/vendor/github.com/jcmturner/rpc/v2/ndr/arrays.go
new file mode 100644
index 0000000..5e2def2
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/ndr/arrays.go
@@ -0,0 +1,413 @@
+package ndr
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+// intFromTag returns an int that is a value in a struct tag key/value pair
+func intFromTag(tag reflect.StructTag, key string) (int, error) {
+	ndrTag := parseTags(tag)
+	d := 1
+	if n, ok := ndrTag.Map[key]; ok {
+		i, err := strconv.Atoi(n)
+		if err != nil {
+			return d, fmt.Errorf("invalid dimensions tag [%s]: %v", n, err)
+		}
+		d = i
+	}
+	return d, nil
+}
+
+// parseDimensions returns a slice of the size of each dimension and the type of the member at the deepest level.
+func parseDimensions(v reflect.Value) (l []int, tb reflect.Type) {
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+	t := v.Type()
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	if t.Kind() != reflect.Array && t.Kind() != reflect.Slice {
+		return
+	}
+	l = append(l, v.Len())
+	if t.Elem().Kind() == reflect.Array || t.Elem().Kind() == reflect.Slice {
+		// contains array or slice
+		var m []int
+		m, tb = parseDimensions(v.Index(0))
+		l = append(l, m...)
+	} else {
+		tb = t.Elem()
+	}
+	return
+}
+
+// sliceDimensions returns the count of dimensions a slice has.
+func sliceDimensions(t reflect.Type) (d int, tb reflect.Type) {
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Slice {
+		d++
+		var n int
+		n, tb = sliceDimensions(t.Elem())
+		d += n
+	} else {
+		tb = t
+	}
+	return
+}
+
+// makeSubSlices is a deep recursive creation/initialisation of multi-dimensional slices.
+// Takes the reflect.Value of the 1st dimension and a slice of the lengths of the sub dimensions
+func makeSubSlices(v reflect.Value, l []int) {
+	ty := v.Type().Elem()
+	if ty.Kind() != reflect.Slice {
+		return
+	}
+	for i := 0; i < v.Len(); i++ {
+		s := reflect.MakeSlice(ty, l[0], l[0])
+		v.Index(i).Set(s)
+		// Are there more sub dimensions?
+		if len(l) > 1 {
+			makeSubSlices(v.Index(i), l[1:])
+		}
+	}
+	return
+}
+
+// multiDimensionalIndexPermutations returns all the permutations of the indexes of a multi-dimensional slice.
+// The input is a slice of integers that indicates the max size/length of each dimension
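+// For example, multiDimensionalIndexPermutations([]int{2, 2}) returns
+// [[0 0] [0 1] [1 0] [1 1]].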
+func multiDimensionalIndexPermutations(l []int) (ps [][]int) {
+	z := make([]int, len(l), len(l)) // The zeros permutation
+	ps = append(ps, z)
+	// for each dimension, in reverse
+	for i := len(l) - 1; i >= 0; i-- {
+		ws := make([][]int, len(ps))
+		copy(ws, ps)
+		//create a permutation for each of the iterations of the current dimension
+		for j := 1; j <= l[i]-1; j++ {
+			// For each existing permutation
+			for _, p := range ws {
+				np := make([]int, len(p), len(p))
+				copy(np, p)
+				np[i] = j
+				ps = append(ps, np)
+			}
+		}
+	}
+	return
+}
+
+// precedingMax reads off the next conformant max value
+func (dec *Decoder) precedingMax() uint32 {
+	m := dec.conformantMax[0]
+	dec.conformantMax = dec.conformantMax[1:]
+	return m
+}
+
+// fillFixedArray establishes if the fixed array is uni or multi dimensional and then fills it.
+func (dec *Decoder) fillFixedArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	l, t := parseDimensions(v)
+	if t.Kind() == reflect.String {
+		tag = reflect.StructTag(subStringArrayTag)
+	}
+	if len(l) < 1 {
+		return errors.New("could not establish dimensions of fixed array")
+	}
+	if len(l) == 1 {
+		err := dec.fillUniDimensionalFixedArray(v, tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill uni-dimensional fixed array: %v", err)
+		}
+		return nil
+	}
+	// Fixed array is multidimensional
+	ps := multiDimensionalIndexPermutations(l[:len(l)-1])
+	for _, p := range ps {
+		// Get current multi-dimensional index to fill
+		a := v
+		for _, i := range p {
+			a = a.Index(i)
+		}
+		// fill with the last dimension array
+		err := dec.fillUniDimensionalFixedArray(a, tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill dimension %v of multi-dimensional fixed array: %v", p, err)
+		}
+	}
+	return nil
+}
+
+// readUniDimensionalFixedArray reads an array (not slice) from the byte stream.
+func (dec *Decoder) fillUniDimensionalFixedArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	for i := 0; i < v.Len(); i++ {
+		err := dec.fill(v.Index(i), tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill index %d of fixed array: %v", i, err)
+		}
+	}
+	return nil
+}
+
+// fillConformantArray establishes if the conformant array is uni or multi dimensional and then fills the slice.
+func (dec *Decoder) fillConformantArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	d, _ := sliceDimensions(v.Type())
+	if d > 1 {
+		err := dec.fillMultiDimensionalConformantArray(v, d, tag, def)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := dec.fillUniDimensionalConformantArray(v, tag, def)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// fillUniDimensionalConformantArray fills the uni-dimensional slice value.
+func (dec *Decoder) fillUniDimensionalConformantArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	m := dec.precedingMax()
+	n := int(m)
+	a := reflect.MakeSlice(v.Type(), n, n)
+	for i := 0; i < n; i++ {
+		err := dec.fill(a.Index(i), tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill index %d of uni-dimensional conformant array: %v", i, err)
+		}
+	}
+	v.Set(a)
+	return nil
+}
+
+// fillMultiDimensionalConformantArray fills the multi-dimensional slice value provided from conformant array data.
+// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this
+// method not to panic.
+func (dec *Decoder) fillMultiDimensionalConformantArray(v reflect.Value, d int, tag reflect.StructTag, def *[]deferedPtr) error {
+	// Read the max size of each dimensions from the ndr stream
+	// Read the max size of each dimension from the ndr stream
+	for i := range l {
+		l[i] = int(dec.precedingMax())
+	}
+	// Initialise size of slices
+	//   Initialise the size of the 1st dimension
+	ty := v.Type()
+	v.Set(reflect.MakeSlice(ty, l[0], l[0]))
+	// Initialise the size of the other dimensions recursively
+	makeSubSlices(v, l[1:])
+
+	// Get all permutations of the indexes and go through each and fill
+	ps := multiDimensionalIndexPermutations(l)
+	for _, p := range ps {
+		// Get current multi-dimensional index to fill
+		a := v
+		for _, i := range p {
+			a = a.Index(i)
+		}
+		err := dec.fill(a, tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill index %v of slice: %v", p, err)
+		}
+	}
+	return nil
+}
+
+// fillVaryingArray establishes if the varying array is uni or multi dimensional and then fills the slice.
+func (dec *Decoder) fillVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	d, t := sliceDimensions(v.Type())
+	if d > 1 {
+		err := dec.fillMultiDimensionalVaryingArray(v, t, d, tag, def)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := dec.fillUniDimensionalVaryingArray(v, tag, def)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// fillUniDimensionalVaryingArray fills the uni-dimensional slice value.
+func (dec *Decoder) fillUniDimensionalVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	o, err := dec.readUint32()
+	if err != nil {
+		return fmt.Errorf("could not read offset of uni-dimensional varying array: %v", err)
+	}
+	s, err := dec.readUint32()
+	if err != nil {
+		return fmt.Errorf("could not establish actual count of uni-dimensional varying array: %v", err)
+	}
+	t := v.Type()
+	// Total size of the array is the offset in the index being passed plus the actual count of elements being passed.
+	n := int(s + o)
+	a := reflect.MakeSlice(t, n, n)
+	// Populate the array starting at the offset specified
+	for i := int(o); i < n; i++ {
+		err := dec.fill(a.Index(i), tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill index %d of uni-dimensional varying array: %v", i, err)
+		}
+	}
+	v.Set(a)
+	return nil
+}
+
+// fillMultiDimensionalVaryingArray fills the multi-dimensional slice value provided from varying array data.
+// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this
+// method not to panic.
+func (dec *Decoder) fillMultiDimensionalVaryingArray(v reflect.Value, t reflect.Type, d int, tag reflect.StructTag, def *[]deferedPtr) error {
+	// Read the offset and actual count of each dimension from the ndr stream
+	o := make([]int, d, d)
+	l := make([]int, d, d)
+	for i := range l {
+		off, err := dec.readUint32()
+		if err != nil {
+			return fmt.Errorf("could not read offset of dimension %d: %v", i+1, err)
+		}
+		o[i] = int(off)
+		s, err := dec.readUint32()
+		if err != nil {
+			return fmt.Errorf("could not read size of dimension %d: %v", i+1, err)
+		}
+		l[i] = int(s) + int(off)
+	}
+	// Initialise size of slices
+	//   Initialise the size of the 1st dimension
+	ty := v.Type()
+	v.Set(reflect.MakeSlice(ty, l[0], l[0]))
+	// Initialise the size of the other dimensions recursively
+	makeSubSlices(v, l[1:])
+
+	// Get all permutations of the indexes and go through each and fill
+	ps := multiDimensionalIndexPermutations(l)
+	for _, p := range ps {
+		// Get current multi-dimensional index to fill
+		a := v
+		var os bool // should this permutation be skipped due to the offset of any of the dimensions?
+		for i, j := range p {
+			if j < o[i] {
+				os = true
+				break
+			}
+			a = a.Index(j)
+		}
+		if os {
+			// This permutation should be skipped as it is less than the offset for one of the dimensions.
+			continue
+		}
+		err := dec.fill(a, tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill index %v of slice: %v", p, err)
+		}
+	}
+	return nil
+}
+
+// fillConformantVaryingArray establishes if the varying array is uni or multi dimensional and then fills the slice.
+func (dec *Decoder) fillConformantVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	d, t := sliceDimensions(v.Type())
+	if d > 1 {
+		err := dec.fillMultiDimensionalConformantVaryingArray(v, t, d, tag, def)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := dec.fillUniDimensionalConformantVaryingArray(v, tag, def)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// fillUniDimensionalConformantVaryingArray fills the uni-dimensional slice value.
+func (dec *Decoder) fillUniDimensionalConformantVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	m := dec.precedingMax()
+	o, err := dec.readUint32()
+	if err != nil {
+		return fmt.Errorf("could not read offset of uni-dimensional conformant varying array: %v", err)
+	}
+	s, err := dec.readUint32()
+	if err != nil {
+		return fmt.Errorf("could not establish actual count of uni-dimensional conformant varying array: %v", err)
+	}
+	if m < o+s {
+		return errors.New("max count is less than the offset plus actual count")
+	}
+	t := v.Type()
+	n := int(s)
+	a := reflect.MakeSlice(t, n, n)
+	for i := int(o); i < n; i++ {
+		err := dec.fill(a.Index(i), tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill index %d of uni-dimensional conformant varying array: %v", i, err)
+		}
+	}
+	v.Set(a)
+	return nil
+}
+
+// fillMultiDimensionalConformantVaryingArray fills the multi-dimensional slice value provided from conformant varying array data.
+// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this
+// method not to panic.
+func (dec *Decoder) fillMultiDimensionalConformantVaryingArray(v reflect.Value, t reflect.Type, d int, tag reflect.StructTag, def *[]deferedPtr) error {
+	// Read the offset and actual count of each dimension from the ndr stream
+	m := make([]int, d, d)
+	for i := range m {
+		m[i] = int(dec.precedingMax())
+	}
+	o := make([]int, d, d)
+	l := make([]int, d, d)
+	for i := range l {
+		off, err := dec.readUint32()
+		if err != nil {
+			return fmt.Errorf("could not read offset of dimension %d: %v", i+1, err)
+		}
+		o[i] = int(off)
+		s, err := dec.readUint32()
+		if err != nil {
+			return fmt.Errorf("could not read actual count of dimension %d: %v", i+1, err)
+		}
+		if m[i] < int(s)+int(off) {
+			m[i] = int(s) + int(off)
+		}
+		l[i] = int(s)
+	}
+	// Initialise size of slices
+	//   Initialise the size of the 1st dimension
+	ty := v.Type()
+	v.Set(reflect.MakeSlice(ty, m[0], m[0]))
+	// Initialise the size of the other dimensions recursively
+	makeSubSlices(v, m[1:])
+
+	// Get all permutations of the indexes and go through each and fill
+	ps := multiDimensionalIndexPermutations(m)
+	for _, p := range ps {
+		// Get current multi-dimensional index to fill
+		a := v
+		var os bool // should this permutation be skipped because it is below the offset or beyond the actual count of any dimension?
+		for i, j := range p {
+			if j < o[i] || j >= l[i] {
+				os = true
+				break
+			}
+			a = a.Index(j)
+		}
+		if os {
+			// Skip this permutation: it is below the offset or beyond the actual count for one of the dimensions.
+			continue
+		}
+		err := dec.fill(a, tag, def)
+		if err != nil {
+			return fmt.Errorf("could not fill index %v of slice: %v", p, err)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/decoder.go b/vendor/github.com/jcmturner/rpc/v2/ndr/decoder.go
new file mode 100644
index 0000000..6157b4e
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/ndr/decoder.go
@@ -0,0 +1,393 @@
+// Package ndr provides the ability to unmarshal NDR encoded byte streams into Go data structures
+package ndr
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+)
+
+// Struct tag values
+const (
+	TagConformant = "conformant"
+	TagVarying    = "varying"
+	TagPointer    = "pointer"
+	TagPipe       = "pipe"
+)
+
+// Decoder unmarshals NDR byte stream data into a Go struct representation
+type Decoder struct {
+	r             *bufio.Reader // source of the data
+	size          int           // initial size of bytes in buffer
+	ch            CommonHeader  // NDR common header
+	ph            PrivateHeader // NDR private header
+	conformantMax []uint32      // conformant max values that were moved to the beginning of the structure
+	s             interface{}   // pointer to the structure being populated
+	current       []string      // keeps track of the current field being populated
+}
+
+type deferedPtr struct {
+	v   reflect.Value
+	tag reflect.StructTag
+}
+
+// NewDecoder creates a new instance of a NDR Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+	dec := new(Decoder)
+	dec.r = bufio.NewReader(r)
+	dec.r.Peek(int(commonHeaderBytes)) // For some reason an operation is needed on the buffer to initialise it so Buffered() != 0
+	dec.size = dec.r.Buffered()
+	return dec
+}
+
+// Decode unmarshals the NDR encoded bytes into the pointer of a struct provided.
+func (dec *Decoder) Decode(s interface{}) error {
+	dec.s = s
+	err := dec.readCommonHeader()
+	if err != nil {
+		return err
+	}
+	err = dec.readPrivateHeader()
+	if err != nil {
+		return err
+	}
+	_, err = dec.r.Discard(4) //The next 4 bytes are an RPC unique pointer referent. We just skip these.
+	if err != nil {
+		return Errorf("unable to process byte stream: %v", err)
+	}
+
+	return dec.process(s, reflect.StructTag(""))
+}
+
+func (dec *Decoder) process(s interface{}, tag reflect.StructTag) error {
+	// Scan for conformant fields as their max counts are moved to the beginning
+	// http://pubs.opengroup.org/onlinepubs/9629399/chap14.htm#tagfcjh_37
+	err := dec.scanConformantArrays(s, tag)
+	if err != nil {
+		return err
+	}
+	// Recursively fill the struct fields
+	var localDef []deferedPtr
+	err = dec.fill(s, tag, &localDef)
+	if err != nil {
+		return Errorf("could not decode: %v", err)
+	}
+	// Read any deferred referents associated with pointers
+	for _, p := range localDef {
+		err = dec.process(p.v, p.tag)
+		if err != nil {
+			return fmt.Errorf("could not decode deferred referent: %v", err)
+		}
+	}
+	return nil
+}
+
+// scanConformantArrays scans the structure for embedded conformant fields and captures the maximum element counts for
+// dimensions of the array that are moved to the beginning of the structure.
+func (dec *Decoder) scanConformantArrays(s interface{}, tag reflect.StructTag) error {
+	err := dec.conformantScan(s, tag)
+	if err != nil {
+		return fmt.Errorf("failed to scan for embedded conformant arrays: %v", err)
+	}
+	for i := range dec.conformantMax {
+		dec.conformantMax[i], err = dec.readUint32()
+		if err != nil {
+			return fmt.Errorf("could not read preceding conformant max count index %d: %v", i, err)
+		}
+	}
+	return nil
+}
+
+// conformantScan inspects the structure's fields for whether they are conformant.
+func (dec *Decoder) conformantScan(s interface{}, tag reflect.StructTag) error {
+	ndrTag := parseTags(tag)
+	if ndrTag.HasValue(TagPointer) {
+		return nil
+	}
+	v := getReflectValue(s)
+	switch v.Kind() {
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			err := dec.conformantScan(v.Field(i), v.Type().Field(i).Tag)
+			if err != nil {
+				return err
+			}
+		}
+	case reflect.String:
+		if !ndrTag.HasValue(TagConformant) {
+			break
+		}
+		dec.conformantMax = append(dec.conformantMax, uint32(0))
+	case reflect.Slice:
+		if !ndrTag.HasValue(TagConformant) {
+			break
+		}
+		d, t := sliceDimensions(v.Type())
+		for i := 0; i < d; i++ {
+			dec.conformantMax = append(dec.conformantMax, uint32(0))
+		}
+		// For string arrays there is a common max for the strings within the array.
+		if t.Kind() == reflect.String {
+			dec.conformantMax = append(dec.conformantMax, uint32(0))
+		}
+	}
+	return nil
+}
+
+func (dec *Decoder) isPointer(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) (bool, error) {
+	// Pointer so defer filling the referent
+	ndrTag := parseTags(tag)
+	if ndrTag.HasValue(TagPointer) {
+		p, err := dec.readUint32()
+		if err != nil {
+			return true, fmt.Errorf("could not read pointer: %v", err)
+		}
+		ndrTag.delete(TagPointer)
+		if p != 0 {
+			// if pointer is not zero add to the deferred items at end of stream
+			*def = append(*def, deferedPtr{v, ndrTag.StructTag()})
+		}
+		return true, nil
+	}
+	return false, nil
+}
+
+func getReflectValue(s interface{}) (v reflect.Value) {
+	if r, ok := s.(reflect.Value); ok {
+		v = r
+	} else {
+		if reflect.ValueOf(s).Kind() == reflect.Ptr {
+			v = reflect.ValueOf(s).Elem()
+		}
+	}
+	return
+}
+
+// fill populates fields with values from the NDR byte stream.
+func (dec *Decoder) fill(s interface{}, tag reflect.StructTag, localDef *[]deferedPtr) error {
+	v := getReflectValue(s)
+
+	// If the field is a pointer, defer filling the referent
+	ptr, err := dec.isPointer(v, tag, localDef)
+	if err != nil {
+		return fmt.Errorf("could not process struct field(%s): %v", strings.Join(dec.current, "/"), err)
+	}
+	if ptr {
+		return nil
+	}
+
+	// Populate the value from the byte stream
+	switch v.Kind() {
+	case reflect.Struct:
+		dec.current = append(dec.current, v.Type().Name()) //Track the current field being filled
+		// in case struct is a union, track this and the selected union field for efficiency
+		var unionTag reflect.Value
+		var unionField string // field to fill if struct is a union
+		// Go through each field in the struct and recursively fill
+		for i := 0; i < v.NumField(); i++ {
+			fieldName := v.Type().Field(i).Name
+			dec.current = append(dec.current, fieldName) //Track the current field being filled
+			//fmt.Fprintf(os.Stderr, "DEBUG Decoding: %s\n", strings.Join(dec.current, "/"))
+			structTag := v.Type().Field(i).Tag
+			ndrTag := parseTags(structTag)
+
+			// Union handling
+			if !unionTag.IsValid() {
+				// Is this field a union tag?
+				unionTag = dec.isUnion(v.Field(i), structTag)
+			} else {
+				// Determine the selected field of the union if we don't already know it
+				if unionField == "" {
+					unionField, err = unionSelectedField(v, unionTag)
+					if err != nil {
+						return fmt.Errorf("could not determine selected union value field for %s with discriminant"+
+							" tag %s: %v", v.Type().Name(), unionTag, err)
+					}
+				}
+				if ndrTag.HasValue(TagUnionField) && fieldName != unionField {
+					// is a union and this field has not been selected so will skip it.
+					dec.current = dec.current[:len(dec.current)-1] //This field has been skipped so remove it from the current field tracker
+					continue
+				}
+			}
+
+			// Check if the field implements RawBytes (a raw byte slice whose size is defined by the parent struct)
+			if v.Field(i).Type().Implements(reflect.TypeOf(new(RawBytes)).Elem()) &&
+				v.Field(i).Type().Kind() == reflect.Slice && v.Field(i).Type().Elem().Kind() == reflect.Uint8 {
+				//field is for rawbytes
+				structTag, err = addSizeToTag(v, v.Field(i), structTag)
+				if err != nil {
+					return fmt.Errorf("could not get rawbytes field(%s) size: %v", strings.Join(dec.current, "/"), err)
+				}
+				ptr, err := dec.isPointer(v.Field(i), structTag, localDef)
+				if err != nil {
+					return fmt.Errorf("could not process struct field(%s): %v", strings.Join(dec.current, "/"), err)
+				}
+				if !ptr {
+					err := dec.readRawBytes(v.Field(i), structTag)
+					if err != nil {
+						return fmt.Errorf("could not fill raw bytes struct field(%s): %v", strings.Join(dec.current, "/"), err)
+					}
+				}
+			} else {
+				err := dec.fill(v.Field(i), structTag, localDef)
+				if err != nil {
+					return fmt.Errorf("could not fill struct field(%s): %v", strings.Join(dec.current, "/"), err)
+				}
+			}
+			dec.current = dec.current[:len(dec.current)-1] //This field has been filled so remove it from the current field tracker
+		}
+		dec.current = dec.current[:len(dec.current)-1] //This field has been filled so remove it from the current field tracker
+	case reflect.Bool:
+		i, err := dec.readBool()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Uint8:
+		i, err := dec.readUint8()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Uint16:
+		i, err := dec.readUint16()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Uint32:
+		i, err := dec.readUint32()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Uint64:
+		i, err := dec.readUint64()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Int8:
+		i, err := dec.readInt8()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Int16:
+		i, err := dec.readInt16()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Int32:
+		i, err := dec.readInt32()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Int64:
+		i, err := dec.readInt64()
+		if err != nil {
+			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.String:
+		ndrTag := parseTags(tag)
+		conformant := ndrTag.HasValue(TagConformant)
+		// strings are always varying so this is assumed without an explicit tag
+		var s string
+		var err error
+		if conformant {
+			s, err = dec.readConformantVaryingString(localDef)
+			if err != nil {
+				return fmt.Errorf("could not fill with conformant varying string: %v", err)
+			}
+		} else {
+			s, err = dec.readVaryingString(localDef)
+			if err != nil {
+				return fmt.Errorf("could not fill with varying string: %v", err)
+			}
+		}
+		v.Set(reflect.ValueOf(s))
+	case reflect.Float32:
+		i, err := dec.readFloat32()
+		if err != nil {
+			return fmt.Errorf("could not fill %v: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Float64:
+		i, err := dec.readFloat64()
+		if err != nil {
+			return fmt.Errorf("could not fill %v: %v", v.Type().Name(), err)
+		}
+		v.Set(reflect.ValueOf(i))
+	case reflect.Array:
+		err := dec.fillFixedArray(v, tag, localDef)
+		if err != nil {
+			return err
+		}
+	case reflect.Slice:
+		if v.Type().Implements(reflect.TypeOf(new(RawBytes)).Elem()) && v.Type().Elem().Kind() == reflect.Uint8 {
+			//field is for rawbytes
+			err := dec.readRawBytes(v, tag)
+			if err != nil {
+				return fmt.Errorf("could not fill raw bytes struct field(%s): %v", strings.Join(dec.current, "/"), err)
+			}
+			break
+		}
+		ndrTag := parseTags(tag)
+		conformant := ndrTag.HasValue(TagConformant)
+		varying := ndrTag.HasValue(TagVarying)
+		if ndrTag.HasValue(TagPipe) {
+			err := dec.fillPipe(v, tag)
+			if err != nil {
+				return err
+			}
+			break
+		}
+		_, t := sliceDimensions(v.Type())
+		if t.Kind() == reflect.String && !ndrTag.HasValue(subStringArrayValue) {
+			// String array
+			err := dec.readStringsArray(v, tag, localDef)
+			if err != nil {
+				return err
+			}
+			break
+		}
+		// varying is assumed as fixed arrays use the Go array type rather than slice
+		if conformant && varying {
+			err := dec.fillConformantVaryingArray(v, tag, localDef)
+			if err != nil {
+				return err
+			}
+		} else if !conformant && varying {
+			err := dec.fillVaryingArray(v, tag, localDef)
+			if err != nil {
+				return err
+			}
+		} else {
+			//default to conformant and not varying
+			err := dec.fillConformantArray(v, tag, localDef)
+			if err != nil {
+				return err
+			}
+		}
+	default:
+		return fmt.Errorf("unsupported type")
+	}
+	return nil
+}
+
+// readBytes returns a number of bytes from the NDR byte stream.
+func (dec *Decoder) readBytes(n int) ([]byte, error) {
+	//TODO make this take an int64 as input to allow for larger values on all systems?
+	b := make([]byte, n)
+	// io.ReadFull guards against short reads from the buffered reader.
+	m, err := io.ReadFull(dec.r, b)
+	if err != nil || m != n {
+		return b, fmt.Errorf("error reading bytes from stream: %v", err)
+	}
+	return b, nil
+}
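
Since this vendored package is new to the tree, a minimal usage sketch may help reviewers. It decodes a hand-built, little-endian RPC type-serialization stream with the NewDecoder/Decode API defined above; the byte values and the anonymous struct are illustrative, not taken from a real capture.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/jcmturner/rpc/v2/ndr"
)

func main() {
	// Illustrative stream: 8-byte common header, 8-byte private header,
	// 4-byte RPC unique pointer referent (skipped by Decode) and one uint32.
	stream := []byte{
		0x01, 0x10, 0x08, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, // common header: v1, little endian, length 8, filler
		0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // private header: object buffer length 8, filler
		0x00, 0x00, 0x00, 0x00, // unique pointer referent (discarded)
		0x2a, 0x00, 0x00, 0x00, // uint32 value 42
	}

	var out struct {
		Value uint32
	}
	if err := ndr.NewDecoder(bytes.NewReader(stream)).Decode(&out); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(out.Value) // 42
}
```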
diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/error.go b/vendor/github.com/jcmturner/rpc/v2/ndr/error.go
new file mode 100644
index 0000000..9971194
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/ndr/error.go
@@ -0,0 +1,18 @@
+package ndr
+
+import "fmt"
+
+// Malformed implements the error interface for malformed NDR encoding errors.
+type Malformed struct {
+	EText string
+}
+
+// Error implements the error interface on the Malformed struct.
+func (e Malformed) Error() string {
+	return fmt.Sprintf("malformed NDR stream: %s", e.EText)
+}
+
+// Errorf formats an error message into a malformed NDR error.
+func Errorf(format string, a ...interface{}) Malformed {
+	return Malformed{EText: fmt.Sprintf(format, a...)}
+}
diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/header.go b/vendor/github.com/jcmturner/rpc/v2/ndr/header.go
new file mode 100644
index 0000000..1970ddb
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/ndr/header.go
@@ -0,0 +1,116 @@
+package ndr
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+/*
+Serialization Version 1
+https://msdn.microsoft.com/en-us/library/cc243563.aspx
+
+Common Header - https://msdn.microsoft.com/en-us/library/cc243890.aspx
+8 bytes in total:
+- First byte - Version: Must equal 1
+- Second byte - 1st 4 bits: Endianness (0=Big; 1=Little); 2nd 4 bits: Character Encoding (0=ASCII; 1=EBCDIC)
+- 3rd and 4th bytes - Common Header Length: a 16-bit value that must equal 8. (The DCE spec describes byte 3 as the floating
+  point representation, but Microsoft test sources treat bytes 3-4 as the header length, which is what this code reads.)
+- 5th - 8th - Filler: MUST be set to 0xcccccccc on marshaling, and SHOULD be ignored during unmarshaling.
+
+Private Header - https://msdn.microsoft.com/en-us/library/cc243919.aspx
+8 bytes in total:
+- First 4 bytes - Indicates the length of a serialized top-level type in the octet stream. It MUST include the padding length and exclude the header itself.
+- Second 4 bytes - Filler: MUST be set to 0 (zero) during marshaling, and SHOULD be ignored during unmarshaling.
+*/
+
+const (
+	protocolVersion   uint8  = 1
+	commonHeaderBytes uint16 = 8
+	bigEndian                = 0
+	littleEndian             = 1
+	ascii             uint8  = 0
+	ebcdic            uint8  = 1
+	ieee              uint8  = 0
+	vax               uint8  = 1
+	cray              uint8  = 2
+	ibm               uint8  = 3
+)
+
+// CommonHeader implements the NDR common header: https://msdn.microsoft.com/en-us/library/cc243889.aspx
+type CommonHeader struct {
+	Version             uint8
+	Endianness          binary.ByteOrder
+	CharacterEncoding   uint8
+	FloatRepresentation uint8
+	HeaderLength        uint16
+	Filler              []byte
+}
+
+// PrivateHeader implements the NDR private header: https://msdn.microsoft.com/en-us/library/cc243919.aspx
+type PrivateHeader struct {
+	ObjectBufferLength uint32
+	Filler             []byte
+}
+
+func (dec *Decoder) readCommonHeader() error {
+	// Version
+	vb, err := dec.r.ReadByte()
+	if err != nil {
+		return Malformed{EText: "could not read first byte of common header for version"}
+	}
+	dec.ch.Version = uint8(vb)
+	if dec.ch.Version != protocolVersion {
+		return Malformed{EText: fmt.Sprintf("byte stream does not indicate an RPC Type serialization of version %v", protocolVersion)}
+	}
+	// Read Endianness & Character Encoding
+	eb, err := dec.r.ReadByte()
+	if err != nil {
+		return Malformed{EText: "could not read second byte of common header for endianness"}
+	}
+	endian := int(eb >> 4 & 0xF)
+	if endian != 0 && endian != 1 {
+		return Malformed{EText: "common header does not indicate a valid endianness"}
+	}
+	dec.ch.CharacterEncoding = uint8(eb & 0xF)
+	if dec.ch.CharacterEncoding != 0 && dec.ch.CharacterEncoding != 1 {
+		return Malformed{EText: "common header does not indicate a valid character encoding"}
+	}
+	switch endian {
+	case littleEndian:
+		dec.ch.Endianness = binary.LittleEndian
+	case bigEndian:
+		dec.ch.Endianness = binary.BigEndian
+	}
+	// Common header length
+	lb, err := dec.readBytes(2)
+	if err != nil {
+		return Malformed{EText: fmt.Sprintf("could not read common header length: %v", err)}
+	}
+	dec.ch.HeaderLength = dec.ch.Endianness.Uint16(lb)
+	if dec.ch.HeaderLength != commonHeaderBytes {
+		return Malformed{EText: "common header does not indicate a valid length"}
+	}
+	// Filler bytes
+	dec.ch.Filler, err = dec.readBytes(4)
+	if err != nil {
+		return Malformed{EText: fmt.Sprintf("could not read common header filler: %v", err)}
+	}
+	return nil
+}
+
+func (dec *Decoder) readPrivateHeader() error {
+	// The next 8 bytes after the common header comprise the RPC type marshalling private header for constructed types.
+	err := binary.Read(dec.r, dec.ch.Endianness, &dec.ph.ObjectBufferLength)
+	if err != nil {
+		return Malformed{EText: "could not read private header object buffer length"}
+	}
+	if dec.ph.ObjectBufferLength%8 != 0 {
+		return Malformed{EText: "object buffer length not a multiple of 8"}
+	}
+	// Filler bytes
+	dec.ph.Filler, err = dec.readBytes(4)
+	if err != nil {
+		return Malformed{EText: fmt.Sprintf("could not read private header filler: %v", err)}
+	}
+	return nil
+}
diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/pipe.go b/vendor/github.com/jcmturner/rpc/v2/ndr/pipe.go
new file mode 100644
index 0000000..5fd27da
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/ndr/pipe.go
@@ -0,0 +1,31 @@
+package ndr
+
+import (
+	"fmt"
+	"reflect"
+)
+
+func (dec *Decoder) fillPipe(v reflect.Value, tag reflect.StructTag) error {
+	s, err := dec.readUint32() // read element count of first chunk
+	if err != nil {
+		return err
+	}
+	a := reflect.MakeSlice(v.Type(), 0, 0)
+	c := reflect.MakeSlice(v.Type(), int(s), int(s))
+	for s != 0 {
+		for i := 0; i < int(s); i++ {
+			err := dec.fill(c.Index(i), tag, &[]deferedPtr{})
+			if err != nil {
+				return fmt.Errorf("could not fill element %d of pipe: %v", i, err)
+			}
+		}
+		s, err = dec.readUint32() // read element count of the next chunk
+		if err != nil {
+			return err
+		}
+		a = reflect.AppendSlice(a, c)
+		c = reflect.MakeSlice(v.Type(), int(s), int(s))
+	}
+	v.Set(a)
+	return nil
+}
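
The chunked wire format that fillPipe walks is easiest to see with a tiny builder. The helper below is illustrative only (appendChunk is not part of the package); it frames a hypothetical pipe of single-byte elements as length-prefixed chunks terminated by a zero count, which is what the loop above consumes.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// appendChunk appends one pipe chunk: a little-endian element count followed
// by that many single-byte elements.
func appendChunk(stream []byte, elems []byte) []byte {
	var cnt [4]byte
	binary.LittleEndian.PutUint32(cnt[:], uint32(len(elems)))
	stream = append(stream, cnt[:]...)
	return append(stream, elems...)
}

func main() {
	var stream []byte
	stream = appendChunk(stream, []byte{1, 2, 3})
	stream = appendChunk(stream, []byte{4, 5})
	stream = appendChunk(stream, nil) // a zero count terminates the pipe
	fmt.Printf("% x\n", stream)
	// prints: 03 00 00 00 01 02 03 02 00 00 00 04 05 00 00 00 00
}
```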
diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/primitives.go b/vendor/github.com/jcmturner/rpc/v2/ndr/primitives.go
new file mode 100644
index 0000000..7eb1d1a
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/ndr/primitives.go
@@ -0,0 +1,211 @@
+package ndr
+
+import (
+	"bytes"
+	"encoding/binary"
+	"math"
+)
+
+// Byte sizes of primitive types
+const (
+	SizeBool   = 1
+	SizeChar   = 1
+	SizeUint8  = 1
+	SizeUint16 = 2
+	SizeUint32 = 4
+	SizeUint64 = 8
+	SizeEnum   = 2
+	SizeSingle = 4
+	SizeDouble = 8
+	SizePtr    = 4
+)
+
+// Bool is an NDR Boolean which is a logical quantity that assumes one of two values: TRUE or FALSE.
+// NDR represents a Boolean as one octet.
+// It represents a value of FALSE as a zero octet, an octet in which every bit is reset.
+// It represents a value of TRUE as a non-zero octet, an octet in which one or more bits are set.
+
+// Char is an NDR character.
+// NDR represents a character as one octet.
+// Characters have two representation formats: ASCII and EBCDIC.
+
+// USmall is an unsigned 8 bit integer
+
+// UShort is an unsigned 16 bit integer
+
+// ULong is an unsigned 32 bit integer
+
+// UHyper is an unsigned 64 bit integer
+
+// Small is an signed 8 bit integer
+
+// Short is an signed 16 bit integer
+
+// Long is an signed 32 bit integer
+
+// Hyper is an signed 64 bit integer
+
+// Enum is the NDR representation of enumerated types as signed short integers (2 octets)
+
+// Single is an NDR defined single-precision floating-point data type
+
+// Double is an NDR defined double-precision floating-point data type
+
+// readBool reads a byte representing a boolean.
+// NDR represents a Boolean as one octet.
+// It represents a value of FALSE as a zero octet, an octet in which every bit is reset.
+// It represents a value of TRUE as a non-zero octet, an octet in which one or more bits are set.
+func (dec *Decoder) readBool() (bool, error) {
+	i, err := dec.readUint8()
+	if err != nil {
+		return false, err
+	}
+	if i != 0 {
+		return true, nil
+	}
+	return false, nil
+}
+
+// readChar reads a byte representing an 8-bit ASCII value cast to a rune.
+func (dec *Decoder) readChar() (rune, error) {
+	var r rune
+	a, err := dec.readUint8()
+	if err != nil {
+		return r, err
+	}
+	return rune(a), nil
+}
+
+// readUint8 reads a byte representing an 8-bit unsigned integer.
+func (dec *Decoder) readUint8() (uint8, error) {
+	b, err := dec.r.ReadByte()
+	if err != nil {
+		return uint8(0), err
+	}
+	return uint8(b), nil
+}
+
+// readUint16 reads bytes representing a 16-bit unsigned integer.
+func (dec *Decoder) readUint16() (uint16, error) {
+	dec.ensureAlignment(SizeUint16)
+	b, err := dec.readBytes(SizeUint16)
+	if err != nil {
+		return uint16(0), err
+	}
+	return dec.ch.Endianness.Uint16(b), nil
+}
+
+// readUint32 reads bytes representing a 32-bit unsigned integer.
+func (dec *Decoder) readUint32() (uint32, error) {
+	dec.ensureAlignment(SizeUint32)
+	b, err := dec.readBytes(SizeUint32)
+	if err != nil {
+		return uint32(0), err
+	}
+	return dec.ch.Endianness.Uint32(b), nil
+}
+
+// readUint64 reads bytes representing a 64-bit unsigned integer.
+func (dec *Decoder) readUint64() (uint64, error) {
+	dec.ensureAlignment(SizeUint64)
+	b, err := dec.readBytes(SizeUint64)
+	if err != nil {
+		return uint64(0), err
+	}
+	return dec.ch.Endianness.Uint64(b), nil
+}
+
+func (dec *Decoder) readInt8() (int8, error) {
+	dec.ensureAlignment(SizeUint8)
+	b, err := dec.readBytes(SizeUint8)
+	if err != nil {
+		return 0, err
+	}
+	var i int8
+	buf := bytes.NewReader(b)
+	err = binary.Read(buf, dec.ch.Endianness, &i)
+	if err != nil {
+		return 0, err
+	}
+	return i, nil
+}
+
+func (dec *Decoder) readInt16() (int16, error) {
+	dec.ensureAlignment(SizeUint16)
+	b, err := dec.readBytes(SizeUint16)
+	if err != nil {
+		return 0, err
+	}
+	var i int16
+	buf := bytes.NewReader(b)
+	err = binary.Read(buf, dec.ch.Endianness, &i)
+	if err != nil {
+		return 0, err
+	}
+	return i, nil
+}
+
+func (dec *Decoder) readInt32() (int32, error) {
+	dec.ensureAlignment(SizeUint32)
+	b, err := dec.readBytes(SizeUint32)
+	if err != nil {
+		return 0, err
+	}
+	var i int32
+	buf := bytes.NewReader(b)
+	err = binary.Read(buf, dec.ch.Endianness, &i)
+	if err != nil {
+		return 0, err
+	}
+	return i, nil
+}
+
+func (dec *Decoder) readInt64() (int64, error) {
+	dec.ensureAlignment(SizeUint64)
+	b, err := dec.readBytes(SizeUint64)
+	if err != nil {
+		return 0, err
+	}
+	var i int64
+	buf := bytes.NewReader(b)
+	err = binary.Read(buf, dec.ch.Endianness, &i)
+	if err != nil {
+		return 0, err
+	}
+	return i, nil
+}
+
+// https://en.wikipedia.org/wiki/IEEE_754-1985
+func (dec *Decoder) readFloat32() (f float32, err error) {
+	dec.ensureAlignment(SizeSingle)
+	b, err := dec.readBytes(SizeSingle)
+	if err != nil {
+		return
+	}
+	bits := dec.ch.Endianness.Uint32(b)
+	f = math.Float32frombits(bits)
+	return
+}
+
+func (dec *Decoder) readFloat64() (f float64, err error) {
+	dec.ensureAlignment(SizeDouble)
+	b, err := dec.readBytes(SizeDouble)
+	if err != nil {
+		return
+	}
+	bits := dec.ch.Endianness.Uint64(b)
+	f = math.Float64frombits(bits)
+	return
+}
+
+// NDR enforces alignment of primitive data; that is, any primitive of size n octets is aligned at an octet stream
+// index that is a multiple of n. (In this version of NDR, n is one of {1, 2, 4, 8}.) An octet stream index indicates
+// the number of an octet in an octet stream when octets are numbered, beginning with 0, from the first octet in the
+// stream. Where necessary, an alignment gap, consisting of octets of unspecified value, precedes the representation
+// of a primitive. The gap is of the smallest size sufficient to align the primitive.
+func (dec *Decoder) ensureAlignment(n int) {
+	p := dec.size - dec.r.Buffered()
+	if s := p % n; s != 0 {
+		dec.r.Discard(n - s)
+	}
+}
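
The alignment rule described above reduces to a one-line gap computation. The sketch below is illustrative (alignmentGap is not part of the package) and mirrors what ensureAlignment does with the buffered position.

```go
package main

import "fmt"

// alignmentGap mirrors the padding rule used by ensureAlignment: a primitive
// of size n octets must start at a stream index that is a multiple of n, so
// the gap is the distance to the next such index.
func alignmentGap(pos, n int) int {
	if r := pos % n; r != 0 {
		return n - r
	}
	return 0
}

func main() {
	// A uint16 at offset 0 consumes octets 0-1; a following uint32 must
	// start at offset 4, so 2 padding octets are skipped.
	fmt.Println(alignmentGap(2, 4)) // 2
	fmt.Println(alignmentGap(4, 8)) // 4
	fmt.Println(alignmentGap(8, 8)) // 0
}
```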
diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/rawbytes.go b/vendor/github.com/jcmturner/rpc/v2/ndr/rawbytes.go
new file mode 100644
index 0000000..9ee59fb
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/ndr/rawbytes.go
@@ -0,0 +1,61 @@
+package ndr
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+// type MyBytes []byte
+// implement RawBytes interface
+
+const (
+	sizeMethod = "Size"
+)
+
+// RawBytes is the interface to implement for fields that should be read as a raw block of bytes from the NDR stream.
+// Size is passed the parent structure and returns the number of bytes to read.
+type RawBytes interface {
+	Size(interface{}) int
+}
+
+func rawBytesSize(parent reflect.Value, v reflect.Value) (int, error) {
+	sf := v.MethodByName(sizeMethod)
+	if !sf.IsValid() {
+		return 0, fmt.Errorf("could not find a method called %s on the implementation of RawBytes", sizeMethod)
+	}
+	in := []reflect.Value{parent}
+	f := sf.Call(in)
+	if f[0].Kind() != reflect.Int {
+		return 0, errors.New("the RawBytes size function did not return an integer")
+	}
+	return int(f[0].Int()), nil
+}
+
+func addSizeToTag(parent reflect.Value, v reflect.Value, tag reflect.StructTag) (reflect.StructTag, error) {
+	size, err := rawBytesSize(parent, v)
+	if err != nil {
+		return tag, err
+	}
+	ndrTag := parseTags(tag)
+	ndrTag.Map["size"] = strconv.Itoa(size)
+	return ndrTag.StructTag(), nil
+}
+
+func (dec *Decoder) readRawBytes(v reflect.Value, tag reflect.StructTag) error {
+	ndrTag := parseTags(tag)
+	sizeStr, ok := ndrTag.Map["size"]
+	if !ok {
+		return errors.New("size tag not available")
+	}
+	size, err := strconv.Atoi(sizeStr)
+	if err != nil {
+		return fmt.Errorf("size not valid: %v", err)
+	}
+	b, err := dec.readBytes(size)
+	if err != nil {
+		return err
+	}
+	v.Set(reflect.ValueOf(b).Convert(v.Type()))
+	return nil
+}
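
A minimal sketch of how the RawBytes contract is typically satisfied. The Message and PayloadBytes types are hypothetical; the Size method signature and the fact that the decoder passes the parent struct to it follow from rawBytesSize above.

```go
package main

import (
	"fmt"

	"github.com/jcmturner/rpc/v2/ndr"
)

// Message is a hypothetical NDR structure whose raw payload length is carried
// in a preceding field of the same struct.
type Message struct {
	Length  uint32
	Payload PayloadBytes
}

// PayloadBytes is a hypothetical RawBytes implementation; the decoder calls
// Size with the parent struct so the byte count can be derived from it.
type PayloadBytes []byte

// Size returns the number of bytes readRawBytes should consume for this field.
func (b PayloadBytes) Size(parent interface{}) int {
	return int(parent.(Message).Length)
}

// Compile-time check that PayloadBytes satisfies the RawBytes interface.
var _ ndr.RawBytes = PayloadBytes(nil)

func main() {
	m := Message{Length: 16}
	// During decoding, readRawBytes would read exactly this many bytes into Payload.
	fmt.Println(m.Payload.Size(m)) // 16
}
```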
diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/strings.go b/vendor/github.com/jcmturner/rpc/v2/ndr/strings.go
new file mode 100644
index 0000000..b7a910b
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/ndr/strings.go
@@ -0,0 +1,70 @@
+package ndr
+
+import (
+	"fmt"
+	"reflect"
+)
+
+const (
+	subStringArrayTag   = `ndr:"varying,X-subStringArray"`
+	subStringArrayValue = "X-subStringArray"
+)
+
+func uint16SliceToString(a []uint16) string {
+	s := make([]rune, len(a), len(a))
+	for i := range s {
+		s[i] = rune(a[i])
+	}
+	if len(s) > 0 {
+		// Remove any null terminator
+		if s[len(s)-1] == rune(0) {
+			s = s[:len(s)-1]
+		}
+	}
+	return string(s)
+}
+
+func (dec *Decoder) readVaryingString(def *[]deferedPtr) (string, error) {
+	a := new([]uint16)
+	v := reflect.ValueOf(a)
+	var t reflect.StructTag
+	err := dec.fillUniDimensionalVaryingArray(v.Elem(), t, def)
+	if err != nil {
+		return "", err
+	}
+	s := uint16SliceToString(*a)
+	return s, nil
+}
+
+func (dec *Decoder) readConformantVaryingString(def *[]deferedPtr) (string, error) {
+	a := new([]uint16)
+	v := reflect.ValueOf(a)
+	var t reflect.StructTag
+	err := dec.fillUniDimensionalConformantVaryingArray(v.Elem(), t, def)
+	if err != nil {
+		return "", err
+	}
+	s := uint16SliceToString(*a)
+	return s, nil
+}
+
+func (dec *Decoder) readStringsArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
+	d, _ := sliceDimensions(v.Type())
+	ndrTag := parseTags(tag)
+	var m []int
+	if ndrTag.HasValue(TagConformant) {
+		// Consume the preceding max count for each dimension of the array.
+		for i := 0; i < d; i++ {
+			m = append(m, int(dec.precedingMax()))
+		}
+		// Discard the common max size shared by the strings within the array; it is not needed here.
+		_ = dec.precedingMax()
+	}
+	tag = reflect.StructTag(subStringArrayTag)
+	err := dec.fillVaryingArray(v, tag, def)
+	if err != nil {
+		return fmt.Errorf("could not read string array: %v", err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/tags.go b/vendor/github.com/jcmturner/rpc/v2/ndr/tags.go
new file mode 100644
index 0000000..01657e0
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/ndr/tags.go
@@ -0,0 +1,69 @@
+package ndr
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+const ndrNameSpace = "ndr"
+
+type tags struct {
+	Values []string
+	Map    map[string]string
+}
+
+// parse the struct field tags and extract the ndr related ones.
+// format of tag ndr:"value,key:value1,value2"
+func parseTags(st reflect.StructTag) tags {
+	s := st.Get(ndrNameSpace)
+	t := tags{
+		Values: []string{},
+		Map:    make(map[string]string),
+	}
+	if s != "" {
+		ndrTags := strings.Trim(s, `"`)
+		for _, tag := range strings.Split(ndrTags, ",") {
+			if strings.Contains(tag, ":") {
+				m := strings.SplitN(tag, ":", 2)
+				t.Map[m[0]] = m[1]
+			} else {
+				t.Values = append(t.Values, tag)
+			}
+		}
+	}
+	return t
+}
+
+func appendTag(t reflect.StructTag, s string) reflect.StructTag {
+	ts := t.Get(ndrNameSpace)
+	ts = fmt.Sprintf(`%s"%s,%s"`, ndrNameSpace, ts, s)
+	return reflect.StructTag(ts)
+}
+
+func (t *tags) StructTag() reflect.StructTag {
+	mv := t.Values
+	for key, val := range t.Map {
+		mv = append(mv, key+":"+val)
+	}
+	s := ndrNameSpace + ":" + `"` + strings.Join(mv, ",") + `"`
+	return reflect.StructTag(s)
+}
+
+func (t *tags) delete(s string) {
+	for i, x := range t.Values {
+		if x == s {
+			t.Values = append(t.Values[:i], t.Values[i+1:]...)
+		}
+	}
+	delete(t.Map, s)
+}
+
+func (t *tags) HasValue(s string) bool {
+	for _, v := range t.Values {
+		if v == s {
+			return true
+		}
+	}
+	return false
+}
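
To illustrate the tag syntax parseTags expects, here is a hypothetical annotated struct; the Record type and its fields are invented for the example, and only the tag names come from the package.

```go
package main

import (
	"fmt"
	"reflect"
)

// Record is a hypothetical structure using the ndr struct tag syntax that
// parseTags understands: plain values such as "conformant", "varying",
// "pointer" and "pipe" end up in tags.Values, while key:value entries
// (for example the internally added "size:16") end up in tags.Map.
type Record struct {
	// A pointer to a conformant, varying string: a referent ID is read in
	// place and the string data is deferred to the end of the structure.
	Name string `ndr:"pointer,conformant,varying"`
	// A conformant, varying array whose max count is moved to the front of
	// the enclosing structure.
	Values []uint32 `ndr:"conformant,varying"`
}

func main() {
	f, _ := reflect.TypeOf(Record{}).FieldByName("Name")
	fmt.Println(f.Tag.Get("ndr")) // pointer,conformant,varying
}
```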
diff --git a/vendor/github.com/jcmturner/rpc/v2/ndr/union.go b/vendor/github.com/jcmturner/rpc/v2/ndr/union.go
new file mode 100644
index 0000000..6a657fa
--- /dev/null
+++ b/vendor/github.com/jcmturner/rpc/v2/ndr/union.go
@@ -0,0 +1,57 @@
+package ndr
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+// Union interface must be implemented by structs that will be unmarshaled from the NDR byte stream's union representation.
+// The union's discriminating tag will be passed to the SwitchFunc method.
+// The discriminating tag field must have the struct tag: `ndr:"unionTag"`
+// If the union is encapsulated the discriminating tag field must have the struct tag: `ndr:"encapsulated"`
+// The possible value fields that can be selected from must have the struct tag: `ndr:"unionField"`
+type Union interface {
+	SwitchFunc(t interface{}) string
+}
+
+// Union related constants such as struct tag values
+const (
+	unionSelectionFuncName = "SwitchFunc"
+	TagEncapsulated        = "encapsulated"
+	TagUnionTag            = "unionTag"
+	TagUnionField          = "unionField"
+)
+
+func (dec *Decoder) isUnion(field reflect.Value, tag reflect.StructTag) (r reflect.Value) {
+	ndrTag := parseTags(tag)
+	if !ndrTag.HasValue(TagUnionTag) {
+		return
+	}
+	r = field
+	// For a non-encapsulated union, the discriminant is marshalled into the transmitted data stream twice: once as the
+	// field or parameter, which is referenced by the switch_is construct, in the procedure argument list; and once as
+	// the first part of the union representation.
+	if !ndrTag.HasValue(TagEncapsulated) {
+		dec.r.Discard(int(r.Type().Size()))
+	}
+	return
+}
+
+// unionSelectedField returns the field name of which of the union values to fill
+func unionSelectedField(union, discriminant reflect.Value) (string, error) {
+	if !union.Type().Implements(reflect.TypeOf(new(Union)).Elem()) {
+		return "", errors.New("struct does not implement union interface")
+	}
+	args := []reflect.Value{discriminant}
+	// Call the SwitchFunc of the union struct to find the name of the field to fill with the value selected.
+	sf := union.MethodByName(unionSelectionFuncName)
+	if !sf.IsValid() {
+		return "", fmt.Errorf("could not find a selection function called %s in the union's struct representation", unionSelectionFuncName)
+	}
+	f := sf.Call(args)
+	if f[0].Kind() != reflect.String || f[0].String() == "" {
+		return "", fmt.Errorf("the union select function did not return a string for the name of the field to fill")
+	}
+	return f[0].String(), nil
+}
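
A sketch of a hypothetical non-encapsulated union that satisfies the documented contract; the Result type, its fields and the discriminant values are invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/jcmturner/rpc/v2/ndr"
)

// Result is a hypothetical non-encapsulated NDR union. The discriminant is
// tagged unionTag, the selectable value fields are tagged unionField, and
// SwitchFunc maps the discriminant value to the name of the field to fill.
type Result struct {
	Type  uint32 `ndr:"unionTag"`
	Value uint32 `ndr:"unionField"`
	Error uint32 `ndr:"unionField"`
}

// SwitchFunc names the field selected by the discriminant value.
func (r Result) SwitchFunc(tag interface{}) string {
	switch tag.(uint32) {
	case 1:
		return "Value"
	case 2:
		return "Error"
	}
	return ""
}

// Compile-time check that Result satisfies the Union interface.
var _ ndr.Union = Result{}

func main() {
	fmt.Println(Result{}.SwitchFunc(uint32(2))) // Error
}
```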
diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE
new file mode 100644
index 0000000..1eb75ef
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2019 Klaus Post. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md
new file mode 100644
index 0000000..ea7324d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/README.md
@@ -0,0 +1,79 @@
+# Finite State Entropy
+
+This package provides Finite State Entropy encoding and decoding.
+
+Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
+encoding provides a fast near-optimal symbol encoding/decoding
+for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
+
+## News
+
+ * Feb 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low level interface that allows you to compress single independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `(error)`           | An internal error occurred.                                                  |
+
+As can be seen above there are errors that will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using this
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
+
+# Performance
+
+A lot of factors affect speed. Block sizes and compressibility of the material are primary factors.
+All compression functions currently run only on the calling goroutine, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be
+beneficial to transpose all your input values down by 64.
+
+With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and
+around 300MB/s for decompression.
+
+The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
+
+# Plans
+
+At one point, more internals will be exposed to facilitate more "expert" usage of the components.
+
+A streaming interface is also likely to be implemented. Likely compatible with the [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
\ No newline at end of file
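
A short round-trip sketch of the API described in the README above; the input data and the error handling policy are illustrative.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/fse"
)

func main() {
	// Repetitive input with a skewed symbol distribution compresses well.
	in := bytes.Repeat([]byte("abbcccc"), 1024)

	var comp fse.Scratch
	out, err := fse.Compress(in, &comp)
	if err == fse.ErrIncompressible || err == fse.ErrUseRLE {
		// Normal outcomes for some inputs: store such blocks raw (or run-length encoded) instead.
		log.Fatalf("block not entropy coded: %v", err)
	}
	if err != nil {
		log.Fatalf("compress: %v", err)
	}
	fmt.Printf("compressed %d -> %d bytes\n", len(in), len(out))

	// Decompress with a separate Scratch; the Out buffer in a Scratch is
	// shared between compression and decompression, as noted above.
	var decomp fse.Scratch
	got, err := fse.Decompress(out, &decomp)
	if err != nil {
		log.Fatalf("decompress: %v", err)
	}
	fmt.Println("round trip ok:", bytes.Equal(got, in))
}
```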
diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go
new file mode 100644
index 0000000..f65eb39
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bitreader.go
@@ -0,0 +1,122 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.bitsRead += 8 - uint8(highBits(uint32(v)))
+	return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) uint16 {
+	if n == 0 || b.bitsRead >= 64 {
+		return 0
+	}
+	return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) uint16 {
+	const regMask = 64 - 1
+	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	b.bitsRead += n
+	return v
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+	// 2 bounds checks.
+	v := b.in[b.off-4:]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off > 4 {
+		v := b.in[b.off-4:]
+		v = v[:4]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value = (b.value << 32) | uint64(low)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value = (b.value << 8) | uint64(b.in[b.off-1])
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+	return b.bitsRead >= 64 && b.off == 0
+}
+
+// close closes the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go
new file mode 100644
index 0000000..43e4636
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go
@@ -0,0 +1,168 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import "fmt"
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+	bitContainer uint64
+	nBits        uint8
+	out          []byte
+}
+
+// bitMask16 contains bitmasks, with extra entries to avoid bounds checks.
+var bitMask16 = [32]uint16{
+	0, 1, 3, 7, 0xF, 0x1F,
+	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF} /* up to 16 bits */
+
+// addBits16NC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits16ZeroNC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+// This is fastest if bits can be zero.
+func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
+	if bits == 0 {
+		return
+	}
+	value <<= (16 - bits) & 15
+	value >>= (16 - bits) & 15
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// flush will flush all pending full bytes.
+// There will be at least 56 bits available for writing when this has been called.
+// Using flush32 is faster, but leaves less space for writing.
+func (b *bitWriter) flush() {
+	v := b.nBits >> 3
+	switch v {
+	case 0:
+	case 1:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+		)
+	case 2:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+		)
+	case 3:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+		)
+	case 4:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+		)
+	case 5:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+		)
+	case 6:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+		)
+	case 7:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+		)
+	case 8:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+			byte(b.bitContainer>>56),
+		)
+	default:
+		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
+	}
+	b.bitContainer >>= v << 3
+	b.nBits &= 7
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+	if b.nBits < 32 {
+		return
+	}
+	b.out = append(b.out,
+		byte(b.bitContainer),
+		byte(b.bitContainer>>8),
+		byte(b.bitContainer>>16),
+		byte(b.bitContainer>>24))
+	b.nBits -= 32
+	b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+	nbBytes := (b.nBits + 7) >> 3
+	for i := uint8(0); i < nbBytes; i++ {
+		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+	}
+	b.nBits = 0
+	b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() error {
+	// End mark
+	b.addBits16Clean(1, 1)
+	// flush until next byte.
+	b.flushAlign()
+	return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+	b.bitContainer = 0
+	b.nBits = 0
+	b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go
new file mode 100644
index 0000000..abade2d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bytereader.go
@@ -0,0 +1,47 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance the stream by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+	b2 := b.b[b.off:]
+	b2 = b2[:4]
+	v3 := uint32(b2[3])
+	v2 := uint32(b2[2])
+	v1 := uint32(b2[1])
+	v0 := uint32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go
new file mode 100644
index 0000000..6f34191
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/compress.go
@@ -0,0 +1,683 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Compress the input bytes. Input must be < 2GB.
+// Provide a Scratch buffer to avoid memory allocations.
+// Note that the output is also kept in the scratch buffer.
+// If input is too hard to compress, ErrIncompressible is returned.
+// If input is a single byte value repeated ErrUseRLE is returned.
+func Compress(in []byte, s *Scratch) ([]byte, error) {
+	if len(in) <= 1 {
+		return nil, ErrIncompressible
+	}
+	if len(in) > (2<<30)-1 {
+		return nil, errors.New("input too big, must be < 2GB")
+	}
+	s, err := s.prepare(in)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create histogram, if none was provided.
+	maxCount := s.maxCount
+	if maxCount == 0 {
+		maxCount = s.countSimple(in)
+	}
+	// Reset for next run.
+	s.clearCount = true
+	s.maxCount = 0
+	if maxCount == len(in) {
+		// One symbol, use RLE
+		return nil, ErrUseRLE
+	}
+	if maxCount == 1 || maxCount < (len(in)>>7) {
+		// Each symbol present maximum once or too well distributed.
+		return nil, ErrIncompressible
+	}
+	s.optimalTableLog()
+	err = s.normalizeCount()
+	if err != nil {
+		return nil, err
+	}
+	err = s.writeCount()
+	if err != nil {
+		return nil, err
+	}
+
+	if false {
+		err = s.validateNorm()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	err = s.buildCTable()
+	if err != nil {
+		return nil, err
+	}
+	err = s.compress(in)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.bw.out
+	// Check if we compressed.
+	if len(s.Out) >= len(in) {
+		return nil, ErrIncompressible
+	}
+	return s.Out, nil
+}
+
+// cState contains the compression state of a stream.
+type cState struct {
+	bw         *bitWriter
+	stateTable []uint16
+	state      uint16
+}
+
+// init will initialize the compression state to the first symbol of the stream.
+func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) {
+	c.bw = bw
+	c.stateTable = ct.stateTable
+
+	nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
+	im := int32((nbBitsOut << 16) - first.deltaNbBits)
+	lu := (im >> nbBitsOut) + first.deltaFindState
+	c.state = c.stateTable[lu]
+}
+
+// encode the output symbol provided and write it to the bitstream.
+func (c *cState) encode(symbolTT symbolTransform) {
+	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
+	dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
+	c.bw.addBits16NC(c.state, uint8(nbBitsOut))
+	c.state = c.stateTable[dstState]
+}
+
+// encode the output symbol provided and write it to the bitstream.
+func (c *cState) encodeZero(symbolTT symbolTransform) {
+	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
+	dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
+	c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut))
+	c.state = c.stateTable[dstState]
+}
+
+// flush will write the tablelog to the output and flush the remaining full bytes.
+func (c *cState) flush(tableLog uint8) {
+	c.bw.flush32()
+	c.bw.addBits16NC(c.state, tableLog)
+	c.bw.flush()
+}
+
+// compress is the main compression loop that will encode the input from the last byte to the first.
+func (s *Scratch) compress(src []byte) error {
+	if len(src) <= 2 {
+		return errors.New("compress: src too small")
+	}
+	tt := s.ct.symbolTT[:256]
+	s.bw.reset(s.Out)
+
+	// Our two states each encodes every second byte.
+	// Last byte encoded (first byte decoded) will always be encoded by c1.
+	var c1, c2 cState
+
+	// Encode so remaining size is divisible by 4.
+	ip := len(src)
+	if ip&1 == 1 {
+		c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
+		c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
+		c1.encodeZero(tt[src[ip-3]])
+		ip -= 3
+	} else {
+		c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
+		c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
+		ip -= 2
+	}
+	if ip&2 != 0 {
+		c2.encodeZero(tt[src[ip-1]])
+		c1.encodeZero(tt[src[ip-2]])
+		ip -= 2
+	}
+
+	// Main compression loop.
+	switch {
+	case !s.zeroBits && s.actualTableLog <= 8:
+		// We can encode 4 symbols without requiring a flush.
+		// We do not need to check if any output is 0 bits.
+		for ip >= 4 {
+			s.bw.flush32()
+			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			c2.encode(tt[v0])
+			c1.encode(tt[v1])
+			c2.encode(tt[v2])
+			c1.encode(tt[v3])
+			ip -= 4
+		}
+	case !s.zeroBits:
+		// We do not need to check if any output is 0 bits.
+		for ip >= 4 {
+			s.bw.flush32()
+			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			c2.encode(tt[v0])
+			c1.encode(tt[v1])
+			s.bw.flush32()
+			c2.encode(tt[v2])
+			c1.encode(tt[v3])
+			ip -= 4
+		}
+	case s.actualTableLog <= 8:
+		// We can encode 4 symbols without requiring a flush
+		for ip >= 4 {
+			s.bw.flush32()
+			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			c2.encodeZero(tt[v0])
+			c1.encodeZero(tt[v1])
+			c2.encodeZero(tt[v2])
+			c1.encodeZero(tt[v3])
+			ip -= 4
+		}
+	default:
+		for ip >= 4 {
+			s.bw.flush32()
+			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			c2.encodeZero(tt[v0])
+			c1.encodeZero(tt[v1])
+			s.bw.flush32()
+			c2.encodeZero(tt[v2])
+			c1.encodeZero(tt[v3])
+			ip -= 4
+		}
+	}
+
+	// Flush final state.
+	// Used to initialize state when decoding.
+	c2.flush(s.actualTableLog)
+	c1.flush(s.actualTableLog)
+
+	return s.bw.close()
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *Scratch) writeCount() error {
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+	)
+	if cap(s.Out) < maxHeaderSize {
+		s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
+	}
+	outP := uint(0)
+	out := s.Out[:maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return errors.New("internal error: remaining<1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += (bitCount + 7) / 8
+
+	if charnum > s.symbolLen {
+		return errors.New("internal error: charnum > s.symbolLen")
+	}
+	s.Out = out[:outP]
+	return nil
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaFindState int32
+	deltaNbBits    uint32
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *Scratch) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < tableSize {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *Scratch) buildCTable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	var cumul [maxSymbolValue + 2]int16
+
+	s.allocCtable()
+	tableSymbol := s.ct.tableSymbol[:tableSize]
+	// symbol start positions
+	{
+		cumul[0] = 0
+		for ui, v := range s.norm[:s.symbolLen-1] {
+			u := byte(ui) // one less than reference
+			if v == -1 {
+				// Low proba symbol
+				cumul[u+1] = cumul[u] + 1
+				tableSymbol[highThreshold] = u
+				highThreshold--
+			} else {
+				cumul[u+1] = cumul[u] + v
+			}
+		}
+		// Encode last symbol separately to avoid overflowing u
+		u := int(s.symbolLen - 1)
+		v := s.norm[s.symbolLen-1]
+		if v == -1 {
+			// Low proba symbol
+			cumul[u+1] = cumul[u] + 1
+			tableSymbol[highThreshold] = byte(u)
+			highThreshold--
+		} else {
+			cumul[u+1] = cumul[u] + v
+		}
+		if uint32(cumul[s.symbolLen]) != tableSize {
+			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+		}
+		cumul[s.symbolLen] = int16(tableSize) + 1
+	}
+	// Spread symbols
+	s.zeroBits = false
+	{
+		step := tableStep(tableSize)
+		tableMask := tableSize - 1
+		var position uint32
+		// if any symbol > largeLimit, we may have 0 bits output.
+		largeLimit := int16(1 << (s.actualTableLog - 1))
+		for ui, v := range s.norm[:s.symbolLen] {
+			symbol := byte(ui)
+			if v > largeLimit {
+				s.zeroBits = true
+			}
+			for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
+				tableSymbol[position] = symbol
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					position = (position + step) & tableMask
+				} /* Low proba area */
+			}
+		}
+
+		// Check if we have gone through all positions
+		if position != 0 {
+			return errors.New("position!=0")
+		}
+	}
+
+	// Build table
+	table := s.ct.stateTable
+	{
+		tsi := int(tableSize)
+		for u, v := range tableSymbol {
+			// TableU16 : sorted by symbol order; gives next state value
+			table[cumul[v]] = uint16(tsi + u)
+			cumul[v]++
+		}
+	}
+
+	// Build Symbol Transformation Table
+	{
+		total := int16(0)
+		symbolTT := s.ct.symbolTT[:s.symbolLen]
+		tableLog := s.actualTableLog
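+		// deltaNbBits is laid out so the encoder can derive the bit count as
+		// (state+deltaNbBits)>>16. For symbols with normalized count 1 or -1
+		// that count is always tableLog, hence tableLog in the high 16 bits
+		// minus the minimum state value (1<<tableLog).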
+		tl := (uint32(tableLog) << 16) - (1 << tableLog)
+		for i, v := range s.norm[:s.symbolLen] {
+			switch v {
+			case 0:
+			case -1, 1:
+				symbolTT[i].deltaNbBits = tl
+				symbolTT[i].deltaFindState = int32(total - 1)
+				total++
+			default:
+				maxBitsOut := uint32(tableLog) - highBits(uint32(v-1))
+				minStatePlus := uint32(v) << maxBitsOut
+				symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
+				symbolTT[i].deltaFindState = int32(total - v)
+				total += v
+			}
+		}
+		if total != int16(tableSize) {
+			return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
+		}
+	}
+	return nil
+}
+
+// countSimple will create a simple histogram in s.count.
+// Returns the biggest count.
+// Does not update s.clearCount.
+func (s *Scratch) countSimple(in []byte) (max int) {
+	for _, v := range in {
+		s.count[v]++
+	}
+	m := uint32(0)
+	for i, v := range s.count[:] {
+		if v > m {
+			m = v
+		}
+		if v > 0 {
+			s.symbolLen = uint16(i) + 1
+		}
+	}
+	return int(m)
+}
+
+// minTableLog provides the minimum logSize to safely represent a distribution.
+func (s *Scratch) minTableLog() uint8 {
+	minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1
+	minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2
+	if minBitsSrc < minBitsSymbols {
+		return uint8(minBitsSrc)
+	}
+	return uint8(minBitsSymbols)
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *Scratch) optimalTableLog() {
+	tableLog := s.TableLog
+	minBits := s.minTableLog()
+	maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2
+	if maxBitsSrc < tableLog {
+		// Accuracy can be reduced
+		tableLog = maxBitsSrc
+	}
+	if minBits > tableLog {
+		tableLog = minBits
+	}
+	// Need a minimum to safely represent all symbol values
+	if tableLog < minTablelog {
+		tableLog = minTablelog
+	}
+	if tableLog > maxTableLog {
+		tableLog = maxTableLog
+	}
+	s.actualTableLog = tableLog
+}
+
+var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
+
+// normalizeCount will normalize the count of the symbols so
+// the total is equal to the table size.
+func (s *Scratch) normalizeCount() error {
+	var (
+		tableLog          = s.actualTableLog
+		scale             = 62 - uint64(tableLog)
+		step              = (1 << 62) / uint64(s.br.remain())
+		vStep             = uint64(1) << (scale - 20)
+		stillToDistribute = int16(1 << tableLog)
+		largest           int
+		largestP          int16
+		lowThreshold      = (uint32)(s.br.remain() >> tableLog)
+	)
+
+	for i, cnt := range s.count[:s.symbolLen] {
+		// already handled
+		// if (count[s] == s.length) return 0;   /* rle special case */
+
+		if cnt == 0 {
+			s.norm[i] = 0
+			continue
+		}
+		if cnt <= lowThreshold {
+			s.norm[i] = -1
+			stillToDistribute--
+		} else {
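+			// step is ~2^62/total and scale is 62-tableLog, so this computes
+			// proba ≈ cnt * 2^tableLog / total in fixed point.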
+			proba := (int16)((uint64(cnt) * step) >> scale)
+			if proba < 8 {
+				restToBeat := vStep * uint64(rtbTable[proba])
+				v := uint64(cnt)*step - (uint64(proba) << scale)
+				if v > restToBeat {
+					proba++
+				}
+			}
+			if proba > largestP {
+				largestP = proba
+				largest = i
+			}
+			s.norm[i] = proba
+			stillToDistribute -= proba
+		}
+	}
+
+	if -stillToDistribute >= (s.norm[largest] >> 1) {
+		// corner case, need another normalization method
+		return s.normalizeCount2()
+	}
+	s.norm[largest] += stillToDistribute
+	return nil
+}
+
+// Secondary normalization method.
+// To be used when primary method fails.
+func (s *Scratch) normalizeCount2() error {
+	const notYetAssigned = -2
+	var (
+		distributed  uint32
+		total        = uint32(s.br.remain())
+		tableLog     = s.actualTableLog
+		lowThreshold = total >> tableLog
+		lowOne       = (total * 3) >> (tableLog + 1)
+	)
+	for i, cnt := range s.count[:s.symbolLen] {
+		if cnt == 0 {
+			s.norm[i] = 0
+			continue
+		}
+		if cnt <= lowThreshold {
+			s.norm[i] = -1
+			distributed++
+			total -= cnt
+			continue
+		}
+		if cnt <= lowOne {
+			s.norm[i] = 1
+			distributed++
+			total -= cnt
+			continue
+		}
+		s.norm[i] = notYetAssigned
+	}
+	toDistribute := (1 << tableLog) - distributed
+
+	if (total / toDistribute) > lowOne {
+		// risk of rounding to zero
+		lowOne = (total * 3) / (toDistribute * 2)
+		for i, cnt := range s.count[:s.symbolLen] {
+			if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
+				s.norm[i] = 1
+				distributed++
+				total -= cnt
+				continue
+			}
+		}
+		toDistribute = (1 << tableLog) - distributed
+	}
+	if distributed == uint32(s.symbolLen)+1 {
+		// all values are pretty poor;
+		//   probably incompressible data (should have already been detected);
+		//   find max, then give all remaining points to max
+		var maxV int
+		var maxC uint32
+		for i, cnt := range s.count[:s.symbolLen] {
+			if cnt > maxC {
+				maxV = i
+				maxC = cnt
+			}
+		}
+		s.norm[maxV] += int16(toDistribute)
+		return nil
+	}
+
+	if total == 0 {
+		// all of the symbols were low enough for the lowOne or lowThreshold
+		for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) {
+			if s.norm[i] > 0 {
+				toDistribute--
+				s.norm[i]++
+			}
+		}
+		return nil
+	}
+
+	var (
+		vStepLog = 62 - uint64(tableLog)
+		mid      = uint64((1 << (vStepLog - 1)) - 1)
+		rStep    = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining
+		tmpTotal = mid
+	)
+	for i, cnt := range s.count[:s.symbolLen] {
+		if s.norm[i] == notYetAssigned {
+			var (
+				end    = tmpTotal + uint64(cnt)*rStep
+				sStart = uint32(tmpTotal >> vStepLog)
+				sEnd   = uint32(end >> vStepLog)
+				weight = sEnd - sStart
+			)
+			if weight < 1 {
+				return errors.New("weight < 1")
+			}
+			s.norm[i] = int16(weight)
+			tmpTotal = end
+		}
+	}
+	return nil
+}
+
+// validateNorm validates the normalized histogram table.
+func (s *Scratch) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.count[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
new file mode 100644
index 0000000..926f5f1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
@@ -0,0 +1,374 @@
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 15
+)
+
+// Decompress a block of data.
+// You can provide a scratch buffer to avoid allocations.
+// If nil is provided a temporary one will be allocated.
+// It is possible, but by no means guaranteed, that corrupt data will
+// return an error.
+// It is up to the caller to verify the integrity of the returned data.
+// Use a predefined Scratch to set the maximum acceptable output size.
+func Decompress(b []byte, s *Scratch) ([]byte, error) {
+	s, err := s.prepare(b)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.Out[:0]
+	err = s.readNCount()
+	if err != nil {
+		return nil, err
+	}
+	err = s.buildDtable()
+	if err != nil {
+		return nil, err
+	}
+	err = s.decompress()
+	if err != nil {
+		return nil, err
+	}
+
+	return s.Out, nil
+}
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *Scratch) readNCount() error {
+	var (
+		charnum   uint16
+		previous0 bool
+		b         = &s.br
+	)
+	iend := b.remain()
+	if iend < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 {
+		if previous0 {
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				n0 += 24
+				if b.off < iend-5 {
+					b.advance(2)
+					bitStream = b.Uint32() >> bitCount
+				} else {
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			for charnum < n0 {
+				s.norm[charnum&0xff] = 0
+				charnum++
+			}
+
+			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				bitStream = b.Uint32() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*(threshold) - 1) - (remaining)
+		var count int32
+
+		if (int32(bitStream) & (threshold - 1)) < max {
+			count = int32(bitStream) & (threshold - 1)
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		count-- // extra accuracy
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+		}
+		bitStream = b.Uint32() >> (bitCount & 31)
+	}
+	s.symbolLen = charnum
+
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return nil
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+type decSymbol struct {
+	newState uint16
+	symbol   uint8
+	nbBits   uint8
+}
+
+// allocDtable will allocate decoding tables if they are not big enough.
+func (s *Scratch) allocDtable() {
+	tableSize := 1 << s.actualTableLog
+	if cap(s.decTable) < tableSize {
+		s.decTable = make([]decSymbol, tableSize)
+	}
+	s.decTable = s.decTable[:tableSize]
+
+	if cap(s.ct.tableSymbol) < 256 {
+		s.ct.tableSymbol = make([]byte, 256)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:256]
+
+	if cap(s.ct.stateTable) < 256 {
+		s.ct.stateTable = make([]uint16, 256)
+	}
+	s.ct.stateTable = s.ct.stateTable[:256]
+}
+
+// buildDtable will build the decoding table.
+func (s *Scratch) buildDtable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	s.allocDtable()
+	symbolNext := s.ct.stateTable[:256]
+
+	// Init, lay down lowprob symbols
+	s.zeroBits = false
+	{
+		largeLimit := int16(1 << (s.actualTableLog - 1))
+		for i, v := range s.norm[:s.symbolLen] {
+			if v == -1 {
+				s.decTable[highThreshold].symbol = uint8(i)
+				highThreshold--
+				symbolNext[i] = 1
+			} else {
+				if v >= largeLimit {
+					s.zeroBits = true
+				}
+				symbolNext[i] = uint16(v)
+			}
+		}
+	}
+	// Spread symbols
+	{
+		tableMask := tableSize - 1
+		step := tableStep(tableSize)
+		position := uint32(0)
+		for ss, v := range s.norm[:s.symbolLen] {
+			for i := 0; i < int(v); i++ {
+				s.decTable[position].symbol = uint8(ss)
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					// lowprob area
+					position = (position + step) & tableMask
+				}
+			}
+		}
+		if position != 0 {
+			// position must reach all cells once, otherwise normalizedCounter is incorrect
+			return errors.New("corrupted input (position != 0)")
+		}
+	}
+
+	// Build Decoding table
+	{
+		tableSize := uint16(1 << s.actualTableLog)
+		for u, v := range s.decTable {
+			symbol := v.symbol
+			nextState := symbolNext[symbol]
+			symbolNext[symbol] = nextState + 1
+			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+			s.decTable[u].nbBits = nBits
+			newState := (nextState << nBits) - tableSize
+			if newState >= tableSize {
+				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+			}
+			if newState == uint16(u) && nBits == 0 {
+				// Seems weird that this is possible with nbits > 0.
+				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+			}
+			s.decTable[u].newState = newState
+		}
+	}
+	return nil
+}
+
+// decompress will decompress the bitstream.
+// If the buffer is over-read an error is returned.
+func (s *Scratch) decompress() error {
+	br := &s.bits
+	br.init(s.br.unread())
+
+	var s1, s2 decoder
+	// Initialize and decode first state and symbol.
+	s1.init(br, s.decTable, s.actualTableLog)
+	s2.init(br, s.decTable, s.actualTableLog)
+
+	// Use temp table to avoid bound checks/append penalty.
+	var tmp = s.ct.tableSymbol[:256]
+	var off uint8
+
+	// Main part
+	if !s.zeroBits {
+		for br.off >= 8 {
+			br.fillFast()
+			tmp[off+0] = s1.nextFast()
+			tmp[off+1] = s2.nextFast()
+			br.fillFast()
+			tmp[off+2] = s1.nextFast()
+			tmp[off+3] = s2.nextFast()
+			off += 4
+			// When off is 0, we have overflowed and should write.
+			if off == 0 {
+				s.Out = append(s.Out, tmp...)
+				if len(s.Out) >= s.DecompressLimit {
+					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
+				}
+			}
+		}
+	} else {
+		for br.off >= 8 {
+			br.fillFast()
+			tmp[off+0] = s1.next()
+			tmp[off+1] = s2.next()
+			br.fillFast()
+			tmp[off+2] = s1.next()
+			tmp[off+3] = s2.next()
+			off += 4
+			if off == 0 {
+				s.Out = append(s.Out, tmp...)
+				// When off is 0, we have overflowed and should write.
+				if len(s.Out) >= s.DecompressLimit {
+					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
+				}
+			}
+		}
+	}
+	s.Out = append(s.Out, tmp[:off]...)
+
+	// Final bits, a bit more expensive check
+	for {
+		if s1.finished() {
+			s.Out = append(s.Out, s1.final(), s2.final())
+			break
+		}
+		br.fill()
+		s.Out = append(s.Out, s1.next())
+		if s2.finished() {
+			s.Out = append(s.Out, s2.final(), s1.final())
+			break
+		}
+		s.Out = append(s.Out, s2.next())
+		if len(s.Out) >= s.DecompressLimit {
+			return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
+		}
+	}
+	return br.close()
+}
+
+// decoder keeps track of the current state and updates it from the bitstream.
+type decoder struct {
+	state uint16
+	br    *bitReader
+	dt    []decSymbol
+}
+
+// init will initialize the decoder and read the first state from the stream.
+func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) {
+	d.dt = dt
+	d.br = in
+	d.state = in.getBits(tableLog)
+}
+
+// next returns the next symbol and sets the next state.
+// At least tablelog bits must be available in the bit reader.
+func (d *decoder) next() uint8 {
+	n := &d.dt[d.state]
+	lowBits := d.br.getBits(n.nbBits)
+	d.state = n.newState + lowBits
+	return n.symbol
+}
+
+// finished returns true if all bits have been read from the bitstream
+// and the next state would require reading bits from the input.
+func (d *decoder) finished() bool {
+	return d.br.finished() && d.dt[d.state].nbBits > 0
+}
+
+// final returns the current state symbol without decoding the next.
+func (d *decoder) final() uint8 {
+	return d.dt[d.state].symbol
+}
+
+// nextFast returns the next symbol and sets the next state.
+// This can only be used if no symbols are 0 bits.
+// At least tablelog bits must be available in the bit reader.
+func (d *decoder) nextFast() uint8 {
+	n := d.dt[d.state]
+	lowBits := d.br.getBitsFast(n.nbBits)
+	d.state = n.newState + lowBits
+	return n.symbol
+}
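A minimal sketch of driving the `Decompress` API above from calling code, re-using a `Scratch` across calls and bounding output with `DecompressLimit`; the compressed input is a placeholder and only the `fse.Decompress`/`fse.Scratch` names from this file are assumed:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/fse"
)

func main() {
	// Placeholder: in real use this comes from an FSE compressor.
	var compressed []byte

	s := &fse.Scratch{}
	// Refuse to decode more than 1 MiB; 0 falls back to the ~2 GB default.
	s.DecompressLimit = 1 << 20

	out, err := fse.Decompress(compressed, s)
	if err != nil {
		fmt.Println("decompress:", err)
		return
	}
	// out aliases s.Out and will be re-used by the next call on the same
	// Scratch, so copy it if it must outlive that call.
	fmt.Println("decoded", len(out), "bytes")
}
```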
diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go
new file mode 100644
index 0000000..535cbad
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/fse.go
@@ -0,0 +1,144 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+// Package fse provides Finite State Entropy encoding and decoding.
+//
+// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding
+// for byte blocks as implemented in zstd.
+//
+// See https://github.com/klauspost/compress/tree/master/fse for more information.
+package fse
+
+import (
+	"errors"
+	"fmt"
+	"math/bits"
+)
+
+const (
+	/*!MEMORY_USAGE :
+	 *  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+	 *  Increasing memory usage improves compression ratio
+	 *  Reduced memory usage can improve speed, due to cache effect
+	 *  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+	maxMemoryUsage     = 14
+	defaultMemoryUsage = 13
+
+	maxTableLog     = maxMemoryUsage - 2
+	maxTablesize    = 1 << maxTableLog
+	defaultTablelog = defaultMemoryUsage - 2
+	minTablelog     = 5
+	maxSymbolValue  = 255
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+)
+
+// Scratch provides temporary storage for compression and decompression.
+type Scratch struct {
+	// Private
+	count    [maxSymbolValue + 1]uint32
+	norm     [maxSymbolValue + 1]int16
+	br       byteReader
+	bits     bitReader
+	bw       bitWriter
+	ct       cTable      // Compression tables.
+	decTable []decSymbol // Decompression table.
+	maxCount int         // count of the most probable symbol
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next compression/decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// DecompressLimit limits the maximum decoded size acceptable.
+	// If > 0 decompression will stop when approximately this many bytes
+	// have been decoded.
+	// If 0, maximum size will be 2GB.
+	DecompressLimit int
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	zeroBits       bool   // true if a symbol has prob > 50% and may be coded with zero bits.
+	clearCount     bool   // clear count
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	TableLog uint8
+}
+
+// Histogram returns the histogram, which the caller can populate to skip that step during compression.
+// It can also be used to inspect the histogram after compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *Scratch) Histogram() []uint32 {
+	return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = 255
+	}
+	if s.TableLog == 0 {
+		s.TableLog = defaultTablelog
+	}
+	if s.TableLog > maxTableLog {
+		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	s.br.init(in)
+	if s.DecompressLimit == 0 {
+		// Max size 2GB.
+		s.DecompressLimit = (2 << 30) - 1
+	}
+
+	return s, nil
+}
+
+// tableStep returns the next table index.
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
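The `Histogram`/`HistogramFinished` pair above lets a caller that already knows the symbol frequencies skip the counting pass. A sketch, assuming the package exposes `Compress(in []byte, s *Scratch) ([]byte, error)` as the counterpart of `Decompress`:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/fse"
)

// compressWithHistogram counts symbols itself and hands the histogram to the
// Scratch so the compressor can skip its own counting pass.
// It assumes fse.Compress(in []byte, s *fse.Scratch) ([]byte, error) exists.
func compressWithHistogram(in []byte, s *fse.Scratch) ([]byte, error) {
	hist := s.Histogram() // always length 256
	for i := range hist {
		hist[i] = 0
	}
	var maxSymbol uint8
	var maxCount uint32
	for _, b := range in {
		hist[b]++
		if hist[b] > maxCount {
			maxCount = hist[b]
		}
		if b > maxSymbol {
			maxSymbol = b
		}
	}
	// The values are accepted at face value, so they must be exact.
	s.HistogramFinished(maxSymbol, int(maxCount))
	return fse.Compress(in, s)
}

func main() {
	in := bytes.Repeat([]byte("abbcccdddd"), 200)
	out, err := compressWithHistogram(in, &fse.Scratch{})
	fmt.Println(len(in), "->", len(out), err)
}
```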
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 0000000..b3d2629
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 0000000..8b6e5c6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders),
+a Huffman codec designed for modern CPUs, features out-of-order (OoO) operations on multiple ALUs
+(Arithmetic Logic Units), achieving extremely fast compression and decompression speeds.
+
+It can be used for compressing input with a lot of similar values into the smallest number of bytes.
+It does not perform multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step after compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low-level interface for compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You provide the input and receive the output and possibly an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything OK, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)       |
+| `(error)`           | An internal error occurred                                                   |
+
+As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using the output
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do note, however, that this information is *not* stored in the output block, and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separately from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first step of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable` and it will return the data part of the block,
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested; it will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
+changes will likely not be accepted. If in doubt open an issue before writing the PR.

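A round-trip sketch of the flow described in the README above: `Compress1X` has the signature added in huff0/compress.go below, while the `ReadTable` and `Scratch.Decompress1X` signatures are assumed from the godoc links and should be verified against the package:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	in := bytes.Repeat([]byte("entropy coding works best on skewed data "), 100)

	comp, _, err := huff0.Compress1X(in, &huff0.Scratch{})
	if err != nil {
		// ErrIncompressible and ErrUseRLE are expected outcomes, not failures;
		// a real caller would store the block raw or as RLE instead.
		fmt.Println("not compressed:", err)
		return
	}

	// Use a separate Scratch for decoding: comp aliases the compressor's Out
	// buffer, which would otherwise be overwritten. Assumed signatures:
	// ReadTable(in, s) returns the scratch, the remaining data and an error;
	// Scratch.Decompress1X decodes that data.
	d, data, err := huff0.ReadTable(comp, &huff0.Scratch{})
	if err != nil {
		fmt.Println("read table:", err)
		return
	}
	out, err := d.Decompress1X(data)
	if err != nil {
		fmt.Println("decompress:", err)
		return
	}
	fmt.Println("round trip ok:", bytes.Equal(in, out))
}
```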
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 0000000..a4979e8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,329 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
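+	// Skip the zero padding and the end-of-stream marker bit (the highest set
+	// bit of the last byte): v=0x80 skips 1 bit, v=0x01 skips the whole byte.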
+	b.bitsRead += 8 - uint8(highBit32(uint32(v)))
+	return nil
+}
+
+// peekBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) peekBitsFast(n uint8) uint16 {
+	const regMask = 64 - 1
+	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	return v
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+
+	// 2 bounds checks.
+	v := b.in[b.off-4 : b.off]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+func (b *bitReader) advance(n uint8) {
+	b.bitsRead += n
+}
+
+// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off > 4 {
+		v := b.in[b.off-4:]
+		v = v[:4]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value = (b.value << 32) | uint64(low)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value = (b.value << 8) | uint64(b.in[b.off-1])
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+	return b.off == 0 && b.bitsRead >= 64
+}
+
+// close will close the bitstream and return an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
+// bitReaderBytes reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderBytes struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderBytes) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.advance(8 - uint8(highBit32(uint32(v))))
+	return nil
+}
+
+// peekByteFast returns the next byte without advancing.
+// There are no checks if the buffer is filled.
+func (b *bitReaderBytes) peekByteFast() uint8 {
+	got := uint8(b.value >> 56)
+	return got
+}
+
+func (b *bitReaderBytes) advance(n uint8) {
+	b.bitsRead += n
+	b.value <<= n & 63
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReaderBytes) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+
+	// 2 bounds checks.
+	v := b.in[b.off-4 : b.off]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value |= uint64(low) << (b.bitsRead - 32)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read.
+func (b *bitReaderBytes) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReaderBytes) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off > 4 {
+		v := b.in[b.off-4:]
+		v = v[:4]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value |= uint64(low) << (b.bitsRead - 32)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8)
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReaderBytes) finished() bool {
+	return b.off == 0 && b.bitsRead >= 64
+}
+
+// close will close the bitstream and return an error if out-of-buffer reads occurred.
+func (b *bitReaderBytes) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
+// bitReaderShifted reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderShifted struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderShifted) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.advance(8 - uint8(highBit32(uint32(v))))
+	return nil
+}
+
+// peekBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
+	return uint16(b.value >> ((64 - n) & 63))
+}
+
+func (b *bitReaderShifted) advance(n uint8) {
+	b.bitsRead += n
+	b.value <<= n & 63
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReaderShifted) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+
+	// 2 bounds checks.
+	v := b.in[b.off-4 : b.off]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read.
+func (b *bitReaderShifted) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReaderShifted) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off > 4 {
+		v := b.in[b.off-4:]
+		v = v[:4]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63)
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReaderShifted) finished() bool {
+	return b.off == 0 && b.bitsRead >= 64
+}
+
+// close the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReaderShifted) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
new file mode 100644
index 0000000..6bce4e8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -0,0 +1,210 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import "fmt"
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+	bitContainer uint64
+	nBits        uint8
+	out          []byte
+}
+
+// bitMask16 contains bitmasks. It has extra entries to avoid bounds checks.
+var bitMask16 = [32]uint16{
+	0, 1, 3, 7, 0xF, 0x1F,
+	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF} /* up to 16 bits */
+
+// addBits16NC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// encSymbol will write the code for a single symbol (up to 16 bits).
+// It will not check if there is space for it, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) encSymbol(ct cTable, symbol byte) {
+	enc := ct[symbol]
+	b.bitContainer |= uint64(enc.val) << (b.nBits & 63)
+	if false {
+		if enc.nBits == 0 {
+			panic("nbits 0")
+		}
+	}
+	b.nBits += enc.nBits
+}
+
+// encTwoSymbols will write the codes for two symbols (up to 32 bits).
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
+	encA := ct[av]
+	encB := ct[bv]
+	sh := b.nBits & 63
+	combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63))
+	b.bitContainer |= combined << sh
+	if false {
+		if encA.nBits == 0 {
+			panic("nbitsA 0")
+		}
+		if encB.nBits == 0 {
+			panic("nbitsB 0")
+		}
+	}
+	b.nBits += encA.nBits + encB.nBits
+}
+
+// addBits16ZeroNC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+// This is fastest if bits can be zero.
+func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
+	if bits == 0 {
+		return
+	}
+	value <<= (16 - bits) & 15
+	value >>= (16 - bits) & 15
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// flush will flush all pending full bytes.
+// There will be at least 56 bits available for writing when this has been called.
+// Using flush32 is faster, but leaves less space for writing.
+func (b *bitWriter) flush() {
+	v := b.nBits >> 3
+	switch v {
+	case 0:
+		return
+	case 1:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+		)
+		b.bitContainer >>= 1 << 3
+	case 2:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+		)
+		b.bitContainer >>= 2 << 3
+	case 3:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+		)
+		b.bitContainer >>= 3 << 3
+	case 4:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+		)
+		b.bitContainer >>= 4 << 3
+	case 5:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+		)
+		b.bitContainer >>= 5 << 3
+	case 6:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+		)
+		b.bitContainer >>= 6 << 3
+	case 7:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+		)
+		b.bitContainer >>= 7 << 3
+	case 8:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+			byte(b.bitContainer>>56),
+		)
+		b.bitContainer = 0
+		b.nBits = 0
+		return
+	default:
+		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
+	}
+	b.nBits &= 7
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+	if b.nBits < 32 {
+		return
+	}
+	b.out = append(b.out,
+		byte(b.bitContainer),
+		byte(b.bitContainer>>8),
+		byte(b.bitContainer>>16),
+		byte(b.bitContainer>>24))
+	b.nBits -= 32
+	b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+	nbBytes := (b.nBits + 7) >> 3
+	for i := uint8(0); i < nbBytes; i++ {
+		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+	}
+	b.nBits = 0
+	b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() error {
+	// End mark
+	b.addBits16Clean(1, 1)
+	// flush until next byte.
+	b.flushAlign()
+	return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+	b.bitContainer = 0
+	b.nBits = 0
+	b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
new file mode 100644
index 0000000..50bcdf6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go
@@ -0,0 +1,54 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance the stream by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// Int32 returns a little endian int32 starting at current offset.
+func (b byteReader) Int32() int32 {
+	v3 := int32(b.b[b.off+3])
+	v2 := int32(b.b[b.off+2])
+	v1 := int32(b.b[b.off+1])
+	v0 := int32(b.b[b.off])
+	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+	v3 := uint32(b.b[b.off+3])
+	v2 := uint32(b.b[b.off+2])
+	v1 := uint32(b.b[b.off+1])
+	v0 := uint32(b.b[b.off])
+	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
new file mode 100644
index 0000000..0823c92
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -0,0 +1,656 @@
+package huff0
+
+import (
+	"fmt"
+	"runtime"
+	"sync"
+)
+
+// Compress1X will compress the input.
+// The output can be decoded using Decompress1X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	return compress(in, s, s.compress1X)
+}
+
+// Compress4X will compress the input. The input is split into 4 independent blocks
+// and compressed similar to Compress1X.
+// The output can be decoded using Decompress4X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	if false {
+		// TODO: compress4Xp only slightly faster.
+		const parallelThreshold = 8 << 10
+		if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 {
+			return compress(in, s, s.compress4X)
+		}
+		return compress(in, s, s.compress4Xp)
+	}
+	return compress(in, s, s.compress4X)
+}
+
+func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) {
+	// Nuke previous table if we cannot reuse anyway.
+	if s.Reuse == ReusePolicyNone {
+		s.prevTable = s.prevTable[:0]
+	}
+
+	// Create histogram, if none was provided.
+	maxCount := s.maxCount
+	var canReuse = false
+	if maxCount == 0 {
+		maxCount, canReuse = s.countSimple(in)
+	} else {
+		canReuse = s.canUseTable(s.prevTable)
+	}
+
+	// We want the output size to be less than this:
+	wantSize := len(in)
+	if s.WantLogLess > 0 {
+		wantSize -= wantSize >> s.WantLogLess
+	}
+
+	// Reset for next run.
+	s.clearCount = true
+	s.maxCount = 0
+	if maxCount >= len(in) {
+		if maxCount > len(in) {
+			return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
+		}
+		if len(in) == 1 {
+			return nil, false, ErrIncompressible
+		}
+		// One symbol, use RLE
+		return nil, false, ErrUseRLE
+	}
+	if maxCount == 1 || maxCount < (len(in)>>7) {
+		// Each symbol present maximum once or too well distributed.
+		return nil, false, ErrIncompressible
+	}
+	if s.Reuse == ReusePolicyMust && !canReuse {
+		// We must reuse, but we can't.
+		return nil, false, ErrIncompressible
+	}
+	if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse {
+		keepTable := s.cTable
+		keepTL := s.actualTableLog
+		s.cTable = s.prevTable
+		s.actualTableLog = s.prevTableLog
+		s.Out, err = compressor(in)
+		s.cTable = keepTable
+		s.actualTableLog = keepTL
+		if err == nil && len(s.Out) < wantSize {
+			s.OutData = s.Out
+			return s.Out, true, nil
+		}
+		if s.Reuse == ReusePolicyMust {
+			return nil, false, ErrIncompressible
+		}
+		// Do not attempt to re-use later.
+		s.prevTable = s.prevTable[:0]
+	}
+
+	// Calculate new table.
+	err = s.buildCTable()
+	if err != nil {
+		return nil, false, err
+	}
+
+	if false && !s.canUseTable(s.cTable) {
+		panic("invalid table generated")
+	}
+
+	if s.Reuse == ReusePolicyAllow && canReuse {
+		hSize := len(s.Out)
+		oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen])
+		newSize := s.cTable.estimateSize(s.count[:s.symbolLen])
+		if oldSize <= hSize+newSize || hSize+12 >= wantSize {
+			// Retain cTable even if we re-use.
+			keepTable := s.cTable
+			keepTL := s.actualTableLog
+
+			s.cTable = s.prevTable
+			s.actualTableLog = s.prevTableLog
+			s.Out, err = compressor(in)
+
+			// Restore ctable.
+			s.cTable = keepTable
+			s.actualTableLog = keepTL
+			if err != nil {
+				return nil, false, err
+			}
+			if len(s.Out) >= wantSize {
+				return nil, false, ErrIncompressible
+			}
+			s.OutData = s.Out
+			return s.Out, true, nil
+		}
+	}
+
+	// Use new table
+	err = s.cTable.write(s)
+	if err != nil {
+		s.OutTable = nil
+		return nil, false, err
+	}
+	s.OutTable = s.Out
+
+	// Compress using new table
+	s.Out, err = compressor(in)
+	if err != nil {
+		s.OutTable = nil
+		return nil, false, err
+	}
+	if len(s.Out) >= wantSize {
+		s.OutTable = nil
+		return nil, false, ErrIncompressible
+	}
+	// Move current table into previous.
+	s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0]
+	s.OutData = s.Out[len(s.OutTable):]
+	return s.Out, false, nil
+}
+
+func (s *Scratch) compress1X(src []byte) ([]byte, error) {
+	return s.compress1xDo(s.Out, src)
+}
+
+func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
+	var bw = bitWriter{out: dst}
+
+	// N is length divisible by 4.
+	n := len(src)
+	n -= n & 3
+	cTable := s.cTable[:256]
+
+	// Encode last bytes.
+	for i := len(src) & 3; i > 0; i-- {
+		bw.encSymbol(cTable, src[n+i-1])
+	}
+	n -= 4
+	if s.actualTableLog <= 8 {
+		for ; n >= 0; n -= 4 {
+			tmp := src[n : n+4]
+			// tmp should be len 4
+			bw.flush32()
+			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
+			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
+		}
+	} else {
+		for ; n >= 0; n -= 4 {
+			tmp := src[n : n+4]
+			// tmp should be len 4
+			bw.flush32()
+			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
+			bw.flush32()
+			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
+		}
+	}
+	err := bw.close()
+	return bw.out, err
+}
+
+var sixZeros [6]byte
+
+func (s *Scratch) compress4X(src []byte) ([]byte, error) {
+	if len(src) < 12 {
+		return nil, ErrIncompressible
+	}
+	segmentSize := (len(src) + 3) / 4
+
+	// Add placeholder for output length
+	offsetIdx := len(s.Out)
+	s.Out = append(s.Out, sixZeros[:]...)
+
+	for i := 0; i < 4; i++ {
+		toDo := src
+		if len(toDo) > segmentSize {
+			toDo = toDo[:segmentSize]
+		}
+		src = src[len(toDo):]
+
+		var err error
+		idx := len(s.Out)
+		s.Out, err = s.compress1xDo(s.Out, toDo)
+		if err != nil {
+			return nil, err
+		}
+		// Write compressed length as little endian before block.
+		if i < 3 {
+			// Last length is not written.
+			length := len(s.Out) - idx
+			s.Out[i*2+offsetIdx] = byte(length)
+			s.Out[i*2+offsetIdx+1] = byte(length >> 8)
+		}
+	}
+
+	return s.Out, nil
+}
+
+// compress4Xp will compress 4 streams using separate goroutines.
+func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
+	if len(src) < 12 {
+		return nil, ErrIncompressible
+	}
+	// Add placeholder for output length
+	s.Out = s.Out[:6]
+
+	segmentSize := (len(src) + 3) / 4
+	var wg sync.WaitGroup
+	var errs [4]error
+	wg.Add(4)
+	for i := 0; i < 4; i++ {
+		toDo := src
+		if len(toDo) > segmentSize {
+			toDo = toDo[:segmentSize]
+		}
+		src = src[len(toDo):]
+
+		// Separate goroutine for each block.
+		go func(i int) {
+			s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
+			wg.Done()
+		}(i)
+	}
+	wg.Wait()
+	for i := 0; i < 4; i++ {
+		if errs[i] != nil {
+			return nil, errs[i]
+		}
+		o := s.tmpOut[i]
+		// Write compressed length as little endian before block.
+		if i < 3 {
+			// Last length is not written.
+			s.Out[i*2] = byte(len(o))
+			s.Out[i*2+1] = byte(len(o) >> 8)
+		}
+
+		// Write output.
+		s.Out = append(s.Out, o...)
+	}
+	return s.Out, nil
+}
+
+// countSimple will create a simple histogram in s.count.
+// Returns the biggest count.
+// Does not update s.clearCount.
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
+	reuse = true
+	for _, v := range in {
+		s.count[v]++
+	}
+	m := uint32(0)
+	if len(s.prevTable) > 0 {
+		for i, v := range s.count[:] {
+			if v > m {
+				m = v
+			}
+			if v > 0 {
+				s.symbolLen = uint16(i) + 1
+				if i >= len(s.prevTable) {
+					reuse = false
+				} else {
+					if s.prevTable[i].nBits == 0 {
+						reuse = false
+					}
+				}
+			}
+		}
+		return int(m), reuse
+	}
+	for i, v := range s.count[:] {
+		if v > m {
+			m = v
+		}
+		if v > 0 {
+			s.symbolLen = uint16(i) + 1
+		}
+	}
+	return int(m), false
+}
+
+func (s *Scratch) canUseTable(c cTable) bool {
+	if len(c) < int(s.symbolLen) {
+		return false
+	}
+	for i, v := range s.count[:s.symbolLen] {
+		if v != 0 && c[i].nBits == 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func (s *Scratch) validateTable(c cTable) bool {
+	if len(c) < int(s.symbolLen) {
+		return false
+	}
+	for i, v := range s.count[:s.symbolLen] {
+		if v != 0 {
+			if c[i].nBits == 0 {
+				return false
+			}
+			if c[i].nBits > s.actualTableLog {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// minTableLog provides the minimum logSize to safely represent a distribution.
+func (s *Scratch) minTableLog() uint8 {
+	minBitsSrc := highBit32(uint32(s.br.remain())) + 1
+	minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
+	if minBitsSrc < minBitsSymbols {
+		return uint8(minBitsSrc)
+	}
+	return uint8(minBitsSymbols)
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *Scratch) optimalTableLog() {
+	tableLog := s.TableLog
+	minBits := s.minTableLog()
+	maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
+	if maxBitsSrc < tableLog {
+		// Accuracy can be reduced
+		tableLog = maxBitsSrc
+	}
+	if minBits > tableLog {
+		tableLog = minBits
+	}
+	// Need a minimum to safely represent all symbol values
+	if tableLog < minTablelog {
+		tableLog = minTablelog
+	}
+	if tableLog > tableLogMax {
+		tableLog = tableLogMax
+	}
+	s.actualTableLog = tableLog
+}
+
+type cTableEntry struct {
+	val   uint16
+	nBits uint8
+	// We have 8 bits extra
+}
+
+const huffNodesMask = huffNodesLen - 1
+
+func (s *Scratch) buildCTable() error {
+	s.optimalTableLog()
+	s.huffSort()
+	if cap(s.cTable) < maxSymbolValue+1 {
+		s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1)
+	} else {
+		s.cTable = s.cTable[:s.symbolLen]
+		for i := range s.cTable {
+			s.cTable[i] = cTableEntry{}
+		}
+	}
+
+	var startNode = int16(s.symbolLen)
+	nonNullRank := s.symbolLen - 1
+
+	nodeNb := startNode
+	huffNode := s.nodes[1 : huffNodesLen+1]
+
+	// This overlays the slice above, but allows "-1" index lookups.
+	// Different from reference implementation.
+	huffNode0 := s.nodes[0 : huffNodesLen+1]
+
+	for huffNode[nonNullRank].count == 0 {
+		nonNullRank--
+	}
+
+	lowS := int16(nonNullRank)
+	nodeRoot := nodeNb + lowS - 1
+	lowN := nodeNb
+	huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
+	huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
+	nodeNb++
+	lowS -= 2
+	for n := nodeNb; n <= nodeRoot; n++ {
+		huffNode[n].count = 1 << 30
+	}
+	// fake entry, strong barrier
+	huffNode0[0].count = 1 << 31
+
+	// create parents
+	for nodeNb <= nodeRoot {
+		var n1, n2 int16
+		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+			n1 = lowS
+			lowS--
+		} else {
+			n1 = lowN
+			lowN++
+		}
+		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+			n2 = lowS
+			lowS--
+		} else {
+			n2 = lowN
+			lowN++
+		}
+
+		huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
+		huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
+		nodeNb++
+	}
+
+	// distribute weights (unlimited tree height)
+	huffNode[nodeRoot].nbBits = 0
+	for n := nodeRoot - 1; n >= startNode; n-- {
+		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+	}
+	for n := uint16(0); n <= nonNullRank; n++ {
+		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+	}
+	s.actualTableLog = s.setMaxHeight(int(nonNullRank))
+	maxNbBits := s.actualTableLog
+
+	// fill result into tree (val, nbBits)
+	if maxNbBits > tableLogMax {
+		return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
+	}
+	var nbPerRank [tableLogMax + 1]uint16
+	var valPerRank [16]uint16
+	for _, v := range huffNode[:nonNullRank+1] {
+		nbPerRank[v.nbBits]++
+	}
+	// determine starting value per rank
+	{
+		min := uint16(0)
+		for n := maxNbBits; n > 0; n-- {
+			// get starting value within each rank
+			valPerRank[n] = min
+			min += nbPerRank[n]
+			min >>= 1
+		}
+	}
+
+	// push nbBits per symbol, symbol order
+	for _, v := range huffNode[:nonNullRank+1] {
+		s.cTable[v.symbol].nBits = v.nbBits
+	}
+
+	// assign value within rank, symbol order
+	t := s.cTable[:s.symbolLen]
+	for n, val := range t {
+		nbits := val.nBits & 15
+		v := valPerRank[nbits]
+		t[n].val = v
+		valPerRank[nbits] = v + 1
+	}
+
+	return nil
+}
+
+// huffSort will sort symbols by count in decreasing order.
+func (s *Scratch) huffSort() {
+	type rankPos struct {
+		base    uint32
+		current uint32
+	}
+
+	// Clear nodes
+	nodes := s.nodes[:huffNodesLen+1]
+	s.nodes = nodes
+	nodes = nodes[1 : huffNodesLen+1]
+
+	// Sort into buckets based on length of symbol count.
+	var rank [32]rankPos
+	for _, v := range s.count[:s.symbolLen] {
+		r := highBit32(v+1) & 31
+		rank[r].base++
+	}
+	// maxBitLength is log2(BlockSizeMax) + 1
+	const maxBitLength = 18 + 1
+	for n := maxBitLength; n > 0; n-- {
+		rank[n-1].base += rank[n].base
+	}
+	for n := range rank[:maxBitLength] {
+		rank[n].current = rank[n].base
+	}
+	for n, c := range s.count[:s.symbolLen] {
+		r := (highBit32(c+1) + 1) & 31
+		pos := rank[r].current
+		rank[r].current++
+		prev := nodes[(pos-1)&huffNodesMask]
+		for pos > rank[r].base && c > prev.count {
+			nodes[pos&huffNodesMask] = prev
+			pos--
+			prev = nodes[(pos-1)&huffNodesMask]
+		}
+		nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
+	}
+}
+
+func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
+	maxNbBits := s.actualTableLog
+	huffNode := s.nodes[1 : huffNodesLen+1]
+	//huffNode = huffNode[: huffNodesLen]
+
+	largestBits := huffNode[lastNonNull].nbBits
+
+	// early exit : no elt > maxNbBits
+	if largestBits <= maxNbBits {
+		return largestBits
+	}
+	totalCost := int(0)
+	baseCost := int(1) << (largestBits - maxNbBits)
+	n := uint32(lastNonNull)
+
+	for huffNode[n].nbBits > maxNbBits {
+		totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits))
+		huffNode[n].nbBits = maxNbBits
+		n--
+	}
+	// n stops at huffNode[n].nbBits <= maxNbBits
+
+	for huffNode[n].nbBits == maxNbBits {
+		n--
+	}
+	// n ends at the index of the smallest symbol using < maxNbBits
+
+	// renorm totalCost
+	totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */
+
+	// repay normalized cost
+	{
+		const noSymbol = 0xF0F0F0F0
+		var rankLast [tableLogMax + 2]uint32
+
+		for i := range rankLast[:] {
+			rankLast[i] = noSymbol
+		}
+
+		// Get pos of last (smallest) symbol per rank
+		{
+			currentNbBits := maxNbBits
+			for pos := int(n); pos >= 0; pos-- {
+				if huffNode[pos].nbBits >= currentNbBits {
+					continue
+				}
+				currentNbBits = huffNode[pos].nbBits // < maxNbBits
+				rankLast[maxNbBits-currentNbBits] = uint32(pos)
+			}
+		}
+
+		for totalCost > 0 {
+			nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1
+
+			for ; nBitsToDecrease > 1; nBitsToDecrease-- {
+				highPos := rankLast[nBitsToDecrease]
+				lowPos := rankLast[nBitsToDecrease-1]
+				if highPos == noSymbol {
+					continue
+				}
+				if lowPos == noSymbol {
+					break
+				}
+				highTotal := huffNode[highPos].count
+				lowTotal := 2 * huffNode[lowPos].count
+				if highTotal <= lowTotal {
+					break
+				}
+			}
+			// only triggered when no more rank-1 symbols are left => find the closest one (there is necessarily at least one)
+			// HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary
+			// FIXME: try to remove
+			for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) {
+				nBitsToDecrease++
+			}
+			totalCost -= 1 << (nBitsToDecrease - 1)
+			if rankLast[nBitsToDecrease-1] == noSymbol {
+				// this rank is no longer empty
+				rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
+			}
+			huffNode[rankLast[nBitsToDecrease]].nbBits++
+			if rankLast[nBitsToDecrease] == 0 {
+				/* special case, reached largest symbol */
+				rankLast[nBitsToDecrease] = noSymbol
+			} else {
+				rankLast[nBitsToDecrease]--
+				if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease {
+					rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
+				}
+			}
+		}
+
+		for totalCost < 0 { /* Sometimes, cost correction overshoot */
+			if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
+				for huffNode[n].nbBits == maxNbBits {
+					n--
+				}
+				huffNode[n+1].nbBits--
+				rankLast[1] = n + 1
+				totalCost++
+				continue
+			}
+			huffNode[rankLast[1]+1].nbBits--
+			rankLast[1]++
+			totalCost++
+		}
+	}
+	return maxNbBits
+}
+
+type nodeElt struct {
+	count  uint32
+	parent uint16
+	symbol byte
+	nbBits uint8
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
new file mode 100644
index 0000000..41703bb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -0,0 +1,1164 @@
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/klauspost/compress/fse"
+)
+
+type dTable struct {
+	single []dEntrySingle
+	double []dEntryDouble
+}
+
+// single-symbols decoding
+type dEntrySingle struct {
+	entry uint16
+}
+
+// double-symbols decoding
+type dEntryDouble struct {
+	seq   uint16
+	nBits uint8
+	len   uint8
+}
+
+// Uses special code for all tables that are < 8 bits.
+const use8BitTables = true
+
+// ReadTable will read a table from the input.
+// The size of the input may be larger than the table definition.
+// Any content remaining after the table definition will be returned.
+// If no Scratch is provided a new one is allocated.
+// The returned Scratch can be used for encoding or decoding input using this table.
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return s, nil, err
+	}
+	if len(in) <= 1 {
+		return s, nil, errors.New("input too small for table")
+	}
+	iSize := in[0]
+	in = in[1:]
+	if iSize >= 128 {
+		// Uncompressed
+		oSize := iSize - 127
+		iSize = (oSize + 1) / 2
+		if int(iSize) > len(in) {
+			return s, nil, errors.New("input too small for table")
+		}
+		for n := uint8(0); n < oSize; n += 2 {
+			v := in[n/2]
+			s.huffWeight[n] = v >> 4
+			s.huffWeight[n+1] = v & 15
+		}
+		s.symbolLen = uint16(oSize)
+		in = in[iSize:]
+	} else {
+		if len(in) < int(iSize) {
+			return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in))
+		}
+		// FSE compressed weights
+		s.fse.DecompressLimit = 255
+		hw := s.huffWeight[:]
+		s.fse.Out = hw
+		b, err := fse.Decompress(in[:iSize], s.fse)
+		s.fse.Out = nil
+		if err != nil {
+			return s, nil, err
+		}
+		if len(b) > 255 {
+			return s, nil, errors.New("corrupt input: output table too large")
+		}
+		s.symbolLen = uint16(len(b))
+		in = in[iSize:]
+	}
+
+	// collect weight stats
+	var rankStats [16]uint32
+	weightTotal := uint32(0)
+	for _, v := range s.huffWeight[:s.symbolLen] {
+		if v > tableLogMax {
+			return s, nil, errors.New("corrupt input: weight too large")
+		}
+		v2 := v & 15
+		rankStats[v2]++
+		// (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0.
+		weightTotal += (1 << v2) >> 1
+	}
+	if weightTotal == 0 {
+		return s, nil, errors.New("corrupt input: weights zero")
+	}
+
+	// get last non-null symbol weight (implied, total must be 2^n)
+	{
+		tableLog := highBit32(weightTotal) + 1
+		if tableLog > tableLogMax {
+			return s, nil, errors.New("corrupt input: tableLog too big")
+		}
+		s.actualTableLog = uint8(tableLog)
+		// determine last weight
+		{
+			total := uint32(1) << tableLog
+			rest := total - weightTotal
+			verif := uint32(1) << highBit32(rest)
+			lastWeight := highBit32(rest) + 1
+			if verif != rest {
+				// last value must be a clean power of 2
+				return s, nil, errors.New("corrupt input: last value not power of two")
+			}
+			s.huffWeight[s.symbolLen] = uint8(lastWeight)
+			s.symbolLen++
+			rankStats[lastWeight]++
+		}
+	}
+
+	if (rankStats[1] < 2) || (rankStats[1]&1 != 0) {
+		// by construction : at least 2 elts of rank 1, must be even
+		return s, nil, errors.New("corrupt input: min elt size, even check failed ")
+	}
+
+	// TODO: Choose between single/double symbol decoding
+
+	// Calculate starting value for each rank
+	{
+		var nextRankStart uint32
+		for n := uint8(1); n < s.actualTableLog+1; n++ {
+			current := nextRankStart
+			nextRankStart += rankStats[n] << (n - 1)
+			rankStats[n] = current
+		}
+	}
+
+	// fill DTable (always full size)
+	tSize := 1 << tableLogMax
+	if len(s.dt.single) != tSize {
+		s.dt.single = make([]dEntrySingle, tSize)
+	}
+	cTable := s.prevTable
+	if cap(cTable) < maxSymbolValue+1 {
+		cTable = make([]cTableEntry, 0, maxSymbolValue+1)
+	}
+	cTable = cTable[:maxSymbolValue+1]
+	s.prevTable = cTable[:s.symbolLen]
+	s.prevTableLog = s.actualTableLog
+
+	for n, w := range s.huffWeight[:s.symbolLen] {
+		if w == 0 {
+			cTable[n] = cTableEntry{
+				val:   0,
+				nBits: 0,
+			}
+			continue
+		}
+		length := (uint32(1) << w) >> 1
+		d := dEntrySingle{
+			entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8),
+		}
+
+		rank := &rankStats[w]
+		cTable[n] = cTableEntry{
+			val:   uint16(*rank >> (w - 1)),
+			nBits: uint8(d.entry),
+		}
+
+		single := s.dt.single[*rank : *rank+length]
+		for i := range single {
+			single[i] = d
+		}
+		*rank += length
+	}
+
+	return s, in, nil
+}
+
+// Decompress1X will decompress a 1X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// Before this is called, the table must be initialized with ReadTable unless
+// the encoder re-used the table.
+// Deprecated: Use the stateless Decoder() to get a concurrent version.
+func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
+	if cap(s.Out) < s.MaxDecodedSize {
+		s.Out = make([]byte, s.MaxDecodedSize)
+	}
+	s.Out = s.Out[:0:s.MaxDecodedSize]
+	s.Out, err = s.Decoder().Decompress1X(s.Out, in)
+	return s.Out, err
+}
+
+// Decompress4X will decompress a 4X encoded stream.
+// Before this is called, the table must be initialized with ReadTable unless
+// the encoder re-used the table.
+// The length of the supplied input must match the end of a block exactly.
+// The destination size of the uncompressed data must be known and provided.
+// Deprecated: Use the stateless Decoder() to get a concurrent version.
+func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
+	if dstSize > s.MaxDecodedSize {
+		return nil, ErrMaxDecodedSizeExceeded
+	}
+	if cap(s.Out) < dstSize {
+		s.Out = make([]byte, s.MaxDecodedSize)
+	}
+	s.Out = s.Out[:0:dstSize]
+	s.Out, err = s.Decoder().Decompress4X(s.Out, in)
+	return s.Out, err
+}
+
+// Decoder will return a stateless decoder that can be used by multiple
+// decompressors concurrently.
+// Before this is called, the table must be initialized with ReadTable.
+// The Decoder still references the table held by the scratch, so the scratch
+// cannot be reused for reading another table while the Decoder is in use.
+// It is, however, safe to discard the scratch.
+func (s *Scratch) Decoder() *Decoder {
+	return &Decoder{
+		dt:             s.dt,
+		actualTableLog: s.actualTableLog,
+	}
+}
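+
+// A usage sketch for one-shot decoding; it assumes the serialized table is
+// immediately followed by the 1X compressed block and that the decoded size
+// is known up front:
+//
+//	s, remain, err := huff0.ReadTable(data, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	// The capacity of the destination slice bounds the decoded output size.
+//	out, err := s.Decoder().Decompress1X(make([]byte, 0, decodedSize), remain)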
+
+// Decoder provides stateless decoding.
+type Decoder struct {
+	dt             dTable
+	actualTableLog uint8
+}
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	if use8BitTables && d.actualTableLog <= 8 {
+		return d.decompress1X8Bit(dst, src)
+	}
+	var br bitReaderShifted
+	err := br.init(src)
+	if err != nil {
+		return dst, err
+	}
+	maxDecodedSize := cap(dst)
+	dst = dst[:0]
+
+	// Avoid bounds check by always having full sized table.
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	dt := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var buf [256]byte
+	var off uint8
+
+	for br.off >= 8 {
+		br.fillFast()
+		v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+		br.advance(uint8(v.entry))
+		buf[off+0] = uint8(v.entry >> 8)
+
+		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+		br.advance(uint8(v.entry))
+		buf[off+1] = uint8(v.entry >> 8)
+
+		// Refill
+		br.fillFast()
+
+		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+		br.advance(uint8(v.entry))
+		buf[off+2] = uint8(v.entry >> 8)
+
+		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+		br.advance(uint8(v.entry))
+		buf[off+3] = uint8(v.entry >> 8)
+
+		off += 4
+		if off == 0 {
+			if len(dst)+256 > maxDecodedSize {
+				br.close()
+				return nil, ErrMaxDecodedSizeExceeded
+			}
+			dst = append(dst, buf[:]...)
+		}
+	}
+
+	if len(dst)+int(off) > maxDecodedSize {
+		br.close()
+		return nil, ErrMaxDecodedSizeExceeded
+	}
+	dst = append(dst, buf[:off]...)
+
+	// br < 8, so uint8 is fine
+	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+	for bitsLeft > 0 {
+		br.fill()
+		if false && br.bitsRead >= 32 {
+			if br.off >= 4 {
+				v := br.in[br.off-4:]
+				v = v[:4]
+				low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+				br.value = (br.value << 32) | uint64(low)
+				br.bitsRead -= 32
+				br.off -= 4
+			} else {
+				for br.off > 0 {
+					br.value = (br.value << 8) | uint64(br.in[br.off-1])
+					br.bitsRead -= 8
+					br.off--
+				}
+			}
+		}
+		if len(dst) >= maxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+		nBits := uint8(v.entry)
+		br.advance(nBits)
+		bitsLeft -= nBits
+		dst = append(dst, uint8(v.entry>>8))
+	}
+	return dst, br.close()
+}
+
+// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
+	if d.actualTableLog == 8 {
+		return d.decompress1X8BitExactly(dst, src)
+	}
+	var br bitReaderBytes
+	err := br.init(src)
+	if err != nil {
+		return dst, err
+	}
+	maxDecodedSize := cap(dst)
+	dst = dst[:0]
+
+	// Avoid bounds check by always having full sized table.
+	dt := d.dt.single[:256]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var buf [256]byte
+	var off uint8
+
+	shift := (8 - d.actualTableLog) & 7
+
+	//fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
+	for br.off >= 4 {
+		br.fillFast()
+		v := dt[br.peekByteFast()>>shift]
+		br.advance(uint8(v.entry))
+		buf[off+0] = uint8(v.entry >> 8)
+
+		v = dt[br.peekByteFast()>>shift]
+		br.advance(uint8(v.entry))
+		buf[off+1] = uint8(v.entry >> 8)
+
+		v = dt[br.peekByteFast()>>shift]
+		br.advance(uint8(v.entry))
+		buf[off+2] = uint8(v.entry >> 8)
+
+		v = dt[br.peekByteFast()>>shift]
+		br.advance(uint8(v.entry))
+		buf[off+3] = uint8(v.entry >> 8)
+
+		off += 4
+		if off == 0 {
+			if len(dst)+256 > maxDecodedSize {
+				br.close()
+				return nil, ErrMaxDecodedSizeExceeded
+			}
+			dst = append(dst, buf[:]...)
+		}
+	}
+
+	if len(dst)+int(off) > maxDecodedSize {
+		br.close()
+		return nil, ErrMaxDecodedSizeExceeded
+	}
+	dst = append(dst, buf[:off]...)
+
+	// br < 4, so uint8 is fine
+	bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
+	for bitsLeft > 0 {
+		if br.bitsRead >= 64-8 {
+			for br.off > 0 {
+				br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+				br.bitsRead -= 8
+				br.off--
+			}
+		}
+		if len(dst) >= maxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		v := dt[br.peekByteFast()>>shift]
+		nBits := uint8(v.entry)
+		br.advance(nBits)
+		bitsLeft -= int8(nBits)
+		dst = append(dst, uint8(v.entry>>8))
+	}
+	return dst, br.close()
+}
+
+// decompress1X8BitExactly will decompress a 1X encoded stream with tablelog = 8.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
+	var br bitReaderBytes
+	err := br.init(src)
+	if err != nil {
+		return dst, err
+	}
+	maxDecodedSize := cap(dst)
+	dst = dst[:0]
+
+	// Avoid bounds check by always having full sized table.
+	dt := d.dt.single[:256]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var buf [256]byte
+	var off uint8
+
+	const shift = 0
+
+	//fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
+	for br.off >= 4 {
+		br.fillFast()
+		v := dt[br.peekByteFast()>>shift]
+		br.advance(uint8(v.entry))
+		buf[off+0] = uint8(v.entry >> 8)
+
+		v = dt[br.peekByteFast()>>shift]
+		br.advance(uint8(v.entry))
+		buf[off+1] = uint8(v.entry >> 8)
+
+		v = dt[br.peekByteFast()>>shift]
+		br.advance(uint8(v.entry))
+		buf[off+2] = uint8(v.entry >> 8)
+
+		v = dt[br.peekByteFast()>>shift]
+		br.advance(uint8(v.entry))
+		buf[off+3] = uint8(v.entry >> 8)
+
+		off += 4
+		if off == 0 {
+			if len(dst)+256 > maxDecodedSize {
+				br.close()
+				return nil, ErrMaxDecodedSizeExceeded
+			}
+			dst = append(dst, buf[:]...)
+		}
+	}
+
+	if len(dst)+int(off) > maxDecodedSize {
+		br.close()
+		return nil, ErrMaxDecodedSizeExceeded
+	}
+	dst = append(dst, buf[:off]...)
+
+	// br < 4, so uint8 is fine
+	bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
+	for bitsLeft > 0 {
+		if br.bitsRead >= 64-8 {
+			for br.off > 0 {
+				br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+				br.bitsRead -= 8
+				br.off--
+			}
+		}
+		if len(dst) >= maxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		v := dt[br.peekByteFast()>>shift]
+		nBits := uint8(v.entry)
+		br.advance(nBits)
+		bitsLeft -= int8(nBits)
+		dst = append(dst, uint8(v.entry>>8))
+	}
+	return dst, br.close()
+}
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	if len(src) < 6+(4*1) {
+		return nil, errors.New("input too small")
+	}
+	if use8BitTables && d.actualTableLog <= 8 {
+		return d.decompress4X8bit(dst, src)
+	}
+
+	var br [4]bitReaderShifted
+	start := 6
+	for i := 0; i < 3; i++ {
+		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+		if start+length >= len(src) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err := br[i].init(src[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err := br[3].init(src[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// destination, offset to match first output
+	dstSize := cap(dst)
+	dst = dst[:dstSize]
+	out := dst
+	dstEvery := (dstSize + 3) / 4
+
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	single := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var buf [256]byte
+	var off uint8
+	var decoded int
+
+	// Decode 2 values from each decoder/loop.
+	const bufoff = 256 / 4
+	for {
+		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+			break
+		}
+
+		{
+			const stream = 0
+			const stream2 = 1
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			val := br[stream].peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask]
+			br[stream].advance(uint8(v.entry))
+			buf[off+bufoff*stream] = uint8(v.entry >> 8)
+
+			val2 := br[stream2].peekBitsFast(d.actualTableLog)
+			v2 := single[val2&tlMask]
+			br[stream2].advance(uint8(v2.entry))
+			buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
+
+			val = br[stream].peekBitsFast(d.actualTableLog)
+			v = single[val&tlMask]
+			br[stream].advance(uint8(v.entry))
+			buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
+
+			val2 = br[stream2].peekBitsFast(d.actualTableLog)
+			v2 = single[val2&tlMask]
+			br[stream2].advance(uint8(v2.entry))
+			buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
+		}
+
+		{
+			const stream = 2
+			const stream2 = 3
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			val := br[stream].peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask]
+			br[stream].advance(uint8(v.entry))
+			buf[off+bufoff*stream] = uint8(v.entry >> 8)
+
+			val2 := br[stream2].peekBitsFast(d.actualTableLog)
+			v2 := single[val2&tlMask]
+			br[stream2].advance(uint8(v2.entry))
+			buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
+
+			val = br[stream].peekBitsFast(d.actualTableLog)
+			v = single[val&tlMask]
+			br[stream].advance(uint8(v.entry))
+			buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
+
+			val2 = br[stream2].peekBitsFast(d.actualTableLog)
+			v2 = single[val2&tlMask]
+			br[stream2].advance(uint8(v2.entry))
+			buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
+		}
+
+		off += 2
+
+		if off == bufoff {
+			if bufoff > dstEvery {
+				return nil, errors.New("corruption detected: stream overrun 1")
+			}
+			copy(out, buf[:bufoff])
+			copy(out[dstEvery:], buf[bufoff:bufoff*2])
+			copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
+			copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
+			off = 0
+			out = out[bufoff:]
+			decoded += 256
+			// There must at least be 3 buffers left.
+			if len(out) < dstEvery*3 {
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[:off])
+		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	for i := range br {
+		offset := dstEvery * i
+		br := &br[i]
+		bitsLeft := br.off*8 + uint(64-br.bitsRead)
+		for bitsLeft > 0 {
+			br.fill()
+			if false && br.bitsRead >= 32 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value = (br.value << 32) | uint64(low)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value = (br.value << 8) | uint64(br.in[br.off-1])
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= len(out) {
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			val := br.peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// decompress4X8bit will decompress a 4X encoded stream with tablelog <= 8.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
+	if d.actualTableLog == 8 {
+		return d.decompress4X8bitExactly(dst, src)
+	}
+
+	var br [4]bitReaderBytes
+	start := 6
+	for i := 0; i < 3; i++ {
+		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+		if start+length >= len(src) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err := br[i].init(src[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err := br[3].init(src[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// destination, offset to match first output
+	dstSize := cap(dst)
+	dst = dst[:dstSize]
+	out := dst
+	dstEvery := (dstSize + 3) / 4
+
+	shift := (8 - d.actualTableLog) & 7
+
+	const tlSize = 1 << 8
+	const tlMask = tlSize - 1
+	single := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var buf [256]byte
+	var off uint8
+	var decoded int
+
+	// Decode 4 values from each decoder/loop.
+	const bufoff = 256 / 4
+	for {
+		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+			break
+		}
+
+		{
+			// Interleave 2 decodes.
+			const stream = 0
+			const stream2 = 1
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			v := single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 := single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+1] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+2] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+3] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+		}
+
+		{
+			const stream = 2
+			const stream2 = 3
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			v := single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 := single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+1] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+2] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+3] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+		}
+
+		off += 4
+
+		if off == bufoff {
+			if bufoff > dstEvery {
+				return nil, errors.New("corruption detected: stream overrun 1")
+			}
+			copy(out, buf[:bufoff])
+			copy(out[dstEvery:], buf[bufoff:bufoff*2])
+			copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
+			copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
+			off = 0
+			out = out[bufoff:]
+			decoded += 256
+			// There must at least be 3 buffers left.
+			if len(out) < dstEvery*3 {
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[:off])
+		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	for i := range br {
+		offset := dstEvery * i
+		br := &br[i]
+		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+		for bitsLeft > 0 {
+			if br.finished() {
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= len(out) {
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()>>shift].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= int(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// decompress4X8bitExactly will decompress a 4X encoded stream with tablelog = 8.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
+	var br [4]bitReaderBytes
+	start := 6
+	for i := 0; i < 3; i++ {
+		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+		if start+length >= len(src) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err := br[i].init(src[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err := br[3].init(src[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// destination, offset to match first output
+	dstSize := cap(dst)
+	dst = dst[:dstSize]
+	out := dst
+	dstEvery := (dstSize + 3) / 4
+
+	const shift = 0
+	const tlSize = 1 << 8
+	const tlMask = tlSize - 1
+	single := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var buf [256]byte
+	var off uint8
+	var decoded int
+
+	// Decode 4 values from each decoder/loop.
+	const bufoff = 256 / 4
+	for {
+		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+			break
+		}
+
+		{
+			// Interleave 2 decodes.
+			const stream = 0
+			const stream2 = 1
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			v := single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 := single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+1] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+2] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+3] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+		}
+
+		{
+			const stream = 2
+			const stream2 = 3
+			br[stream].fillFast()
+			br[stream2].fillFast()
+
+			v := single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 := single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+1] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+2] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+
+			v = single[br[stream].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream+3] = uint8(v >> 8)
+			br[stream].advance(uint8(v))
+
+			v2 = single[br[stream2].peekByteFast()>>shift].entry
+			buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+			br[stream2].advance(uint8(v2))
+		}
+
+		off += 4
+
+		if off == bufoff {
+			if bufoff > dstEvery {
+				return nil, errors.New("corruption detected: stream overrun 1")
+			}
+			copy(out, buf[:bufoff])
+			copy(out[dstEvery:], buf[bufoff:bufoff*2])
+			copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
+			copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
+			off = 0
+			out = out[bufoff:]
+			decoded += 256
+			// There must at least be 3 buffers left.
+			if len(out) < dstEvery*3 {
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[:off])
+		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	for i := range br {
+		offset := dstEvery * i
+		br := &br[i]
+		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+		for bitsLeft > 0 {
+			if br.finished() {
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= len(out) {
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()>>shift].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= int(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if the table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.actualTableLog]
+	tablelog := s.actualTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered.
+		for i := uint16(0); i < (1 << ub); i++ {
+			vval := top | i
+			dec := dt[vval]
+			if uint8(dec.entry) != enc.nBits {
+				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+				errs++
+			}
+			if uint8(dec.entry>>8) != uint8(sym) {
+				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+				errs++
+			}
+			if errs > 20 {
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
+				break
+			}
+		}
+		if errs == 0 {
+			ok++
+			broken--
+		}
+	}
+	if broken > 0 {
+		fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
new file mode 100644
index 0000000..7ec2022
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -0,0 +1,273 @@
+// Package huff0 provides fast Huffman encoding as used in zstd.
+//
+// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details.
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+
+	"github.com/klauspost/compress/fse"
+)
+
+const (
+	maxSymbolValue = 255
+
+	// zstandard limits tablelog to 11, see:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
+	tableLogMax     = 11
+	tableLogDefault = 11
+	minTablelog     = 5
+	huffNodesLen    = 512
+
+	// BlockSizeMax is maximum input size for a single block uncompressed.
+	BlockSizeMax = 1<<18 - 1
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+
+	// ErrTooBig is returned if input is too large for a single block.
+	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned if the decoded output exceeds the maximum allowed size.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
+)
+
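+// ReusePolicy determines how a previously generated table is reused when compressing following blocks.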
+type ReusePolicy uint8
+
+const (
+	// ReusePolicyAllow will allow reuse if it produces smaller output.
+	ReusePolicyAllow ReusePolicy = iota
+
+	// ReusePolicyPrefer will re-use aggressively if possible.
+	// This will not check if a new table will produce smaller output,
+	// except if the current table is impossible to use or
+	// compressed output is bigger than input.
+	ReusePolicyPrefer
+
+	// ReusePolicyNone will disable re-use of tables.
+	// This is slightly faster than ReusePolicyAllow but may produce larger output.
+	ReusePolicyNone
+
+	// ReusePolicyMust must allow reuse and produce smaller output.
+	ReusePolicyMust
+)
+
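+// Scratch provides temporary storage and per-block parameters for compression and decompression.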
+type Scratch struct {
+	count [maxSymbolValue + 1]uint32
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next compression/decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// OutTable will contain the table data only, if a new table has been generated.
+	// Slice of the returned data.
+	OutTable []byte
+
+	// OutData will contain the compressed data.
+	// Slice of the returned data.
+	OutData []byte
+
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
+	br byteReader
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	// Must be <= 11 and >= 5.
+	TableLog uint8
+
+	// Reuse will specify the reuse policy
+	Reuse ReusePolicy
+
+	// WantLogLess allows specifying a log2 reduction that should at least be achieved,
+	// otherwise the block will be returned as incompressible.
+	// The reduction must then be at least (input size >> WantLogLess).
+	// If WantLogLess == 0, any improvement will do.
+	WantLogLess uint8
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	maxCount       int    // count of the most probable symbol
+	clearCount     bool   // clear count
+	actualTableLog uint8  // Selected tablelog.
+	prevTableLog   uint8  // Tablelog for previous table
+	prevTable      cTable // Table used for previous compression.
+	cTable         cTable // compression table
+	dt             dTable // decompression table
+	nodes          []nodeElt
+	tmpOut         [4][]byte
+	fse            *fse.Scratch
+	huffWeight     [maxSymbolValue + 1]byte
+}
+
+// TransferCTable will transfer the previously used compression table.
+func (s *Scratch) TransferCTable(src *Scratch) {
+	if cap(s.prevTable) < len(src.prevTable) {
+		s.prevTable = make(cTable, 0, maxSymbolValue+1)
+	}
+	s.prevTable = s.prevTable[:len(src.prevTable)]
+	copy(s.prevTable, src.prevTable)
+	s.prevTableLog = src.prevTableLog
+}
+
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if len(in) > BlockSizeMax {
+		return nil, ErrTooBig
+	}
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = maxSymbolValue
+	}
+	if s.TableLog == 0 {
+		s.TableLog = tableLogDefault
+	}
+	if s.TableLog > tableLogMax || s.TableLog < minTablelog {
+		return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
+	}
+	if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
+		s.MaxDecodedSize = BlockSizeMax
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	s.Out = s.Out[:0]
+
+	s.OutTable = nil
+	s.OutData = nil
+	if cap(s.nodes) < huffNodesLen+1 {
+		s.nodes = make([]nodeElt, 0, huffNodesLen+1)
+	}
+	s.nodes = s.nodes[:0]
+	if s.fse == nil {
+		s.fse = &fse.Scratch{}
+	}
+	s.br.init(in)
+
+	return s, nil
+}
+
+type cTable []cTableEntry
+
+func (c cTable) write(s *Scratch) error {
+	var (
+		// precomputed conversion table
+		bitsToWeight [tableLogMax + 1]byte
+		huffLog      = s.actualTableLog
+		// last weight is not saved.
+		maxSymbolValue = uint8(s.symbolLen - 1)
+		huffWeight     = s.huffWeight[:256]
+	)
+	const (
+		maxFSETableLog = 6
+	)
+	// convert to weight
+	bitsToWeight[0] = 0
+	for n := uint8(1); n < huffLog+1; n++ {
+		bitsToWeight[n] = huffLog + 1 - n
+	}
+
+	// Acquire histogram for FSE.
+	hist := s.fse.Histogram()
+	hist = hist[:256]
+	for i := range hist[:16] {
+		hist[i] = 0
+	}
+	for n := uint8(0); n < maxSymbolValue; n++ {
+		v := bitsToWeight[c[n].nBits] & 15
+		huffWeight[n] = v
+		hist[v]++
+	}
+
+	// FSE compress if feasible.
+	if maxSymbolValue >= 2 {
+		huffMaxCnt := uint32(0)
+		huffMax := uint8(0)
+		for i, v := range hist[:16] {
+			if v == 0 {
+				continue
+			}
+			huffMax = byte(i)
+			if v > huffMaxCnt {
+				huffMaxCnt = v
+			}
+		}
+		s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
+		s.fse.TableLog = maxFSETableLog
+		b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
+		if err == nil && len(b) < int(s.symbolLen>>1) {
+			s.Out = append(s.Out, uint8(len(b)))
+			s.Out = append(s.Out, b...)
+			return nil
+		}
+		// Unable to compress (RLE/incompressible)
+	}
+	// write raw values as 4-bits (max : 15)
+	if maxSymbolValue > (256 - 128) {
+		// should not happen : likely means source cannot be compressed
+		return ErrIncompressible
+	}
+	op := s.Out
+	// special case, pack weights 4 bits/weight.
+	op = append(op, 128|(maxSymbolValue-1))
+	// be sure it doesn't cause msan issue in final combination
+	huffWeight[maxSymbolValue] = 0
+	for n := uint16(0); n < uint16(maxSymbolValue); n += 2 {
+		op = append(op, (huffWeight[n]<<4)|huffWeight[n+1])
+	}
+	s.Out = op
+	return nil
+}
+
+// estimateSize returns the estimated compressed size in bytes of the input
+// represented by the supplied histogram when encoded with this table.
+func (c cTable) estimateSize(hist []uint32) int {
+	nbBits := uint32(7)
+	for i, v := range c[:len(hist)] {
+		nbBits += uint32(v.nBits) * hist[i]
+	}
+	return int(nbBits >> 3)
+}
+
+// minSize returns the minimum possible compressed size considering the Shannon limit,
+// i.e. roughly sum(count[s] * log2(total/count[s])) bits for the active symbols.
+func (s *Scratch) minSize(total int) int {
+	nbBits := float64(7)
+	fTotal := float64(total)
+	for _, v := range s.count[:s.symbolLen] {
+		n := float64(v)
+		if n > 0 {
+			nbBits += math.Log2(fTotal/n) * n
+		}
+	}
+	return int(nbBits) >> 3
+}
+
+func highBit32(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
new file mode 100644
index 0000000..e7d7eb0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -0,0 +1,417 @@
+# zstd 
+
+[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. 
+It offers a very wide range of compression / speed trade-offs, while being backed by a very fast decoder.
+This package implements a high performance compression algorithm that is, for now, focused on speed. 
+
+This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. 
+
+This package is pure Go and does not use "unsafe". 
+
+The `zstd` package is provided as open source software using a Go standard license.
+
+Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+
+## Installation
+
+Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
+
+Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd
+
+
+## Compressor
+
+### Status: 
+
+STABLE - there may always be subtle bugs, but a wide variety of content has been tested and the library is actively 
+used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates.
+
+There may still be specific combinations of data types/sizes/settings that could lead to edge cases, 
+so, as always, testing is recommended.
+
+Compressors for the following levels have been implemented:
+
+* The "Fastest" compression ratio is roughly equivalent to zstd level 1. 
+* The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
+* The "Better" compression ratio is roughly equivalent to zstd level 7.
+* The "Best" compression ratio is roughly equivalent to zstd level 11.
+
+In terms of speed, it is typically about 2x as fast as the stdlib deflate/gzip in its fastest mode. 
+Its compression ratio is around that of stdlib level 3, but it is usually about 3x as fast.
+
+ 
+### Usage
+
+An Encoder can be used either for compressing a stream via the
+`io.WriteCloser` interface it implements, or for multiple independent
+tasks via the `EncodeAll` function.
+Smaller encodes are encouraged to use the `EncodeAll` function.
+Use `NewWriter` to create a new instance that can be used for both.
+
+To create a writer with default options, do the following:
+
+```Go
+// Compress input to output.
+func Compress(in io.Reader, out io.Writer) error {
+    enc, err := zstd.NewWriter(out)
+    if err != nil {
+        return err
+    }
+    _, err = io.Copy(enc, in)
+    if err != nil {
+        enc.Close()
+        return err
+    }
+    return enc.Close()
+}
+```
+
+Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called.
+Even if your encode fails, you should still call `Close()` to release any resources that may be held.
+
+The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
+
+To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. 
+This will allow the encoder to reuse all resources and avoid wasteful allocations. 
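+
+A minimal sketch of reusing one long-lived encoder for several streams
+(the encoder must not be used for more than one stream at a time):
+
+```Go
+// compressTo points a shared encoder at a new destination and streams in to out.
+func compressTo(enc *zstd.Encoder, out io.Writer, in io.Reader) error {
+    enc.Reset(out)
+    if _, err := io.Copy(enc, in); err != nil {
+        enc.Close()
+        return err
+    }
+    return enc.Close()
+}
+```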
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part 
+of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change 
+in the future. So if you want to limit concurrency for future updates, specify the concurrency
+you would like.
+
+You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined 
+compression settings can be specified.
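+
+A sketch of passing both options when creating the writer; the level constant
+names (e.g. `zstd.SpeedBetterCompression`) are assumed here:
+
+```Go
+// Create a writer using the "better" level and at most 2 concurrent encodes.
+var enc, _ = zstd.NewWriter(nil,
+    zstd.WithEncoderLevel(zstd.SpeedBetterCompression),
+    zstd.WithEncoderConcurrency(2))
+```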
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3). 
+However, the encoding should never be assumed to remain the same, 
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this, 
+although they will not be enabled without an explicit option.   
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) 
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently, but each call will only run on a single goroutine.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
+
+Especially when encoding blocks you should take special care to reuse the encoder. 
+This will effectively make it run without allocations after a warmup period. 
+To make it run completely without allocations, supply a destination buffer with space for all content.   
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Writer.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer. 
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+    return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+} 
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` 
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe. 
+
+### Performance
+
+I have collected some examples to compare speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
+* `level` is the compression level used. For `zskp`, level 1 is "fastest", level 2 is "default", level 3 is "better", and level 4 is "best".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
+
+```
+Silesia Corpus:
+http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
+
+This package:
+file    out     level   insize      outsize     millis  mb/s
+silesia.tar zskp    1   211947520   73101992    643     313.87
+silesia.tar zskp    2   211947520   67504318    969     208.38
+silesia.tar zskp    3   211947520   64595893    2007    100.68
+silesia.tar zskp    4   211947520   60995370    7691    26.28
+
+cgo zstd:
+silesia.tar zstd    1   211947520   73605392    543     371.56
+silesia.tar zstd    3   211947520   66793289    864     233.68
+silesia.tar zstd    6   211947520   62916450    1913    105.66
+silesia.tar zstd    9   211947520   60212393    5063    39.92
+
+gzip, stdlib/this package:
+silesia.tar gzstd   1   211947520   80007735    1654    122.21
+silesia.tar gzkp    1   211947520   80369488    1168    173.06
+
+GOB stream of binary data. Highly compressible.
+https://files.klauspost.com/compress/gob-stream.7z
+
+file        out     level   insize  outsize     millis  mb/s
+gob-stream  zskp    1   1911399616  235022249   3088    590.30
+gob-stream  zskp    2   1911399616  205669791   3786    481.34
+gob-stream  zskp    3   1911399616  175034659   9636    189.17
+gob-stream  zskp    4   1911399616  167273881   29337   62.13
+gob-stream  zstd    1   1911399616  249810424   2637    691.26
+gob-stream  zstd    3   1911399616  208192146   3490    522.31
+gob-stream  zstd    6   1911399616  193632038   6687    272.56
+gob-stream  zstd    9   1911399616  177620386   16175   112.70
+gob-stream  gzstd   1   1911399616  357382641   10251   177.82
+gob-stream  gzkp    1   1911399616  362156523   5695    320.08
+
+The test data for the Large Text Compression Benchmark is the first
+10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
+http://mattmahoney.net/dc/textdata.html
+
+file    out level   insize      outsize     millis  mb/s
+enwik9  zskp    1   1000000000  343848582   3609    264.18
+enwik9  zskp    2   1000000000  317276632   5746    165.97
+enwik9  zskp    3   1000000000  292243069   12162   78.41
+enwik9  zskp    4   1000000000  275241169   36430   26.18
+enwik9  zstd    1   1000000000  358072021   3110    306.65
+enwik9  zstd    3   1000000000  313734672   4784    199.35
+enwik9  zstd    6   1000000000  295138875   10290   92.68
+enwik9  zstd    9   1000000000  278348700   28549   33.40
+enwik9  gzstd   1   1000000000  382578136   9604    99.30
+enwik9  gzkp    1   1000000000  383825945   6544    145.73
+
+Highly compressible JSON file.
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst
+
+file                        out level   insize      outsize     millis  mb/s
+github-june-2days-2019.json zskp    1   6273951764  699045015   10620   563.40
+github-june-2days-2019.json zskp    2   6273951764  617881763   11687   511.96
+github-june-2days-2019.json zskp    3   6273951764  524340691   34043   175.75
+github-june-2days-2019.json zskp    4   6273951764  503314661   93811   63.78
+github-june-2days-2019.json zstd    1   6273951764  766284037   8450    708.00
+github-june-2days-2019.json zstd    3   6273951764  661889476   10927   547.57
+github-june-2days-2019.json zstd    6   6273951764  642756859   22996   260.18
+github-june-2days-2019.json zstd    9   6273951764  601974523   52413   114.16
+github-june-2days-2019.json gzstd   1   6273951764  1164400847  29948   199.79
+github-june-2days-2019.json gzkp    1   6273951764  1128755542  19236   311.03
+
+VM Image, Linux mint with a few installed applications:
+https://files.klauspost.com/compress/rawstudio-mint14.7z
+
+file                    out level   insize      outsize     millis  mb/s
+rawstudio-mint14.tar    zskp    1   8558382592  3667489370  20210   403.84
+rawstudio-mint14.tar    zskp    2   8558382592  3364592300  31873   256.07
+rawstudio-mint14.tar    zskp    3   8558382592  3158085214  77675   105.08
+rawstudio-mint14.tar    zskp    4   8558382592  3020370044  404956  20.16
+rawstudio-mint14.tar    zstd    1   8558382592  3609250104  17136   476.27
+rawstudio-mint14.tar    zstd    3   8558382592  3341679997  29262   278.92
+rawstudio-mint14.tar    zstd    6   8558382592  3235846406  77904   104.77
+rawstudio-mint14.tar    zstd    9   8558382592  3160778861  140946  57.91
+rawstudio-mint14.tar    gzstd   1   8558382592  3926257486  57722   141.40
+rawstudio-mint14.tar    gzkp    1   8558382592  3970463184  41749   195.49
+
+CSV data:
+https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
+
+file                    out level   insize      outsize     millis  mb/s
+nyc-taxi-data-10M.csv   zskp    1   3325605752  641339945   8925    355.35
+nyc-taxi-data-10M.csv   zskp    2   3325605752  591748091   11268   281.44
+nyc-taxi-data-10M.csv   zskp    3   3325605752  530289687   25239   125.66
+nyc-taxi-data-10M.csv   zskp    4   3325605752  490907191   65939   48.10
+nyc-taxi-data-10M.csv   zstd    1   3325605752  687399637   8233    385.18
+nyc-taxi-data-10M.csv   zstd    3   3325605752  598514411   10065   315.07
+nyc-taxi-data-10M.csv   zstd    6   3325605752  570522953   20038   158.27
+nyc-taxi-data-10M.csv   zstd    9   3325605752  517554797   64565   49.12
+nyc-taxi-data-10M.csv   gzstd   1   3325605752  928656485   23876   132.83
+nyc-taxi-data-10M.csv   gzkp    1   3325605752  924718719   16388   193.53
+```
+
+## Decompressor
+
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+with compute kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
+or to run it past its limits, with ANY input provided.
+ 
+### Usage
+
+The package has been designed for two main use cases: big streams of data and smaller in-memory buffers.
+Both are accessed by creating a `Decoder`.
+
+For streaming use a simple setup could look like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+func Decompress(in io.Reader, out io.Writer) error {
+    d, err := zstd.NewReader(in)
+    if err != nil {
+        return err
+    }
+    defer d.Close()
+    
+    // Copy content...
+    _, err = io.Copy(out, d)
+    return err
+}
+```
+
+It is important to call the "Close" function when you no longer need the Reader, so that running goroutines are stopped.
+See "Allocation-less operation" below.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil)
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+    return decoder.DecodeAll(src, nil)
+} 
+```
+
+Both of these cases should provide the functionality needed. 
+The decoder can be used for *concurrent* decompression of multiple buffers. 
+It will only allow a certain number of concurrent operations to run. 
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder, as sketched below.
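+
+A minimal sketch, assuming the options above, of a shared decoder limited to a fixed number of
+concurrent `DecodeAll` operations (the limit of 4 is only an illustrative value):
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// A shared decoder that will run at most 4 DecodeAll operations concurrently.
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(4))
+
+// Decompress can be called from multiple goroutines.
+func Decompress(src []byte) ([]byte, error) {
+    return decoder.DecodeAll(src, nil)
+}
+```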
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+The correct dictionary will be used automatically for frames that specify it.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
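+
+As an illustrative sketch, registering dictionaries could look like this; `dict1` and `dict2`
+stand in for dictionary data produced by `zstd --train` and loaded from disk:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// NewDictDecoder registers both dictionaries; the one matching the
+// dictionary ID of each frame is selected automatically.
+func NewDictDecoder(dict1, dict2 []byte) (*zstd.Decoder, error) {
+    return zstd.NewReader(nil, zstd.WithDecoderDicts(dict1, dict2))
+}
+```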
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used 
+and it will likely be used even if it doesn't improve compression. 
+
+The same dictionary must then be used to decompress the content.
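+
+A minimal round-trip sketch using the options above; `dict` is a placeholder for dictionary
+bytes built from sample data:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// CompressWithDict compresses src using the supplied dictionary.
+func CompressWithDict(src, dict []byte) ([]byte, error) {
+    enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
+    if err != nil {
+        return nil, err
+    }
+    return enc.EncodeAll(src, nil), nil
+}
+
+// DecompressWithDict decodes content that was compressed with the same dictionary.
+func DecompressWithDict(compressed, dict []byte) ([]byte, error) {
+    dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dict))
+    if err != nil {
+        return nil, err
+    }
+    defer dec.Close()
+    return dec.DecodeAll(compressed, nil)
+}
+```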
+
+For any real gains, the dictionary should be built with similar data. 
+If an unsuitable dictionary is used the output may be slightly larger than using no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). 
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries. 
+This will likely be improved over time. Just be sure to test performance when implementing.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup. 
+
+This means that you should *store* the decoder for best performance. 
+To re-use a stream decoder, use the `Reset(r io.Reader) error` method to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
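+
+A small sketch of re-using a single stream decoder across several inputs via `Reset`:
+
+```Go
+import (
+    "io"
+
+    "github.com/klauspost/compress/zstd"
+)
+
+// DecompressAll streams every input through the same decoder.
+func DecompressAll(inputs []io.Reader, out io.Writer) error {
+    d, err := zstd.NewReader(nil)
+    if err != nil {
+        return err
+    }
+    defer d.Close()
+    for _, in := range inputs {
+        // Reset switches to the next stream and re-uses internal buffers.
+        if err := d.Reset(in); err != nil {
+            return err
+        }
+        if _, err := io.Copy(out, d); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+```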
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* use this if you will no longer need the Reader.
+
+For decompressing smaller buffers a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
+In this case no unneeded allocations should be made. 
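+
+For example, supplying a destination with zero length and an expected capacity (the
+`expectedSize` parameter below is only illustrative) avoids extra allocations when the
+guess is large enough:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+var decoder, _ = zstd.NewReader(nil)
+
+// DecompressInto decodes src into a buffer pre-sized to expectedSize.
+// If the guess is too small, DecodeAll will grow the buffer as needed.
+func DecompressInto(src []byte, expectedSize int) ([]byte, error) {
+    return decoder.DecodeAll(src, make([]byte, 0, expectedSize))
+}
+```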
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
+
+The stream decoder operates as follows:
+
+* One goroutine reads input and splits the input to several block decoders.
+* A number of decoders will decode blocks.
+* A goroutine coordinates these blocks and sends history from one to the next.
+
+So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+
+Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency.
+
+In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
+ 
+ 
+### Benchmarks
+
+These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
+
+The first two are streaming decodes and the last are smaller inputs. 
+ 
+```
+BenchmarkDecoderSilesia-8                          3     385000067 ns/op     550.51 MB/s        5498 B/op          8 allocs/op
+BenchmarkDecoderSilesiaCgo-8                       6     197666567 ns/op    1072.25 MB/s      270672 B/op          8 allocs/op
+
+BenchmarkDecoderEnwik9-8                           1    2027001600 ns/op     493.34 MB/s       10496 B/op         18 allocs/op
+BenchmarkDecoderEnwik9Cgo-8                        2     979499200 ns/op    1020.93 MB/s      270672 B/op          8 allocs/op
+
+Concurrent performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16                28915         42469 ns/op    4340.07 MB/s         114 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16           116505          9965 ns/op    11900.16 MB/s         16 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16              8952        134272 ns/op    3588.70 MB/s         915 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16               11820        102538 ns/op    4161.90 MB/s         594 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16             34782         34184 ns/op    3661.88 MB/s          60 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16              27712         43447 ns/op    3500.58 MB/s          99 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16                 62826         18750 ns/op    21845.10 MB/s        104 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16          631545          1794 ns/op    57078.74 MB/s          2 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16         1690140           712 ns/op    172938.13 MB/s         1 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16                 10432        113593 ns/op    6180.73 MB/s        1143 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-16                    113206         10671 ns/op    9596.27 MB/s          15 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16          1530615           779 ns/op    5229.49 MB/s           0 B/op          0 allocs/op
+
+BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16             65217         16192 ns/op    11383.34 MB/s         46 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16        292671          4039 ns/op    29363.19 MB/s          6 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16          26314         46021 ns/op    10470.43 MB/s        293 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16            33897         34900 ns/op    12227.96 MB/s        205 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16         104348         11433 ns/op    10949.01 MB/s         20 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16           75949         15510 ns/op    9805.60 MB/s          32 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16             173910          6756 ns/op    60624.29 MB/s         37 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16       923076          1339 ns/op    76474.87 MB/s          1 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16       922920          1351 ns/op    91102.57 MB/s          2 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16              27649         43618 ns/op    16096.19 MB/s        407 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16                 279073          4160 ns/op    24614.18 MB/s          6 B/op          0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16        749938          1579 ns/op    2581.71 MB/s           0 B/op          0 allocs/op
+```
+
+This reflects the performance around May 2020, but this may be out of date.
+
+# Contributions
+
+Contributions are always welcome. 
+For new features/fixes, remember to add tests, and for performance enhancements, include benchmarks.
+
+For sending files for reproducing errors use a service like [goobox](https://goobox.io/#/upload) or similar to share your files.
+
+For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
+
+This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
new file mode 100644
index 0000000..8544585
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -0,0 +1,136 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+	"math/bits"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint   // next byte to read is at in[off - 1]
+	value    uint64 // Maybe use [16]byte, but shifting is awkward.
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.bitsRead += 8 - uint8(highBits(uint32(v)))
+	return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) int {
+	if n == 0 /*|| b.bitsRead >= 64 */ {
+		return 0
+	}
+	return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) int {
+	const regMask = 64 - 1
+	v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	b.bitsRead += n
+	return int(v)
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+	// 2 bounds checks.
+	v := b.in[b.off-4:]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off >= 4 {
+		v := b.in[b.off-4:]
+		v = v[:4]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value = (b.value << 32) | uint64(low)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value = (b.value << 8) | uint64(b.in[b.off-1])
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+	return b.off == 0 && b.bitsRead >= 64
+}
+
+// overread returns true if more bits have been requested than is on the stream.
+func (b *bitReader) overread() bool {
+	return b.bitsRead > 64
+}
+
+// remain returns the number of bits remaining.
+func (b *bitReader) remain() uint {
+	return b.off*8 + 64 - uint(b.bitsRead)
+}
+
+// close will close the bitstream and return an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
new file mode 100644
index 0000000..303ae90
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -0,0 +1,169 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+	bitContainer uint64
+	nBits        uint8
+	out          []byte
+}
+
+// bitMask16 holds bitmasks for up to 16 bits. It has extra entries to avoid bounds checks.
+var bitMask16 = [32]uint16{
+	0, 1, 3, 7, 0xF, 0x1F,
+	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF} /* up to 16 bits */
+
+var bitMask32 = [32]uint32{
+	0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+	0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+	0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+	0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
+
+// addBits16NC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits32NC will add up to 32 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits32NC(value uint32, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// flush will flush all pending full bytes.
+// There will be at least 56 bits available for writing when this has been called.
+// Using flush32 is faster, but leaves less space for writing.
+func (b *bitWriter) flush() {
+	v := b.nBits >> 3
+	switch v {
+	case 0:
+	case 1:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+		)
+	case 2:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+		)
+	case 3:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+		)
+	case 4:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+		)
+	case 5:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+		)
+	case 6:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+		)
+	case 7:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+		)
+	case 8:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+			byte(b.bitContainer>>56),
+		)
+	default:
+		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
+	}
+	b.bitContainer >>= v << 3
+	b.nBits &= 7
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+	if b.nBits < 32 {
+		return
+	}
+	b.out = append(b.out,
+		byte(b.bitContainer),
+		byte(b.bitContainer>>8),
+		byte(b.bitContainer>>16),
+		byte(b.bitContainer>>24))
+	b.nBits -= 32
+	b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+	nbBytes := (b.nBits + 7) >> 3
+	for i := uint8(0); i < nbBytes; i++ {
+		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+	}
+	b.nBits = 0
+	b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() error {
+	// End mark
+	b.addBits16Clean(1, 1)
+	// flush until next byte.
+	b.flushAlign()
+	return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+	b.bitContainer = 0
+	b.nBits = 0
+	b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
new file mode 100644
index 0000000..b51d922
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -0,0 +1,739 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/klauspost/compress/huff0"
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type blockType uint8
+
+//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex
+
+const (
+	blockTypeRaw blockType = iota
+	blockTypeRLE
+	blockTypeCompressed
+	blockTypeReserved
+)
+
+type literalsBlockType uint8
+
+const (
+	literalsBlockRaw literalsBlockType = iota
+	literalsBlockRLE
+	literalsBlockCompressed
+	literalsBlockTreeless
+)
+
+const (
+	// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
+	maxCompressedBlockSize = 128 << 10
+
+	// Maximum possible block size (all Raw+Uncompressed).
+	maxBlockSize = (1 << 21) - 1
+
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
+	maxCompressedLiteralSize = 1 << 18
+	maxRLELiteralSize        = 1 << 20
+	maxMatchLen              = 131074
+	maxSequences             = 0x7f00 + 0xffff
+
+	// We support slightly less than the reference decoder to be able to
+	// use ints on 32 bit archs.
+	maxOffsetBits = 30
+)
+
+var (
+	huffDecoderPool = sync.Pool{New: func() interface{} {
+		return &huff0.Scratch{}
+	}}
+
+	fseDecoderPool = sync.Pool{New: func() interface{} {
+		return &fseDecoder{}
+	}}
+)
+
+type blockDec struct {
+	// Raw source data of the block.
+	data        []byte
+	dataStorage []byte
+
+	// Destination of the decoded data.
+	dst []byte
+
+	// Buffer for literals data.
+	literalBuf []byte
+
+	// Window size of the block.
+	WindowSize uint64
+
+	history     chan *history
+	input       chan struct{}
+	result      chan decodeOutput
+	sequenceBuf []seq
+	err         error
+	decWG       sync.WaitGroup
+
+	// Frame to use for singlethreaded decoding.
+	// Should not be used by the decoder itself since parent may be another frame.
+	localFrame *frameDec
+
+	// Block is RLE, this is the size.
+	RLESize uint32
+	tmp     [4]byte
+
+	Type blockType
+
+	// Is this the last block of a frame?
+	Last bool
+
+	// Use less memory
+	lowMem bool
+}
+
+func (b *blockDec) String() string {
+	if b == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
+}
+
+func newBlockDec(lowMem bool) *blockDec {
+	b := blockDec{
+		lowMem:  lowMem,
+		result:  make(chan decodeOutput, 1),
+		input:   make(chan struct{}, 1),
+		history: make(chan *history, 1),
+	}
+	b.decWG.Add(1)
+	go b.startDecoder()
+	return &b
+}
+
+// reset will reset the block.
+// Input must be at the start of a block, and the reader will be at the end of the block when this returns.
+func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
+	b.WindowSize = windowSize
+	tmp := br.readSmall(3)
+	if tmp == nil {
+		if debug {
+			println("Reading block header:", io.ErrUnexpectedEOF)
+		}
+		return io.ErrUnexpectedEOF
+	}
+	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+	b.Last = bh&1 != 0
+	b.Type = blockType((bh >> 1) & 3)
+	// find size.
+	cSize := int(bh >> 3)
+	maxSize := maxBlockSize
+	switch b.Type {
+	case blockTypeReserved:
+		return ErrReservedBlockType
+	case blockTypeRLE:
+		b.RLESize = uint32(cSize)
+		if b.lowMem {
+			maxSize = cSize
+		}
+		cSize = 1
+	case blockTypeCompressed:
+		if debug {
+			println("Data size on stream:", cSize)
+		}
+		b.RLESize = 0
+		maxSize = maxCompressedBlockSize
+		if windowSize < maxCompressedBlockSize && b.lowMem {
+			maxSize = int(windowSize)
+		}
+		if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
+			if debug {
+				printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrCompressedSizeTooBig
+		}
+	case blockTypeRaw:
+		b.RLESize = 0
+		// We do not need a destination for raw blocks.
+		maxSize = -1
+	default:
+		panic("Invalid block type")
+	}
+
+	// Read block data.
+	if cap(b.dataStorage) < cSize {
+		if b.lowMem {
+			b.dataStorage = make([]byte, 0, cSize)
+		} else {
+			b.dataStorage = make([]byte, 0, maxBlockSize)
+		}
+	}
+	if cap(b.dst) <= maxSize {
+		b.dst = make([]byte, 0, maxSize+1)
+	}
+	var err error
+	b.data, err = br.readBig(cSize, b.dataStorage)
+	if err != nil {
+		if debug {
+			println("Reading block:", err, "(", cSize, ")", len(b.data))
+			printf("%T", br)
+		}
+		return err
+	}
+	return nil
+}
+
+// sendErr will make the decoder report err for this frame.
+func (b *blockDec) sendErr(err error) {
+	b.Last = true
+	b.Type = blockTypeReserved
+	b.err = err
+	b.input <- struct{}{}
+}
+
+// Close will release resources.
+// Closed blockDec cannot be reset.
+func (b *blockDec) Close() {
+	close(b.input)
+	close(b.history)
+	close(b.result)
+	b.decWG.Wait()
+}
+
+// startDecoder will decode blocks as input is received.
+// Output and history are kept separate.
+func (b *blockDec) startDecoder() {
+	defer b.decWG.Done()
+	for range b.input {
+		//println("blockDec: Got block input")
+		switch b.Type {
+		case blockTypeRLE:
+			if cap(b.dst) < int(b.RLESize) {
+				if b.lowMem {
+					b.dst = make([]byte, b.RLESize)
+				} else {
+					b.dst = make([]byte, maxBlockSize)
+				}
+			}
+			o := decodeOutput{
+				d:   b,
+				b:   b.dst[:b.RLESize],
+				err: nil,
+			}
+			v := b.data[0]
+			for i := range o.b {
+				o.b[i] = v
+			}
+			hist := <-b.history
+			hist.append(o.b)
+			b.result <- o
+		case blockTypeRaw:
+			o := decodeOutput{
+				d:   b,
+				b:   b.data,
+				err: nil,
+			}
+			hist := <-b.history
+			hist.append(o.b)
+			b.result <- o
+		case blockTypeCompressed:
+			b.dst = b.dst[:0]
+			err := b.decodeCompressed(nil)
+			o := decodeOutput{
+				d:   b,
+				b:   b.dst,
+				err: err,
+			}
+			if debug {
+				println("Decompressed to", len(b.dst), "bytes, error:", err)
+			}
+			b.result <- o
+		case blockTypeReserved:
+			// Used for returning errors.
+			<-b.history
+			b.result <- decodeOutput{
+				d:   b,
+				b:   nil,
+				err: b.err,
+			}
+		default:
+			panic("Invalid block type")
+		}
+		if debug {
+			println("blockDec: Finished block")
+		}
+	}
+}
+
+// decodeBuf will decode the block using the provided history.
+// If history is provided, it will not be fetched from the channel.
+func (b *blockDec) decodeBuf(hist *history) error {
+	switch b.Type {
+	case blockTypeRLE:
+		if cap(b.dst) < int(b.RLESize) {
+			if b.lowMem {
+				b.dst = make([]byte, b.RLESize)
+			} else {
+				b.dst = make([]byte, maxBlockSize)
+			}
+		}
+		b.dst = b.dst[:b.RLESize]
+		v := b.data[0]
+		for i := range b.dst {
+			b.dst[i] = v
+		}
+		hist.appendKeep(b.dst)
+		return nil
+	case blockTypeRaw:
+		hist.appendKeep(b.data)
+		return nil
+	case blockTypeCompressed:
+		saved := b.dst
+		b.dst = hist.b
+		hist.b = nil
+		err := b.decodeCompressed(hist)
+		if debug {
+			println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
+		}
+		hist.b = b.dst
+		b.dst = saved
+		return err
+	case blockTypeReserved:
+		// Used for returning errors.
+		return b.err
+	default:
+		panic("Invalid block type")
+	}
+}
+
+// decodeCompressed will start decompressing a block.
+// If no history is supplied the decoder will decodeAsync as much as possible
+// before fetching from blockDec.history
+func (b *blockDec) decodeCompressed(hist *history) error {
+	in := b.data
+	delayedHistory := hist == nil
+
+	if delayedHistory {
+		// We must always grab history.
+		defer func() {
+			if hist == nil {
+				<-b.history
+			}
+		}()
+	}
+	// There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
+	if len(in) < 2 {
+		return ErrBlockTooSmall
+	}
+	litType := literalsBlockType(in[0] & 3)
+	var litRegenSize int
+	var litCompSize int
+	sizeFormat := (in[0] >> 2) & 3
+	var fourStreams bool
+	switch litType {
+	case literalsBlockRaw, literalsBlockRLE:
+		switch sizeFormat {
+		case 0, 2:
+			// Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte.
+			litRegenSize = int(in[0] >> 3)
+			in = in[1:]
+		case 1:
+			// Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes.
+			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4)
+			in = in[2:]
+		case 3:
+			//  Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
+			if len(in) < 3 {
+				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+				return ErrBlockTooSmall
+			}
+			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
+			in = in[3:]
+		}
+	case literalsBlockCompressed, literalsBlockTreeless:
+		switch sizeFormat {
+		case 0, 1:
+			// Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
+			if len(in) < 3 {
+				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+				return ErrBlockTooSmall
+			}
+			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
+			litRegenSize = int(n & 1023)
+			litCompSize = int(n >> 10)
+			fourStreams = sizeFormat == 1
+			in = in[3:]
+		case 2:
+			fourStreams = true
+			if len(in) < 4 {
+				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+				return ErrBlockTooSmall
+			}
+			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
+			litRegenSize = int(n & 16383)
+			litCompSize = int(n >> 14)
+			in = in[4:]
+		case 3:
+			fourStreams = true
+			if len(in) < 5 {
+				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+				return ErrBlockTooSmall
+			}
+			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
+			litRegenSize = int(n & 262143)
+			litCompSize = int(n >> 18)
+			in = in[5:]
+		}
+	}
+	if debug {
+		println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
+	}
+	var literals []byte
+	var huff *huff0.Scratch
+	switch litType {
+	case literalsBlockRaw:
+		if len(in) < litRegenSize {
+			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
+			return ErrBlockTooSmall
+		}
+		literals = in[:litRegenSize]
+		in = in[litRegenSize:]
+		//printf("Found %d uncompressed literals\n", litRegenSize)
+	case literalsBlockRLE:
+		if len(in) < 1 {
+			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
+			return ErrBlockTooSmall
+		}
+		if cap(b.literalBuf) < litRegenSize {
+			if b.lowMem {
+				b.literalBuf = make([]byte, litRegenSize)
+			} else {
+				if litRegenSize > maxCompressedLiteralSize {
+					// Exceptional
+					b.literalBuf = make([]byte, litRegenSize)
+				} else {
+					b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
+
+				}
+			}
+		}
+		literals = b.literalBuf[:litRegenSize]
+		v := in[0]
+		for i := range literals {
+			literals[i] = v
+		}
+		in = in[1:]
+		if debug {
+			printf("Found %d RLE compressed literals\n", litRegenSize)
+		}
+	case literalsBlockTreeless:
+		if len(in) < litCompSize {
+			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
+			return ErrBlockTooSmall
+		}
+		// Store compressed literals, so we defer decoding until we get history.
+		literals = in[:litCompSize]
+		in = in[litCompSize:]
+		if debug {
+			printf("Found %d compressed literals\n", litCompSize)
+		}
+	case literalsBlockCompressed:
+		if len(in) < litCompSize {
+			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
+			return ErrBlockTooSmall
+		}
+		literals = in[:litCompSize]
+		in = in[litCompSize:]
+		huff = huffDecoderPool.Get().(*huff0.Scratch)
+		var err error
+		// Ensure we have space to store it.
+		if cap(b.literalBuf) < litRegenSize {
+			if b.lowMem {
+				b.literalBuf = make([]byte, 0, litRegenSize)
+			} else {
+				b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+			}
+		}
+		if huff == nil {
+			huff = &huff0.Scratch{}
+		}
+		huff, literals, err = huff0.ReadTable(literals, huff)
+		if err != nil {
+			println("reading huffman table:", err)
+			return err
+		}
+		// Use our out buffer.
+		if fourStreams {
+			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
+		} else {
+			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
+		}
+		if err != nil {
+			println("decoding compressed literals:", err)
+			return err
+		}
+		// Make sure we don't leak our literals buffer
+		if len(literals) != litRegenSize {
+			return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+		}
+		if debug {
+			printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
+		}
+	}
+
+	// Decode Sequences
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
+	if len(in) < 1 {
+		return ErrBlockTooSmall
+	}
+	seqHeader := in[0]
+	nSeqs := 0
+	switch {
+	case seqHeader == 0:
+		in = in[1:]
+	case seqHeader < 128:
+		nSeqs = int(seqHeader)
+		in = in[1:]
+	case seqHeader < 255:
+		if len(in) < 2 {
+			return ErrBlockTooSmall
+		}
+		nSeqs = int(seqHeader-128)<<8 | int(in[1])
+		in = in[2:]
+	case seqHeader == 255:
+		if len(in) < 3 {
+			return ErrBlockTooSmall
+		}
+		nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
+		in = in[3:]
+	}
+	// Allocate sequences
+	if cap(b.sequenceBuf) < nSeqs {
+		if b.lowMem {
+			b.sequenceBuf = make([]seq, nSeqs)
+		} else {
+			// Allocate max
+			b.sequenceBuf = make([]seq, nSeqs, maxSequences)
+		}
+	} else {
+		// Reuse buffer
+		b.sequenceBuf = b.sequenceBuf[:nSeqs]
+	}
+	var seqs = &sequenceDecs{}
+	if nSeqs > 0 {
+		if len(in) < 1 {
+			return ErrBlockTooSmall
+		}
+		br := byteReader{b: in, off: 0}
+		compMode := br.Uint8()
+		br.advance(1)
+		if debug {
+			printf("Compression modes: 0b%b", compMode)
+		}
+		for i := uint(0); i < 3; i++ {
+			mode := seqCompMode((compMode >> (6 - i*2)) & 3)
+			if debug {
+				println("Table", tableIndex(i), "is", mode)
+			}
+			var seq *sequenceDec
+			switch tableIndex(i) {
+			case tableLiteralLengths:
+				seq = &seqs.litLengths
+			case tableOffsets:
+				seq = &seqs.offsets
+			case tableMatchLengths:
+				seq = &seqs.matchLengths
+			default:
+				panic("unknown table")
+			}
+			switch mode {
+			case compModePredefined:
+				seq.fse = &fsePredef[i]
+			case compModeRLE:
+				if br.remain() < 1 {
+					return ErrBlockTooSmall
+				}
+				v := br.Uint8()
+				br.advance(1)
+				dec := fseDecoderPool.Get().(*fseDecoder)
+				symb, err := decSymbolValue(v, symbolTableX[i])
+				if err != nil {
+					printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
+					return err
+				}
+				dec.setRLE(symb)
+				seq.fse = dec
+				if debug {
+					printf("RLE set to %+v, code: %v", symb, v)
+				}
+			case compModeFSE:
+				println("Reading table for", tableIndex(i))
+				dec := fseDecoderPool.Get().(*fseDecoder)
+				err := dec.readNCount(&br, uint16(maxTableSymbol[i]))
+				if err != nil {
+					println("Read table error:", err)
+					return err
+				}
+				err = dec.transform(symbolTableX[i])
+				if err != nil {
+					println("Transform table error:", err)
+					return err
+				}
+				if debug {
+					println("Read table ok", "symbolLen:", dec.symbolLen)
+				}
+				seq.fse = dec
+			case compModeRepeat:
+				seq.repeat = true
+			}
+			if br.overread() {
+				return io.ErrUnexpectedEOF
+			}
+		}
+		in = br.unread()
+	}
+
+	// Wait for history.
+	// All time spent after this is critical since it is strictly sequential.
+	if hist == nil {
+		hist = <-b.history
+		if hist.error {
+			return ErrDecoderClosed
+		}
+	}
+
+	// Decode treeless literal block.
+	if litType == literalsBlockTreeless {
+		// TODO: We could send the history early WITHOUT the stream history.
+		//   This would allow decoding treeless literals before the byte history is available.
+		//   Silesia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
+		//   So not much obvious gain here.
+
+		if hist.huffTree == nil {
+			return errors.New("literal block was treeless, but no history was defined")
+		}
+		// Ensure we have space to store it.
+		if cap(b.literalBuf) < litRegenSize {
+			if b.lowMem {
+				b.literalBuf = make([]byte, 0, litRegenSize)
+			} else {
+				b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+			}
+		}
+		var err error
+		// Use our out buffer.
+		huff = hist.huffTree
+		if fourStreams {
+			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
+		} else {
+			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
+		}
+		// Make sure we don't leak our literals buffer
+		if err != nil {
+			println("decompressing literals:", err)
+			return err
+		}
+		if len(literals) != litRegenSize {
+			return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+		}
+	} else {
+		if hist.huffTree != nil && huff != nil {
+			if hist.dict == nil || hist.dict.litEnc != hist.huffTree {
+				huffDecoderPool.Put(hist.huffTree)
+			}
+			hist.huffTree = nil
+		}
+	}
+	if huff != nil {
+		hist.huffTree = huff
+	}
+	if debug {
+		println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
+	}
+
+	if nSeqs == 0 {
+		// Decompressed content is defined entirely as Literals Section content.
+		b.dst = append(b.dst, literals...)
+		if delayedHistory {
+			hist.append(literals)
+		}
+		return nil
+	}
+
+	seqs, err := seqs.mergeHistory(&hist.decoders)
+	if err != nil {
+		return err
+	}
+	if debug {
+		println("History merged ok")
+	}
+	br := &bitReader{}
+	if err := br.init(in); err != nil {
+		return err
+	}
+
+	// TODO: Investigate if sending history without decoders are faster.
+	//   This would allow the sequences to be decoded async and only have to construct stream history.
+	//   If only recent offsets were not transferred, this would be an obvious win.
+	// 	 Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.
+
+	hbytes := hist.b
+	if len(hbytes) > hist.windowSize {
+		hbytes = hbytes[len(hbytes)-hist.windowSize:]
+		// We do not need history any more.
+		if hist.dict != nil {
+			hist.dict.content = nil
+		}
+	}
+
+	if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
+		println("initializing sequences:", err)
+		return err
+	}
+
+	err = seqs.decode(nSeqs, br, hbytes)
+	if err != nil {
+		return err
+	}
+	if !br.finished() {
+		return fmt.Errorf("%d extra bits on block, should be 0", br.remain())
+	}
+
+	err = br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+	}
+	if len(b.data) > maxCompressedBlockSize {
+		return fmt.Errorf("compressed block size too large (%d)", len(b.data))
+	}
+	// Set output and release references.
+	b.dst = seqs.out
+	seqs.out, seqs.literals, seqs.hist = nil, nil, nil
+
+	if !delayedHistory {
+		// If we don't have delayed history, no need to update.
+		hist.recentOffsets = seqs.prevOffset
+		return nil
+	}
+	if b.Last {
+		// if last block we don't care about history.
+		println("Last block, no history returned")
+		hist.b = hist.b[:0]
+		return nil
+	}
+	hist.append(b.dst)
+	hist.recentOffsets = seqs.prevOffset
+	if debug {
+		println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
new file mode 100644
index 0000000..e1be092
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -0,0 +1,871 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+type blockEnc struct {
+	size       int
+	literals   []byte
+	sequences  []seq
+	coders     seqCoders
+	litEnc     *huff0.Scratch
+	dictLitEnc *huff0.Scratch
+	wr         bitWriter
+
+	extraLits         int
+	output            []byte
+	recentOffsets     [3]uint32
+	prevRecentOffsets [3]uint32
+
+	last   bool
+	lowMem bool
+}
+
+// init should be used once the block has been created.
+// If called more than once, the effect is the same as calling reset.
+func (b *blockEnc) init() {
+	if b.lowMem {
+		// 1K literals
+		if cap(b.literals) < 1<<10 {
+			b.literals = make([]byte, 0, 1<<10)
+		}
+		const defSeqs = 20
+		if cap(b.sequences) < defSeqs {
+			b.sequences = make([]seq, 0, defSeqs)
+		}
+		// 1K
+		if cap(b.output) < 1<<10 {
+			b.output = make([]byte, 0, 1<<10)
+		}
+	} else {
+		if cap(b.literals) < maxCompressedBlockSize {
+			b.literals = make([]byte, 0, maxCompressedBlockSize)
+		}
+		const defSeqs = 200
+		if cap(b.sequences) < defSeqs {
+			b.sequences = make([]seq, 0, defSeqs)
+		}
+		if cap(b.output) < maxCompressedBlockSize {
+			b.output = make([]byte, 0, maxCompressedBlockSize)
+		}
+	}
+
+	if b.coders.mlEnc == nil {
+		b.coders.mlEnc = &fseEncoder{}
+		b.coders.mlPrev = &fseEncoder{}
+		b.coders.ofEnc = &fseEncoder{}
+		b.coders.ofPrev = &fseEncoder{}
+		b.coders.llEnc = &fseEncoder{}
+		b.coders.llPrev = &fseEncoder{}
+	}
+	b.litEnc = &huff0.Scratch{WantLogLess: 4}
+	b.reset(nil)
+}
+
+// initNewEncode can be used to reset offsets and encoders to the initial state.
+func (b *blockEnc) initNewEncode() {
+	b.recentOffsets = [3]uint32{1, 4, 8}
+	b.litEnc.Reuse = huff0.ReusePolicyNone
+	b.coders.setPrev(nil, nil, nil)
+}
+
+// reset will reset the block for a new encode, but in the same stream,
+// meaning that state will be carried over, but the block content is reset.
+// If a previous block is provided, the recent offsets are carried over.
+func (b *blockEnc) reset(prev *blockEnc) {
+	b.extraLits = 0
+	b.literals = b.literals[:0]
+	b.size = 0
+	b.sequences = b.sequences[:0]
+	b.output = b.output[:0]
+	b.last = false
+	if prev != nil {
+		b.recentOffsets = prev.prevRecentOffsets
+	}
+	b.dictLitEnc = nil
+}
+
+// swapEncoders will swap the FSE coders and the literal encoder with those
+// of the previous block, so state can be re-used for the new encode.
+func (b *blockEnc) swapEncoders(prev *blockEnc) {
+	b.coders.swap(&prev.coders)
+	b.litEnc, prev.litEnc = prev.litEnc, b.litEnc
+}
+
+// blockHeader contains the information for a block header.
+type blockHeader uint32
+
+// setLast sets the 'last' indicator on a block.
+func (h *blockHeader) setLast(b bool) {
+	if b {
+		*h = *h | 1
+	} else {
+		const mask = (1 << 24) - 2
+		*h = *h & mask
+	}
+}
+
+// setSize will store the compressed size of a block.
+func (h *blockHeader) setSize(v uint32) {
+	const mask = 7
+	*h = (*h)&mask | blockHeader(v<<3)
+}
+
+// setType sets the block type.
+func (h *blockHeader) setType(t blockType) {
+	const mask = 1 | (((1 << 24) - 1) ^ 7)
+	*h = (*h & mask) | blockHeader(t<<1)
+}
+
+// appendTo will append the block header to a slice.
+func (h blockHeader) appendTo(b []byte) []byte {
+	return append(b, uint8(h), uint8(h>>8), uint8(h>>16))
+}
+
+// String returns a string representation of the block.
+func (h blockHeader) String() string {
+	return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1)
+}
+
+// literalsHeader contains literals header information.
+type literalsHeader uint64
+
+// setType can be used to set the type of literal block.
+func (h *literalsHeader) setType(t literalsBlockType) {
+	const mask = math.MaxUint64 - 3
+	*h = (*h & mask) | literalsHeader(t)
+}
+
+// setSize can be used to set a single size, for uncompressed and RLE content.
+func (h *literalsHeader) setSize(regenLen int) {
+	inBits := bits.Len32(uint32(regenLen))
+	// Only retain 2 bits
+	const mask = 3
+	lh := uint64(*h & mask)
+	switch {
+	case inBits < 5:
+		lh |= (uint64(regenLen) << 3) | (1 << 60)
+		if debug {
+			got := int(lh>>3) & 0xff
+			if got != regenLen {
+				panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)"))
+			}
+		}
+	case inBits < 12:
+		lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60)
+	case inBits < 20:
+		lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60)
+	default:
+		panic(fmt.Errorf("internal error: block too big (%d)", regenLen))
+	}
+	*h = literalsHeader(lh)
+}
+
+// setSizes will set the size of a compressed literals section and the input length.
+func (h *literalsHeader) setSizes(compLen, inLen int, single bool) {
+	compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen))
+	// Only retain 2 bits
+	const mask = 3
+	lh := uint64(*h & mask)
+	switch {
+	case compBits <= 10 && inBits <= 10:
+		if !single {
+			lh |= 1 << 2
+		}
+		lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)
+		if debug {
+			const mmask = (1 << 24) - 1
+			n := (lh >> 4) & mmask
+			if int(n&1023) != inLen {
+				panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits))
+			}
+			if int(n>>10) != compLen {
+				panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits))
+			}
+		}
+	case compBits <= 14 && inBits <= 14:
+		lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60)
+		if single {
+			panic("single stream used with more than 10 bits length.")
+		}
+	case compBits <= 18 && inBits <= 18:
+		lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60)
+		if single {
+			panic("single stream used with more than 10 bits length.")
+		}
+	default:
+		panic("internal error: block too big")
+	}
+	*h = literalsHeader(lh)
+}
+
+// appendTo will append the literals header to a byte slice.
+func (h literalsHeader) appendTo(b []byte) []byte {
+	size := uint8(h >> 60)
+	switch size {
+	case 1:
+		b = append(b, uint8(h))
+	case 2:
+		b = append(b, uint8(h), uint8(h>>8))
+	case 3:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16))
+	case 4:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24))
+	case 5:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32))
+	default:
+		panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size))
+	}
+	return b
+}
+
+// size returns the output size with currently set values.
+func (h literalsHeader) size() int {
+	return int(h >> 60)
+}
+
+func (h literalsHeader) String() string {
+	return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60)
+}
+
+// pushOffsets will push the recent offsets to the backup store.
+func (b *blockEnc) pushOffsets() {
+	b.prevRecentOffsets = b.recentOffsets
+}
+
+// popOffsets will restore the recent offsets from the backup store.
+func (b *blockEnc) popOffsets() {
+	b.recentOffsets = b.prevRecentOffsets
+}
+
+// matchOffset will adjust recent offsets and return the adjusted one,
+// if it matches a previous offset.
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 {
+	// Check if offset is one of the recent offsets.
+	// Adjusts the output offset accordingly.
+	// Gives a tiny bit of compression, typically around 1%.
+	if true {
+		if lits > 0 {
+			switch offset {
+			case b.recentOffsets[0]:
+				offset = 1
+			case b.recentOffsets[1]:
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 2
+			case b.recentOffsets[2]:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 3
+			default:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset += 3
+			}
+		} else {
+			switch offset {
+			case b.recentOffsets[1]:
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 1
+			case b.recentOffsets[2]:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 2
+			case b.recentOffsets[0] - 1:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 3
+			default:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset += 3
+			}
+		}
+	} else {
+		offset += 3
+	}
+	return offset
+}
+
+// encodeRaw can be used to set the output to a raw representation of supplied bytes.
+func (b *blockEnc) encodeRaw(a []byte) {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(a)))
+	bh.setType(blockTypeRaw)
+	b.output = bh.appendTo(b.output[:0])
+	b.output = append(b.output, a...)
+	if debug {
+		println("Adding RAW block, length", len(a), "last:", b.last)
+	}
+}
+
+// encodeRawTo will append a raw block containing src to dst and return the result.
+func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(src)))
+	bh.setType(blockTypeRaw)
+	dst = bh.appendTo(dst)
+	dst = append(dst, src...)
+	if debug {
+		println("Adding RAW block, length", len(src), "last:", b.last)
+	}
+	return dst
+}
+
+// encodeLits can be used if the block is only litLen.
+func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(lits)))
+
+	// Don't compress extremely small blocks
+	if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw {
+		if debug {
+			println("Adding RAW block, length", len(lits), "last:", b.last)
+		}
+		bh.setType(blockTypeRaw)
+		b.output = bh.appendTo(b.output)
+		b.output = append(b.output, lits...)
+		return nil
+	}
+
+	var (
+		out            []byte
+		reUsed, single bool
+		err            error
+	)
+	if b.dictLitEnc != nil {
+		b.litEnc.TransferCTable(b.dictLitEnc)
+		b.litEnc.Reuse = huff0.ReusePolicyAllow
+		b.dictLitEnc = nil
+	}
+	if len(lits) >= 1024 {
+		// Use 4 Streams.
+		out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
+	} else if len(lits) > 32 {
+		// Use 1 stream
+		single = true
+		out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
+	} else {
+		err = huff0.ErrIncompressible
+	}
+
+	switch err {
+	case huff0.ErrIncompressible:
+		if debug {
+			println("Adding RAW block, length", len(lits), "last:", b.last)
+		}
+		bh.setType(blockTypeRaw)
+		b.output = bh.appendTo(b.output)
+		b.output = append(b.output, lits...)
+		return nil
+	case huff0.ErrUseRLE:
+		if debug {
+			println("Adding RLE block, length", len(lits))
+		}
+		bh.setType(blockTypeRLE)
+		b.output = bh.appendTo(b.output)
+		b.output = append(b.output, lits[0])
+		return nil
+	case nil:
+	default:
+		return err
+	}
+	// Compressed...
+	// Now, allow reuse
+	b.litEnc.Reuse = huff0.ReusePolicyAllow
+	bh.setType(blockTypeCompressed)
+	var lh literalsHeader
+	if reUsed {
+		if debug {
+			println("Reused tree, compressed to", len(out))
+		}
+		lh.setType(literalsBlockTreeless)
+	} else {
+		if debug {
+			println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable))
+		}
+		lh.setType(literalsBlockCompressed)
+	}
+	// Set sizes
+	lh.setSizes(len(out), len(lits), single)
+	bh.setSize(uint32(len(out) + lh.size() + 1))
+
+	// Write block headers.
+	b.output = bh.appendTo(b.output)
+	b.output = lh.appendTo(b.output)
+	// Add compressed data.
+	b.output = append(b.output, out...)
+	// No sequences.
+	b.output = append(b.output, 0)
+	return nil
+}
+
+// fuzzFseEncoder can be used to fuzz the FSE encoder.
+func fuzzFseEncoder(data []byte) int {
+	if len(data) > maxSequences || len(data) < 2 {
+		return 0
+	}
+	enc := fseEncoder{}
+	hist := enc.Histogram()[:256]
+	maxSym := uint8(0)
+	for i, v := range data {
+		v = v & 63
+		data[i] = v
+		hist[v]++
+		if v > maxSym {
+			maxSym = v
+		}
+	}
+	if maxSym == 0 {
+		// All 0
+		return 0
+	}
+	maxCount := func(a []uint32) int {
+		var max uint32
+		for _, v := range a {
+			if v > max {
+				max = v
+			}
+		}
+		return int(max)
+	}
+	cnt := maxCount(hist[:maxSym])
+	if cnt == len(data) {
+		// RLE
+		return 0
+	}
+	enc.HistogramFinished(maxSym, cnt)
+	err := enc.normalizeCount(len(data))
+	if err != nil {
+		return 0
+	}
+	_, err = enc.writeCount(nil)
+	if err != nil {
+		panic(err)
+	}
+	return 1
+}
+
+// encode will encode the block and append the output in b.output.
+// Previous offset codes must be pushed if more blocks are expected.
+func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
+	if len(b.sequences) == 0 {
+		return b.encodeLits(b.literals, rawAllLits)
+	}
+	// We want some difference to at least account for the headers.
+	saved := b.size - len(b.literals) - (b.size >> 5)
+	if saved < 16 {
+		if org == nil {
+			return errIncompressible
+		}
+		b.popOffsets()
+		return b.encodeLits(org, rawAllLits)
+	}
+
+	var bh blockHeader
+	var lh literalsHeader
+	bh.setLast(b.last)
+	bh.setType(blockTypeCompressed)
+	// Store offset of the block header. Needed when we know the size.
+	bhOffset := len(b.output)
+	b.output = bh.appendTo(b.output)
+
+	var (
+		out            []byte
+		reUsed, single bool
+		err            error
+	)
+	if b.dictLitEnc != nil {
+		b.litEnc.TransferCTable(b.dictLitEnc)
+		b.litEnc.Reuse = huff0.ReusePolicyAllow
+		b.dictLitEnc = nil
+	}
+	if len(b.literals) >= 1024 && !raw {
+		// Use 4 Streams.
+		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
+	} else if len(b.literals) > 32 && !raw {
+		// Use 1 stream
+		single = true
+		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
+	} else {
+		err = huff0.ErrIncompressible
+	}
+
+	switch err {
+	case huff0.ErrIncompressible:
+		lh.setType(literalsBlockRaw)
+		lh.setSize(len(b.literals))
+		b.output = lh.appendTo(b.output)
+		b.output = append(b.output, b.literals...)
+		if debug {
+			println("Adding literals RAW, length", len(b.literals))
+		}
+	case huff0.ErrUseRLE:
+		lh.setType(literalsBlockRLE)
+		lh.setSize(len(b.literals))
+		b.output = lh.appendTo(b.output)
+		b.output = append(b.output, b.literals[0])
+		if debug {
+			println("Adding literals RLE")
+		}
+	case nil:
+		// Compressed litLen...
+		if reUsed {
+			if debug {
+				println("reused tree")
+			}
+			lh.setType(literalsBlockTreeless)
+		} else {
+			if debug {
+				println("new tree, size:", len(b.litEnc.OutTable))
+			}
+			lh.setType(literalsBlockCompressed)
+			if debug {
+				_, _, err := huff0.ReadTable(out, nil)
+				if err != nil {
+					panic(err)
+				}
+			}
+		}
+		lh.setSizes(len(out), len(b.literals), single)
+		if debug {
+			printf("Compressed %d literals to %d bytes", len(b.literals), len(out))
+			println("Adding literal header:", lh)
+		}
+		b.output = lh.appendTo(b.output)
+		b.output = append(b.output, out...)
+		b.litEnc.Reuse = huff0.ReusePolicyAllow
+		if debug {
+			println("Adding literals compressed")
+		}
+	default:
+		if debug {
+			println("Adding literals ERROR:", err)
+		}
+		return err
+	}
+	// Sequence compression
+
+	// Write the number of sequences
+	switch {
+	case len(b.sequences) < 128:
+		b.output = append(b.output, uint8(len(b.sequences)))
+	case len(b.sequences) < 0x7f00: // TODO: this could be wrong
+		n := len(b.sequences)
+		b.output = append(b.output, 128+uint8(n>>8), uint8(n))
+	default:
+		n := len(b.sequences) - 0x7f00
+		b.output = append(b.output, 255, uint8(n), uint8(n>>8))
+	}
+	if debug {
+		println("Encoding", len(b.sequences), "sequences")
+	}
+	b.genCodes()
+	llEnc := b.coders.llEnc
+	ofEnc := b.coders.ofEnc
+	mlEnc := b.coders.mlEnc
+	err = llEnc.normalizeCount(len(b.sequences))
+	if err != nil {
+		return err
+	}
+	err = ofEnc.normalizeCount(len(b.sequences))
+	if err != nil {
+		return err
+	}
+	err = mlEnc.normalizeCount(len(b.sequences))
+	if err != nil {
+		return err
+	}
+
+	// Choose the best compression mode for each type.
+	// Will evaluate the new vs predefined and previous.
+	chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) {
+		// See if predefined/previous is better
+		hist := cur.count[:cur.symbolLen]
+		nSize := cur.approxSize(hist) + cur.maxHeaderSize()
+		predefSize := preDef.approxSize(hist)
+		prevSize := prev.approxSize(hist)
+
+		// Add a small penalty for new encoders.
+		// Don't bother with extremely small (<2 byte gains).
+		nSize = nSize + (nSize+2*8*16)>>4
+		switch {
+		case predefSize <= prevSize && predefSize <= nSize || forcePreDef:
+			if debug {
+				println("Using predefined", predefSize>>3, "<=", nSize>>3)
+			}
+			return preDef, compModePredefined
+		case prevSize <= nSize:
+			if debug {
+				println("Using previous", prevSize>>3, "<=", nSize>>3)
+			}
+			return prev, compModeRepeat
+		default:
+			if debug {
+				println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes")
+				println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen])
+			}
+			return cur, compModeFSE
+		}
+	}
+
+	// Write compression mode
+	var mode uint8
+	if llEnc.useRLE {
+		mode |= uint8(compModeRLE) << 6
+		llEnc.setRLE(b.sequences[0].llCode)
+		if debug {
+			println("llEnc.useRLE")
+		}
+	} else {
+		var m seqCompMode
+		llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths])
+		mode |= uint8(m) << 6
+	}
+	if ofEnc.useRLE {
+		mode |= uint8(compModeRLE) << 4
+		ofEnc.setRLE(b.sequences[0].ofCode)
+		if debug {
+			println("ofEnc.useRLE")
+		}
+	} else {
+		var m seqCompMode
+		ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets])
+		mode |= uint8(m) << 4
+	}
+
+	if mlEnc.useRLE {
+		mode |= uint8(compModeRLE) << 2
+		mlEnc.setRLE(b.sequences[0].mlCode)
+		if debug {
+			println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen)
+		}
+	} else {
+		var m seqCompMode
+		mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths])
+		mode |= uint8(m) << 2
+	}
+	b.output = append(b.output, mode)
+	if debug {
+		printf("Compression modes: 0b%b", mode)
+	}
+	b.output, err = llEnc.writeCount(b.output)
+	if err != nil {
+		return err
+	}
+	start := len(b.output)
+	b.output, err = ofEnc.writeCount(b.output)
+	if err != nil {
+		return err
+	}
+	if false {
+		println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount)
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen)
+		for i, v := range ofEnc.norm[:ofEnc.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v)
+		}
+	}
+	b.output, err = mlEnc.writeCount(b.output)
+	if err != nil {
+		return err
+	}
+
+	// Maybe in block?
+	wr := &b.wr
+	wr.reset(b.output)
+
+	var ll, of, ml cState
+
+	// Current sequence
+	seq := len(b.sequences) - 1
+	s := b.sequences[seq]
+	llEnc.setBits(llBitsTable[:])
+	mlEnc.setBits(mlBitsTable[:])
+	ofEnc.setBits(nil)
+
+	llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256]
+
+	// We have 3 bounds checks here (and in the loop).
+	// Since we are iterating backwards, they are hard to avoid.
+	llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+	ll.init(wr, &llEnc.ct, llB)
+	of.init(wr, &ofEnc.ct, ofB)
+	wr.flush32()
+	ml.init(wr, &mlEnc.ct, mlB)
+
+	// Each of these lookups also generates a bounds check.
+	wr.addBits32NC(s.litLen, llB.outBits)
+	wr.addBits32NC(s.matchLen, mlB.outBits)
+	wr.flush32()
+	wr.addBits32NC(s.offset, ofB.outBits)
+	if debugSequences {
+		println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB)
+	}
+	seq--
+	if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 {
+		// No need to flush (common)
+		for seq >= 0 {
+			s = b.sequences[seq]
+			wr.flush32()
+			llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+			// tablelog max is 8 for all.
+			of.encode(ofB)
+			ml.encode(mlB)
+			ll.encode(llB)
+			wr.flush32()
+
+			// We checked that all can stay within 32 bits
+			wr.addBits32NC(s.litLen, llB.outBits)
+			wr.addBits32NC(s.matchLen, mlB.outBits)
+			wr.addBits32NC(s.offset, ofB.outBits)
+
+			if debugSequences {
+				println("Encoded seq", seq, s)
+			}
+
+			seq--
+		}
+	} else {
+		for seq >= 0 {
+			s = b.sequences[seq]
+			wr.flush32()
+			llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+			// tablelog max is below 8 for each.
+			of.encode(ofB)
+			ml.encode(mlB)
+			ll.encode(llB)
+			wr.flush32()
+
+			// ml+ll = max 32 bits total
+			wr.addBits32NC(s.litLen, llB.outBits)
+			wr.addBits32NC(s.matchLen, mlB.outBits)
+			wr.flush32()
+			wr.addBits32NC(s.offset, ofB.outBits)
+
+			if debugSequences {
+				println("Encoded seq", seq, s)
+			}
+
+			seq--
+		}
+	}
+	ml.flush(mlEnc.actualTableLog)
+	of.flush(ofEnc.actualTableLog)
+	ll.flush(llEnc.actualTableLog)
+	err = wr.close()
+	if err != nil {
+		return err
+	}
+	b.output = wr.out
+
+	if len(b.output)-3-bhOffset >= b.size {
+		// Maybe even add a bigger margin.
+		b.litEnc.Reuse = huff0.ReusePolicyNone
+		return errIncompressible
+	}
+
+	// Size is output minus block header.
+	bh.setSize(uint32(len(b.output)-bhOffset) - 3)
+	if debug {
+		println("Rewriting block header", bh)
+	}
+	_ = bh.appendTo(b.output[bhOffset:bhOffset])
+	b.coders.setPrev(llEnc, mlEnc, ofEnc)
+	return nil
+}
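Reviewer note on the sequence-count header written near the top of encode() above (the switch carrying the `0x7f00` TODO): as far as we can tell it matches the 1/2/3-byte layout used by the zstd format. Below is a standalone sketch of the same packing, with an inverse for cross-checking; the helper names are ours and are not part of the patch.

```go
package main

import "fmt"

// putSeqCount mirrors the sequence-count switch in blockEnc.encode.
func putSeqCount(dst []byte, n int) []byte {
	switch {
	case n < 128:
		return append(dst, uint8(n))
	case n < 0x7f00:
		return append(dst, 128+uint8(n>>8), uint8(n))
	default:
		n -= 0x7f00
		return append(dst, 255, uint8(n), uint8(n>>8))
	}
}

// getSeqCount reverses putSeqCount, for cross-checking only.
func getSeqCount(b []byte) int {
	switch {
	case b[0] < 128:
		return int(b[0])
	case b[0] < 255:
		return int(b[0]-128)<<8 | int(b[1])
	default:
		return 0x7f00 + int(b[1]) + int(b[2])<<8
	}
}

func main() {
	for _, n := range []int{5, 127, 128, 0x7eff, 0x7f00, 40000} {
		enc := putSeqCount(nil, n)
		fmt.Println(n, enc, getSeqCount(enc)) // round-trips for all boundary cases
	}
}
```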
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+	if len(b.sequences) == 0 {
+		// nothing to do
+		return
+	}
+
+	if len(b.sequences) > math.MaxUint16 {
+		panic("can only encode up to 64K sequences")
+	}
+	// No bounds checks after here:
+	llH := b.coders.llEnc.Histogram()[:256]
+	ofH := b.coders.ofEnc.Histogram()[:256]
+	mlH := b.coders.mlEnc.Histogram()[:256]
+	for i := range llH {
+		llH[i] = 0
+	}
+	for i := range ofH {
+		ofH[i] = 0
+	}
+	for i := range mlH {
+		mlH[i] = 0
+	}
+
+	var llMax, ofMax, mlMax uint8
+	for i, seq := range b.sequences {
+		v := llCode(seq.litLen)
+		seq.llCode = v
+		llH[v]++
+		if v > llMax {
+			llMax = v
+		}
+
+		v = ofCode(seq.offset)
+		seq.ofCode = v
+		ofH[v]++
+		if v > ofMax {
+			ofMax = v
+		}
+
+		v = mlCode(seq.matchLen)
+		seq.mlCode = v
+		mlH[v]++
+		if v > mlMax {
+			mlMax = v
+			if debugAsserts && mlMax > maxMatchLengthSymbol {
+				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+			}
+		}
+		b.sequences[i] = seq
+	}
+	maxCount := func(a []uint32) int {
+		var max uint32
+		for _, v := range a {
+			if v > max {
+				max = v
+			}
+		}
+		return int(max)
+	}
+	if debugAsserts && mlMax > maxMatchLengthSymbol {
+		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+	}
+	if debugAsserts && ofMax > maxOffsetBits {
+		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+	}
+	if debugAsserts && llMax > maxLiteralLengthSymbol {
+		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+	}
+
+	b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
+	b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
+	b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+}
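genCodes above builds one histogram per code stream (literal lengths, offsets, match lengths) and tracks both the largest symbol and the largest bucket so the FSE encoders can size their tables. The same bookkeeping in isolation, with purely illustrative values:

```go
package main

import "fmt"

func main() {
	codes := []uint8{3, 1, 3, 7, 3, 1}

	// One bucket per possible code, as the Histogram() slices above.
	var hist [256]uint32
	var maxSym uint8
	for _, c := range codes {
		hist[c]++
		if c > maxSym {
			maxSym = c
		}
	}

	// Largest single bucket, as the maxCount helper computes.
	var maxCount uint32
	for _, v := range hist[:int(maxSym)+1] {
		if v > maxCount {
			maxCount = v
		}
	}
	fmt.Println("max symbol:", maxSym, "max count:", maxCount, "histogram:", hist[:int(maxSym)+1])
}
```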
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
new file mode 100644
index 0000000..01a01e4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
@@ -0,0 +1,85 @@
+// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT.
+
+package zstd
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[blockTypeRaw-0]
+	_ = x[blockTypeRLE-1]
+	_ = x[blockTypeCompressed-2]
+	_ = x[blockTypeReserved-3]
+}
+
+const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved"
+
+var _blockType_index = [...]uint8{0, 12, 24, 43, 60}
+
+func (i blockType) String() string {
+	if i >= blockType(len(_blockType_index)-1) {
+		return "blockType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _blockType_name[_blockType_index[i]:_blockType_index[i+1]]
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[literalsBlockRaw-0]
+	_ = x[literalsBlockRLE-1]
+	_ = x[literalsBlockCompressed-2]
+	_ = x[literalsBlockTreeless-3]
+}
+
+const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless"
+
+var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76}
+
+func (i literalsBlockType) String() string {
+	if i >= literalsBlockType(len(_literalsBlockType_index)-1) {
+		return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]]
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[compModePredefined-0]
+	_ = x[compModeRLE-1]
+	_ = x[compModeFSE-2]
+	_ = x[compModeRepeat-3]
+}
+
+const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat"
+
+var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54}
+
+func (i seqCompMode) String() string {
+	if i >= seqCompMode(len(_seqCompMode_index)-1) {
+		return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]]
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[tableLiteralLengths-0]
+	_ = x[tableOffsets-1]
+	_ = x[tableMatchLengths-2]
+}
+
+const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths"
+
+var _tableIndex_index = [...]uint8{0, 19, 31, 48}
+
+func (i tableIndex) String() string {
+	if i >= tableIndex(len(_tableIndex_index)-1) {
+		return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]]
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
new file mode 100644
index 0000000..658ef78
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -0,0 +1,127 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+)
+
+type byteBuffer interface {
+	// Read up to 8 bytes.
+	// Returns nil if no more input is available.
+	readSmall(n int) []byte
+
+	// Read >8 bytes.
+	// MAY use the destination slice.
+	readBig(n int, dst []byte) ([]byte, error)
+
+	// Read a single byte.
+	readByte() (byte, error)
+
+	// Skip n bytes.
+	skipN(n int) error
+}
+
+// in-memory buffer
+type byteBuf []byte
+
+func (b *byteBuf) readSmall(n int) []byte {
+	if debugAsserts && n > 8 {
+		panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
+	}
+	bb := *b
+	if len(bb) < n {
+		return nil
+	}
+	r := bb[:n]
+	*b = bb[n:]
+	return r
+}
+
+func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
+	bb := *b
+	if len(bb) < n {
+		return nil, io.ErrUnexpectedEOF
+	}
+	r := bb[:n]
+	*b = bb[n:]
+	return r, nil
+}
+
+func (b *byteBuf) remain() []byte {
+	return *b
+}
+
+func (b *byteBuf) readByte() (byte, error) {
+	bb := *b
+	if len(bb) < 1 {
+		return 0, nil
+	}
+	r := bb[0]
+	*b = bb[1:]
+	return r, nil
+}
+
+func (b *byteBuf) skipN(n int) error {
+	bb := *b
+	if len(bb) < n {
+		return io.ErrUnexpectedEOF
+	}
+	*b = bb[n:]
+	return nil
+}
+
+// wrapper around a reader.
+type readerWrapper struct {
+	r   io.Reader
+	tmp [8]byte
+}
+
+func (r *readerWrapper) readSmall(n int) []byte {
+	if debugAsserts && n > 8 {
+		panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
+	}
+	n2, err := io.ReadFull(r.r, r.tmp[:n])
+	// We only really care about the actual bytes read.
+	if n2 != n {
+		if debug {
+			println("readSmall: got", n2, "want", n, "err", err)
+		}
+		return nil
+	}
+	return r.tmp[:n]
+}
+
+func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
+	if cap(dst) < n {
+		dst = make([]byte, n)
+	}
+	n2, err := io.ReadFull(r.r, dst[:n])
+	if err == io.EOF && n > 0 {
+		err = io.ErrUnexpectedEOF
+	}
+	return dst[:n2], err
+}
+
+func (r *readerWrapper) readByte() (byte, error) {
+	n2, err := r.r.Read(r.tmp[:1])
+	if err != nil {
+		return 0, err
+	}
+	if n2 != 1 {
+		return 0, io.ErrUnexpectedEOF
+	}
+	return r.tmp[0], nil
+}
+
+func (r *readerWrapper) skipN(n int) error {
+	n2, err := io.CopyN(ioutil.Discard, r.r, int64(n))
+	if n2 != int64(n) {
+		err = io.ErrUnexpectedEOF
+	}
+	return err
+}
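The byteBuffer interface above lets frame parsing treat an in-memory []byte and a wrapped io.Reader identically. A trimmed, self-contained sketch of the idea; the readMagic consumer is ours and not part of the patch:

```go
package main

import (
	"fmt"
	"io"
)

// Trimmed-down copy of the byteBuffer idea from bytebuf.go.
type byteBuffer interface {
	readSmall(n int) []byte // up to 8 bytes, nil if unavailable
}

// In-memory implementation, as byteBuf does above.
type byteBuf []byte

func (b *byteBuf) readSmall(n int) []byte {
	bb := *b
	if len(bb) < n {
		return nil
	}
	r := bb[:n]
	*b = bb[n:]
	return r
}

// readMagic neither knows nor cares whether the bytes come from memory
// or from an io.Reader-backed implementation.
func readMagic(src byteBuffer) ([]byte, error) {
	v := src.readSmall(4)
	if v == nil {
		return nil, io.ErrUnexpectedEOF
	}
	return v, nil
}

func main() {
	buf := byteBuf([]byte{0x28, 0xb5, 0x2f, 0xfd, 0x00})
	magic, err := readMagic(&buf)
	fmt.Println(magic, err)
}
```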
diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go
new file mode 100644
index 0000000..2c4fca1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go
@@ -0,0 +1,88 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance the stream by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// overread returns whether we have advanced too far.
+func (b *byteReader) overread() bool {
+	return b.off > len(b.b)
+}
+
+// Int32 returns a little endian int32 starting at current offset.
+func (b byteReader) Int32() int32 {
+	b2 := b.b[b.off:]
+	b2 = b2[:4]
+	v3 := int32(b2[3])
+	v2 := int32(b2[2])
+	v1 := int32(b2[1])
+	v0 := int32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// Uint8 returns the next byte
+func (b *byteReader) Uint8() uint8 {
+	v := b.b[b.off]
+	return v
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+	if r := b.remain(); r < 4 {
+		// Very rare
+		v := uint32(0)
+		for i := 1; i <= r; i++ {
+			v = (v << 8) | uint32(b.b[len(b.b)-i])
+		}
+		return v
+	}
+	b2 := b.b[b.off:]
+	b2 = b2[:4]
+	v3 := uint32(b2[3])
+	v2 := uint32(b2[2])
+	v1 := uint32(b2[1])
+	v0 := uint32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// Uint32NC returns a little endian uint32 starting at current offset.
+// The caller must ensure that there are at least 4 bytes left.
+func (b byteReader) Uint32NC() uint32 {
+	b2 := b.b[b.off:]
+	b2 = b2[:4]
+	v3 := uint32(b2[3])
+	v2 := uint32(b2[2])
+	v1 := uint32(b2[1])
+	v0 := uint32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
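byteReader assembles little-endian integers by hand so the hot paths avoid extra bounds checks; the result should be identical to what encoding/binary produces. A quick illustrative cross-check:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x78, 0x56, 0x34, 0x12}

	// Manual assembly, as byteReader.Uint32 does above.
	manual := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24

	// Same value via the standard library.
	std := binary.LittleEndian.Uint32(b)

	fmt.Printf("0x%08x 0x%08x\n", manual, std) // both 0x12345678
}
```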
diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
new file mode 100644
index 0000000..69736e8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -0,0 +1,202 @@
+// Copyright 2020+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+	"bytes"
+	"errors"
+	"io"
+)
+
+// HeaderMaxSize is the maximum size of a Frame and Block Header.
+// If fewer bytes are passed to Header.Decode, they *may* still contain enough information.
+const HeaderMaxSize = 14 + 3
+
+// Header contains information about the first frame and block within that.
+type Header struct {
+	// WindowSize is the window of data to keep while decoding.
+	// Will only be set if HasFCS is false.
+	WindowSize uint64
+
+	// Frame content size.
+	// Expected size of the entire frame.
+	FrameContentSize uint64
+
+	// Dictionary ID.
+	// If 0, no dictionary.
+	DictionaryID uint32
+
+	// First block information.
+	FirstBlock struct {
+		// OK will be set if the first block could be decoded.
+		OK bool
+
+		// Is this the last block of a frame?
+		Last bool
+
+		// Is the data compressed?
+		// If true CompressedSize will be populated.
+		// Unfortunately DecompressedSize cannot be determined
+		// without decoding the blocks.
+		Compressed bool
+
+		// DecompressedSize is the expected decompressed size of the block.
+		// Will be 0 if it cannot be determined.
+		DecompressedSize int
+
+		// CompressedSize of the data in the block.
+		// Does not include the block header.
+		// Will be equal to DecompressedSize if not Compressed.
+		CompressedSize int
+	}
+
+	// Skippable will be true if the frame is meant to be skipped.
+	// No other information will be populated.
+	Skippable bool
+
+	// If set there is a checksum present for the block content.
+	HasCheckSum bool
+
+	// If this is true FrameContentSize will have a valid value
+	HasFCS bool
+
+	SingleSegment bool
+}
+
+// Decode the header from the beginning of the stream.
+// This will decode the frame header and the first block header if enough bytes are provided.
+// It is recommended to provide at least HeaderMaxSize bytes.
+// If the frame header cannot be read an error will be returned.
+// If there isn't enough input, io.ErrUnexpectedEOF is returned.
+// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
+func (h *Header) Decode(in []byte) error {
+	if len(in) < 4 {
+		return io.ErrUnexpectedEOF
+	}
+	b, in := in[:4], in[4:]
+	if !bytes.Equal(b, frameMagic) {
+		if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
+			return ErrMagicMismatch
+		}
+		*h = Header{Skippable: true}
+		return nil
+	}
+	if len(in) < 1 {
+		return io.ErrUnexpectedEOF
+	}
+
+	// Clear output
+	*h = Header{}
+	fhd, in := in[0], in[1:]
+	h.SingleSegment = fhd&(1<<5) != 0
+	h.HasCheckSum = fhd&(1<<2) != 0
+
+	if fhd&(1<<3) != 0 {
+		return errors.New("reserved bit set on frame header")
+	}
+
+	// Read Window_Descriptor
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
+	if !h.SingleSegment {
+		if len(in) < 1 {
+			return io.ErrUnexpectedEOF
+		}
+		var wd byte
+		wd, in = in[0], in[1:]
+		windowLog := 10 + (wd >> 3)
+		windowBase := uint64(1) << windowLog
+		windowAdd := (windowBase / 8) * uint64(wd&0x7)
+		h.WindowSize = windowBase + windowAdd
+	}
+
+	// Read Dictionary_ID
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
+	if size := fhd & 3; size != 0 {
+		if size == 3 {
+			size = 4
+		}
+		if len(in) < int(size) {
+			return io.ErrUnexpectedEOF
+		}
+		b, in = in[:size], in[size:]
+		if b == nil {
+			return io.ErrUnexpectedEOF
+		}
+		switch size {
+		case 1:
+			h.DictionaryID = uint32(b[0])
+		case 2:
+			h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8)
+		case 4:
+			h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+		}
+	}
+
+	// Read Frame_Content_Size
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size
+	var fcsSize int
+	v := fhd >> 6
+	switch v {
+	case 0:
+		if h.SingleSegment {
+			fcsSize = 1
+		}
+	default:
+		fcsSize = 1 << v
+	}
+
+	if fcsSize > 0 {
+		h.HasFCS = true
+		if len(in) < fcsSize {
+			return io.ErrUnexpectedEOF
+		}
+		b, in = in[:fcsSize], in[fcsSize:]
+		if b == nil {
+			return io.ErrUnexpectedEOF
+		}
+		switch fcsSize {
+		case 1:
+			h.FrameContentSize = uint64(b[0])
+		case 2:
+			// When FCS_Field_Size is 2, the offset of 256 is added.
+			h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256
+		case 4:
+			h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
+		case 8:
+			d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+			d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
+			h.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
+		}
+	}
+
+	// Frame Header done, we will not fail from now on.
+	if len(in) < 3 {
+		return nil
+	}
+	tmp := in[:3]
+	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+	h.FirstBlock.Last = bh&1 != 0
+	blockType := blockType((bh >> 1) & 3)
+	// find size.
+	cSize := int(bh >> 3)
+	switch blockType {
+	case blockTypeReserved:
+		return nil
+	case blockTypeRLE:
+		h.FirstBlock.Compressed = true
+		h.FirstBlock.DecompressedSize = cSize
+		h.FirstBlock.CompressedSize = 1
+	case blockTypeCompressed:
+		h.FirstBlock.Compressed = true
+		h.FirstBlock.CompressedSize = cSize
+	case blockTypeRaw:
+		h.FirstBlock.DecompressedSize = cSize
+		h.FirstBlock.CompressedSize = cSize
+	default:
+		panic("Invalid block type")
+	}
+
+	h.FirstBlock.OK = true
+	return nil
+}
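Header.Decode gives callers a cheap way to peek at a frame before committing to a full decode. A hedged usage sketch: the input here is a minimal hand-built frame prefix, while a real caller would pass the first HeaderMaxSize bytes of its data.

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func inspect(compressed []byte) error {
	var h zstd.Header
	// HeaderMaxSize bytes cover the frame header plus the first block header.
	n := len(compressed)
	if n > zstd.HeaderMaxSize {
		n = zstd.HeaderMaxSize
	}
	if err := h.Decode(compressed[:n]); err != nil {
		return err
	}
	if h.Skippable {
		fmt.Println("skippable frame")
		return nil
	}
	if h.HasFCS {
		fmt.Println("frame content size:", h.FrameContentSize)
	} else {
		fmt.Println("window size:", h.WindowSize)
	}
	if h.FirstBlock.OK {
		fmt.Println("first block compressed:", h.FirstBlock.Compressed)
	}
	return nil
}

func main() {
	// Magic + frame header descriptor + window descriptor; a real frame
	// would come from an encoder or a file.
	if err := inspect([]byte{0x28, 0xb5, 0x2f, 0xfd, 0x00, 0x00}); err != nil {
		fmt.Println("decode header:", err)
	}
}
```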
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
new file mode 100644
index 0000000..f593e46
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -0,0 +1,557 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// Decoder provides decoding of zstandard streams.
+// The decoder has been designed to operate without allocations after a warmup.
+// This means that you should store the decoder for best performance.
+// To re-use a stream decoder, use Reset(r io.Reader) to switch to another stream.
+// A decoder can safely be re-used even if the previous stream failed.
+// To release the resources, you must call the Close() function on a decoder.
+type Decoder struct {
+	o decoderOptions
+
+	// Unreferenced decoders, ready for use.
+	decoders chan *blockDec
+
+	// Streams ready to be decoded.
+	stream chan decodeStream
+
+	// Current read position used for Reader functionality.
+	current decoderState
+
+	// Custom dictionaries.
+	// Always uses copies.
+	dicts map[uint32]dict
+
+	// streamWg is the waitgroup for all streams
+	streamWg sync.WaitGroup
+}
+
+// decoderState is used for maintaining state when the decoder
+// is used for streaming.
+type decoderState struct {
+	// current block being written to stream.
+	decodeOutput
+
+	// output in order to be written to stream.
+	output chan decodeOutput
+
+	// cancel remaining output.
+	cancel chan struct{}
+
+	flushed bool
+}
+
+var (
+	// Check the interfaces we want to support.
+	_ = io.WriterTo(&Decoder{})
+	_ = io.Reader(&Decoder{})
+)
+
+// NewReader creates a new decoder.
+// A nil Reader can be provided, in which case Reset can be used to start a decode.
+//
+// A Decoder can be used in two modes:
+//
+// 1) As a stream, or
+// 2) For stateless decoding using DecodeAll.
+//
+// Only a single stream can be decoded concurrently, but the same decoder
+// can run multiple concurrent stateless decodes. It is even possible to
+// use stateless decodes while a stream is being decoded.
+//
+// The Reset function can be used to initiate a new stream, which will considerably
+// reduce the allocations normally caused by NewReader.
+func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
+	initPredefined()
+	var d Decoder
+	d.o.setDefault()
+	for _, o := range opts {
+		err := o(&d.o)
+		if err != nil {
+			return nil, err
+		}
+	}
+	d.current.output = make(chan decodeOutput, d.o.concurrent)
+	d.current.flushed = true
+
+	if r == nil {
+		d.current.err = ErrDecoderNilInput
+	}
+
+	// Transfer option dicts.
+	d.dicts = make(map[uint32]dict, len(d.o.dicts))
+	for _, dc := range d.o.dicts {
+		d.dicts[dc.id] = dc
+	}
+	d.o.dicts = nil
+
+	// Create decoders
+	d.decoders = make(chan *blockDec, d.o.concurrent)
+	for i := 0; i < d.o.concurrent; i++ {
+		dec := newBlockDec(d.o.lowMem)
+		dec.localFrame = newFrameDec(d.o)
+		d.decoders <- dec
+	}
+
+	if r == nil {
+		return &d, nil
+	}
+	return &d, d.Reset(r)
+}
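To make the two modes described above concrete, here is a hedged sketch of the streaming path: one long-lived Decoder, Reset per input stream, Close when done. The compressed buffer is a stand-in for real input and error handling is abbreviated.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func decompressTo(w io.Writer, r io.Reader, dec *zstd.Decoder) error {
	// Re-point the long-lived decoder at a new stream.
	if err := dec.Reset(r); err != nil {
		return err
	}
	_, err := io.Copy(w, dec)
	return err
}

func main() {
	// One decoder, reused for every stream.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderConcurrency(2))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer dec.Close()

	// compressed would hold a real zstd stream in an actual program.
	var compressed bytes.Buffer
	if err := decompressTo(os.Stdout, &compressed, dec); err != nil {
		fmt.Fprintln(os.Stderr, "decompress:", err)
	}
}
```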
+
+// Read bytes from the decompressed stream into p.
+// Returns the number of bytes written and any error that occurred.
+// When the stream is done, io.EOF will be returned.
+func (d *Decoder) Read(p []byte) (int, error) {
+	if d.stream == nil {
+		return 0, ErrDecoderNilInput
+	}
+	var n int
+	for {
+		if len(d.current.b) > 0 {
+			filled := copy(p, d.current.b)
+			p = p[filled:]
+			d.current.b = d.current.b[filled:]
+			n += filled
+		}
+		if len(p) == 0 {
+			break
+		}
+		if len(d.current.b) == 0 {
+			// We have an error and no more data
+			if d.current.err != nil {
+				break
+			}
+			if !d.nextBlock(n == 0) {
+				return n, nil
+			}
+		}
+	}
+	if len(d.current.b) > 0 {
+		if debug {
+			println("returning", n, "still bytes left:", len(d.current.b))
+		}
+		// Only return error at end of block
+		return n, nil
+	}
+	if d.current.err != nil {
+		d.drainOutput()
+	}
+	if debug {
+		println("returning", n, d.current.err, len(d.decoders))
+	}
+	return n, d.current.err
+}
+
+// Reset will reset the decoder to the supplied stream after the current one has finished processing.
+// Note that this functionality cannot be used after Close has been called.
+// Reset can be called with a nil reader to release references to the previous reader.
+// After being called with a nil reader, no other operations than Reset or DecodeAll or Close
+// should be used.
+func (d *Decoder) Reset(r io.Reader) error {
+	if d.current.err == ErrDecoderClosed {
+		return d.current.err
+	}
+
+	d.drainOutput()
+
+	if r == nil {
+		d.current.err = ErrDecoderNilInput
+		d.current.flushed = true
+		return nil
+	}
+
+	if d.stream == nil {
+		d.stream = make(chan decodeStream, 1)
+		d.streamWg.Add(1)
+		go d.startStreamDecoder(d.stream)
+	}
+
+	// If bytes buffer and < 1MB, do sync decoding anyway.
+	if bb, ok := r.(byter); ok && bb.Len() < 1<<20 {
+		bb2 := bb
+		if debug {
+			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
+		}
+		b := bb2.Bytes()
+		var dst []byte
+		if cap(d.current.b) > 0 {
+			dst = d.current.b
+		}
+
+		dst, err := d.DecodeAll(b, dst[:0])
+		if err == nil {
+			err = io.EOF
+		}
+		d.current.b = dst
+		d.current.err = err
+		d.current.flushed = true
+		if debug {
+			println("sync decode to", len(dst), "bytes, err:", err)
+		}
+		return nil
+	}
+
+	// Remove current block.
+	d.current.decodeOutput = decodeOutput{}
+	d.current.err = nil
+	d.current.cancel = make(chan struct{})
+	d.current.flushed = false
+	d.current.d = nil
+
+	d.stream <- decodeStream{
+		r:      r,
+		output: d.current.output,
+		cancel: d.current.cancel,
+	}
+	return nil
+}
+
+// drainOutput will drain the output until errEndOfStream is sent.
+func (d *Decoder) drainOutput() {
+	if d.current.cancel != nil {
+		println("cancelling current")
+		close(d.current.cancel)
+		d.current.cancel = nil
+	}
+	if d.current.d != nil {
+		if debug {
+			printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders))
+		}
+		d.decoders <- d.current.d
+		d.current.d = nil
+		d.current.b = nil
+	}
+	if d.current.output == nil || d.current.flushed {
+		println("current already flushed")
+		return
+	}
+	for v := range d.current.output {
+		if v.d != nil {
+			if debug {
+				printf("re-adding decoder %p", v.d)
+			}
+			d.decoders <- v.d
+		}
+		if v.err == errEndOfStream {
+			println("current flushed")
+			d.current.flushed = true
+			return
+		}
+	}
+}
+
+// WriteTo writes data to w until there's no more data to write or when an error occurs.
+// The return value n is the number of bytes written.
+// Any error encountered during the write is also returned.
+func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
+	if d.stream == nil {
+		return 0, ErrDecoderNilInput
+	}
+	var n int64
+	for {
+		if len(d.current.b) > 0 {
+			n2, err2 := w.Write(d.current.b)
+			n += int64(n2)
+			if err2 != nil && d.current.err == nil {
+				d.current.err = err2
+				break
+			}
+		}
+		if d.current.err != nil {
+			break
+		}
+		d.nextBlock(true)
+	}
+	err := d.current.err
+	if err != nil {
+		d.drainOutput()
+	}
+	if err == io.EOF {
+		err = nil
+	}
+	return n, err
+}
+
+// DecodeAll allows stateless decoding of a blob of bytes.
+// Output will be appended to dst, so if the destination size is known
+// you can pre-allocate the destination slice to avoid allocations.
+// DecodeAll can be used concurrently.
+// The Decoder concurrency limits will be respected.
+func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
+	if d.current.err == ErrDecoderClosed {
+		return dst, ErrDecoderClosed
+	}
+
+	// Grab a block decoder and frame decoder.
+	block := <-d.decoders
+	frame := block.localFrame
+	defer func() {
+		if debug {
+			printf("re-adding decoder: %p", block)
+		}
+		frame.rawInput = nil
+		frame.bBuf = nil
+		d.decoders <- block
+	}()
+	frame.bBuf = input
+
+	for {
+		frame.history.reset()
+		err := frame.reset(&frame.bBuf)
+		if err == io.EOF {
+			if debug {
+				println("frame reset return EOF")
+			}
+			return dst, nil
+		}
+		if frame.DictionaryID != nil {
+			dict, ok := d.dicts[*frame.DictionaryID]
+			if !ok {
+				return nil, ErrUnknownDictionary
+			}
+			frame.history.setDict(&dict)
+		}
+		if err != nil {
+			return dst, err
+		}
+		if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+			return dst, ErrDecoderSizeExceeded
+		}
+		if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
+			// Never preallocate more than 1 GB up front.
+			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
+				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
+				copy(dst2, dst)
+				dst = dst2
+			}
+		}
+		if cap(dst) == 0 {
+			// Allocate len(input) * 2 by default if nothing is provided
+			// and we didn't get frame content size.
+			size := len(input) * 2
+			// Cap to 1 MB.
+			if size > 1<<20 {
+				size = 1 << 20
+			}
+			if uint64(size) > d.o.maxDecodedSize {
+				size = int(d.o.maxDecodedSize)
+			}
+			dst = make([]byte, 0, size)
+		}
+
+		dst, err = frame.runDecoder(dst, block)
+		if err != nil {
+			return dst, err
+		}
+		if len(frame.bBuf) == 0 {
+			if debug {
+				println("frame dbuf empty")
+			}
+			break
+		}
+	}
+	return dst, nil
+}
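And the stateless path: DecodeAll appends to dst, so callers that can bound the output size can reuse one buffer across calls. A hedged sketch; the 1 MiB cap is illustrative only.

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// decompress reuses buf between calls to avoid per-call allocations.
func decompress(dec *zstd.Decoder, src, buf []byte) ([]byte, error) {
	return dec.DecodeAll(src, buf[:0])
}

func main() {
	// Passing a nil Reader is fine when only DecodeAll will be used.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	buf := make([]byte, 0, 1<<20) // assumed upper bound on output
	// compressed would be produced elsewhere in a real program.
	var compressed []byte
	out, err := decompress(dec, compressed, buf)
	fmt.Println(len(out), err)
}
```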
+
+// nextBlock returns the next block.
+// If an error occurs d.current.err will be set.
+// Optionally the function can block for new output.
+// If non-blocking mode is used the returned boolean will be false
+// if no data was available without blocking.
+func (d *Decoder) nextBlock(blocking bool) (ok bool) {
+	if d.current.d != nil {
+		if debug {
+			printf("re-adding current decoder %p", d.current.d)
+		}
+		d.decoders <- d.current.d
+		d.current.d = nil
+	}
+	if d.current.err != nil {
+		// Keep error state.
+		return blocking
+	}
+
+	if blocking {
+		d.current.decodeOutput = <-d.current.output
+	} else {
+		select {
+		case d.current.decodeOutput = <-d.current.output:
+		default:
+			return false
+		}
+	}
+	if debug {
+		println("got", len(d.current.b), "bytes, error:", d.current.err)
+	}
+	return true
+}
+
+// Close will release all resources.
+// It is NOT possible to reuse the decoder after this.
+func (d *Decoder) Close() {
+	if d.current.err == ErrDecoderClosed {
+		return
+	}
+	d.drainOutput()
+	if d.stream != nil {
+		close(d.stream)
+		d.streamWg.Wait()
+		d.stream = nil
+	}
+	if d.decoders != nil {
+		close(d.decoders)
+		for dec := range d.decoders {
+			dec.Close()
+		}
+		d.decoders = nil
+	}
+	if d.current.d != nil {
+		d.current.d.Close()
+		d.current.d = nil
+	}
+	d.current.err = ErrDecoderClosed
+}
+
+// IOReadCloser returns the decoder as an io.ReadCloser for convenience.
+// Any changes to the decoder will be reflected, so the returned ReadCloser
+// can be reused along with the decoder.
+// io.WriterTo is also supported by the returned ReadCloser.
+func (d *Decoder) IOReadCloser() io.ReadCloser {
+	return closeWrapper{d: d}
+}
+
+// closeWrapper wraps a function call as a closer.
+type closeWrapper struct {
+	d *Decoder
+}
+
+// WriteTo forwards WriteTo calls to the decoder.
+func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) {
+	return c.d.WriteTo(w)
+}
+
+// Read forwards read calls to the decoder.
+func (c closeWrapper) Read(p []byte) (n int, err error) {
+	return c.d.Read(p)
+}
+
+// Close closes the decoder.
+func (c closeWrapper) Close() error {
+	c.d.Close()
+	return nil
+}
+
+type decodeOutput struct {
+	d   *blockDec
+	b   []byte
+	err error
+}
+
+type decodeStream struct {
+	r io.Reader
+
+	// Blocks ready to be written to output.
+	output chan decodeOutput
+
+	// cancel reading from the input
+	cancel chan struct{}
+}
+
+// errEndOfStream indicates that everything from the stream was read.
+var errEndOfStream = errors.New("end-of-stream")
+
+// Create Decoder:
+// Spawn n block decoders. These accept tasks to decode a block.
+// Create a goroutine that handles stream processing; it will send history to decoders as they become available.
+// Decoders update the history as they decode.
+// When a block is returned:
+// 		a) history is sent to the next decoder,
+// 		b) content is written to the CRC,
+// 		c) data is returned to the writer,
+// 		d) wait for the next block to return data.
+// Once written, the decoders are sent back to the frame decoder for re-use.
+func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
+	defer d.streamWg.Done()
+	frame := newFrameDec(d.o)
+	for stream := range inStream {
+		if debug {
+			println("got new stream")
+		}
+		br := readerWrapper{r: stream.r}
+	decodeStream:
+		for {
+			frame.history.reset()
+			err := frame.reset(&br)
+			if debug && err != nil {
+				println("Frame decoder returned", err)
+			}
+			if err == nil && frame.DictionaryID != nil {
+				dict, ok := d.dicts[*frame.DictionaryID]
+				if !ok {
+					err = ErrUnknownDictionary
+				} else {
+					frame.history.setDict(&dict)
+				}
+			}
+			if err != nil {
+				stream.output <- decodeOutput{
+					err: err,
+				}
+				break
+			}
+			if debug {
+				println("starting frame decoder")
+			}
+
+			// This goroutine will forward history between frames.
+			frame.frameDone.Add(1)
+			frame.initAsync()
+
+			go frame.startDecoder(stream.output)
+		decodeFrame:
+			// Go through all blocks of the frame.
+			for {
+				dec := <-d.decoders
+				select {
+				case <-stream.cancel:
+					if !frame.sendErr(dec, io.EOF) {
+						// To not let the decoder dangle, send it back.
+						stream.output <- decodeOutput{d: dec}
+					}
+					break decodeStream
+				default:
+				}
+				err := frame.next(dec)
+				switch err {
+				case io.EOF:
+					// End of current frame, no error
+					println("EOF on next block")
+					break decodeFrame
+				case nil:
+					continue
+				default:
+					println("block decoder returned", err)
+					break decodeStream
+				}
+			}
+			// All blocks have started decoding, check if there are more frames.
+			println("waiting for done")
+			frame.frameDone.Wait()
+			println("done waiting...")
+		}
+		frame.frameDone.Wait()
+		println("Sending EOS")
+		stream.output <- decodeOutput{err: errEndOfStream}
+	}
+}
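The pipeline described in the comment above leans on one idiom throughout: block decoders are parked in a buffered channel, borrowed for one unit of work, and sent back for re-use. A minimal illustration of that pool pattern, detached from the zstd types:

```go
package main

import (
	"fmt"
	"sync"
)

type worker struct{ id int }

func main() {
	const n = 3
	pool := make(chan *worker, n)
	for i := 0; i < n; i++ {
		pool <- &worker{id: i}
	}

	var wg sync.WaitGroup
	for job := 0; job < 5; job++ {
		w := <-pool // borrow; blocks when all workers are busy
		wg.Add(1)
		go func(job int, w *worker) {
			defer wg.Done()
			fmt.Println("worker", w.id, "handled job", job)
			pool <- w // return for re-use
		}(job, w)
	}
	wg.Wait()
}
```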
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
new file mode 100644
index 0000000..c0fd058
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -0,0 +1,83 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"runtime"
+)
+
+// DOption is an option for creating a decoder.
+type DOption func(*decoderOptions) error
+
+// options retains accumulated state of multiple options.
+type decoderOptions struct {
+	lowMem         bool
+	concurrent     int
+	maxDecodedSize uint64
+	dicts          []dict
+}
+
+func (o *decoderOptions) setDefault() {
+	*o = decoderOptions{
+		// use less RAM: true for now, but may change.
+		lowMem:     true,
+		concurrent: runtime.GOMAXPROCS(0),
+	}
+	o.maxDecodedSize = 1 << 63
+}
+
+// WithDecoderLowmem will set whether to use a lower amount of memory,
+// but possibly have to allocate more while running.
+func WithDecoderLowmem(b bool) DOption {
+	return func(o *decoderOptions) error { o.lowMem = b; return nil }
+}
+
+// WithDecoderConcurrency will set the concurrency,
+// meaning the maximum number of decoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WithDecoderConcurrency(n int) DOption {
+	return func(o *decoderOptions) error {
+		if n <= 0 {
+			return errors.New("concurrency must be at least 1")
+		}
+		o.concurrent = n
+		return nil
+	}
+}
+
+// WithDecoderMaxMemory allows setting a maximum decoded size for in-memory
+// non-streaming operations or maximum window size for streaming operations.
+// This can be used to control memory usage of potentially hostile content.
+// For streaming operations, the maximum window size is capped at 1<<30 bytes.
+// Maximum and default is 1 << 63 bytes.
+func WithDecoderMaxMemory(n uint64) DOption {
+	return func(o *decoderOptions) error {
+		if n == 0 {
+			return errors.New("WithDecoderMaxMemory must be at least 1")
+		}
+		if n > 1<<63 {
+			return errors.New("WithDecoderMaxmemory must be less than 1 << 63")
+		}
+		o.maxDecodedSize = n
+		return nil
+	}
+}
+
+// WithDecoderDicts allows registering one or more dictionaries for the decoder.
+// If several dictionaries with the same ID are provided, the last one will be used.
+func WithDecoderDicts(dicts ...[]byte) DOption {
+	return func(o *decoderOptions) error {
+		for _, b := range dicts {
+			d, err := loadDict(b)
+			if err != nil {
+				return err
+			}
+			o.dicts = append(o.dicts, *d)
+		}
+		return nil
+	}
+}
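The options above compose at construction time. A hedged example that caps memory and optionally registers a dictionary; the dictionary bytes are assumed to be produced elsewhere, e.g. by `zstd --train`.

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func newBoundedReader(dictionary []byte) (*zstd.Decoder, error) {
	opts := []zstd.DOption{
		zstd.WithDecoderConcurrency(4),
		zstd.WithDecoderMaxMemory(64 << 20), // refuse to decode more than 64 MiB
		zstd.WithDecoderLowmem(true),
	}
	if dictionary != nil {
		opts = append(opts, zstd.WithDecoderDicts(dictionary))
	}
	return zstd.NewReader(nil, opts...)
}

func main() {
	dec, err := newBoundedReader(nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer dec.Close()
	fmt.Println("decoder ready")
}
```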
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
new file mode 100644
index 0000000..fa25a18
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -0,0 +1,122 @@
+package zstd
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+type dict struct {
+	id uint32
+
+	litEnc              *huff0.Scratch
+	llDec, ofDec, mlDec sequenceDec
+	//llEnc, ofEnc, mlEnc []*fseEncoder
+	offsets [3]int
+	content []byte
+}
+
+var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
+
+// ID returns the dictionary id or 0 if d is nil.
+func (d *dict) ID() uint32 {
+	if d == nil {
+		return 0
+	}
+	return d.id
+}
+
+// DictContentSize returns the dictionary content size or 0 if d is nil.
+func (d *dict) DictContentSize() int {
+	if d == nil {
+		return 0
+	}
+	return len(d.content)
+}
+
+// Load a dictionary as described in
+// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+func loadDict(b []byte) (*dict, error) {
+	// Check static field size.
+	if len(b) <= 8+(3*4) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	d := dict{
+		llDec: sequenceDec{fse: &fseDecoder{}},
+		ofDec: sequenceDec{fse: &fseDecoder{}},
+		mlDec: sequenceDec{fse: &fseDecoder{}},
+	}
+	if !bytes.Equal(b[:4], dictMagic[:]) {
+		return nil, ErrMagicMismatch
+	}
+	d.id = binary.LittleEndian.Uint32(b[4:8])
+	if d.id == 0 {
+		return nil, errors.New("dictionaries cannot have ID 0")
+	}
+
+	// Read literal table
+	var err error
+	d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
+	if err != nil {
+		return nil, err
+	}
+	d.litEnc.Reuse = huff0.ReusePolicyMust
+
+	br := byteReader{
+		b:   b,
+		off: 0,
+	}
+	readDec := func(i tableIndex, dec *fseDecoder) error {
+		if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil {
+			return err
+		}
+		if br.overread() {
+			return io.ErrUnexpectedEOF
+		}
+		err = dec.transform(symbolTableX[i])
+		if err != nil {
+			println("Transform table error:", err)
+			return err
+		}
+		if debug {
+			println("Read table ok", "symbolLen:", dec.symbolLen)
+		}
+		// Set decoders as predefined so they aren't reused.
+		dec.preDefined = true
+		return nil
+	}
+
+	if err := readDec(tableOffsets, d.ofDec.fse); err != nil {
+		return nil, err
+	}
+	if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil {
+		return nil, err
+	}
+	if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil {
+		return nil, err
+	}
+	if br.remain() < 12 {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	d.offsets[0] = int(br.Uint32())
+	br.advance(4)
+	d.offsets[1] = int(br.Uint32())
+	br.advance(4)
+	d.offsets[2] = int(br.Uint32())
+	br.advance(4)
+	if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 {
+		return nil, errors.New("invalid offset in dictionary")
+	}
+	d.content = make([]byte, br.remain())
+	copy(d.content, br.unread())
+	if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) {
+		return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets)
+	}
+
+	return &d, nil
+}
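loadDict starts by validating the 4-byte magic and the little-endian dictionary ID before handing the rest to the table readers. The same prefix check in isolation; the helper name is ours:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
)

var dictMagic = []byte{0x37, 0xa4, 0x30, 0xec}

// dictID extracts the dictionary ID the same way loadDict does above.
func dictID(b []byte) (uint32, error) {
	if len(b) < 8 {
		return 0, errors.New("dictionary too short")
	}
	if !bytes.Equal(b[:4], dictMagic) {
		return 0, errors.New("magic mismatch")
	}
	id := binary.LittleEndian.Uint32(b[4:8])
	if id == 0 {
		return 0, errors.New("dictionaries cannot have ID 0")
	}
	return id, nil
}

func main() {
	raw := append(append([]byte{}, dictMagic...), 0x2a, 0x00, 0x00, 0x00)
	fmt.Println(dictID(raw)) // 42 <nil>
}
```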
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
new file mode 100644
index 0000000..60f2986
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -0,0 +1,178 @@
+package zstd
+
+import (
+	"fmt"
+	"math/bits"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+const (
+	dictShardBits = 6
+)
+
+type fastBase struct {
+	// cur is the offset at the start of hist
+	cur int32
+	// maximum offset. Should be at least 2x block size.
+	maxMatchOff int32
+	hist        []byte
+	crc         *xxhash.Digest
+	tmp         [8]byte
+	blk         *blockEnc
+	lastDictID  uint32
+	lowMem      bool
+}
+
+// CRC returns the underlying CRC writer.
+func (e *fastBase) CRC() *xxhash.Digest {
+	return e.crc
+}
+
+// AppendCRC will append the CRC to the destination slice and return it.
+func (e *fastBase) AppendCRC(dst []byte) []byte {
+	crc := e.crc.Sum(e.tmp[:0])
+	dst = append(dst, crc[7], crc[6], crc[5], crc[4])
+	return dst
+}
+
+// WindowSize returns the window size of the encoder,
+// or a window size small enough to contain the input size, if > 0.
+func (e *fastBase) WindowSize(size int) int32 {
+	if size > 0 && size < int(e.maxMatchOff) {
+		b := int32(1) << uint(bits.Len(uint(size)))
+		// Keep minimum window.
+		if b < 1024 {
+			b = 1024
+		}
+		return b
+	}
+	return e.maxMatchOff
+}
+
+// Block returns the current block.
+func (e *fastBase) Block() *blockEnc {
+	return e.blk
+}
+
+func (e *fastBase) addBlock(src []byte) int32 {
+	if debugAsserts && e.cur > bufferReset {
+		panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
+	}
+	// check if we have space already
+	if len(e.hist)+len(src) > cap(e.hist) {
+		if cap(e.hist) == 0 {
+			e.ensureHist(len(src))
+		} else {
+			if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) {
+				panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff))
+			}
+			// Move down
+			offset := int32(len(e.hist)) - e.maxMatchOff
+			copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
+			e.cur += offset
+			e.hist = e.hist[:e.maxMatchOff]
+		}
+	}
+	s := int32(len(e.hist))
+	e.hist = append(e.hist, src...)
+	return s
+}
+
+// ensureHist will ensure that history can keep at least this many bytes.
+func (e *fastBase) ensureHist(n int) {
+	if cap(e.hist) >= n {
+		return
+	}
+	l := e.maxMatchOff
+	if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize {
+		l += maxCompressedBlockSize
+	} else {
+		l += e.maxMatchOff
+	}
+	// Make it at least 1MB.
+	if l < 1<<20 && !e.lowMem {
+		l = 1 << 20
+	}
+	// Make it at least the requested size.
+	if l < int32(n) {
+		l = int32(n)
+	}
+	e.hist = make([]byte, 0, l)
+}
+
+// UseBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastBase) UseBlock(enc *blockEnc) {
+	enc.reset(e.blk)
+	e.blk = enc
+}
+
+func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 {
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
+	if debugAsserts {
+		if s < 0 {
+			err := fmt.Sprintf("s (%d) < 0", s)
+			panic(err)
+		}
+		if t < 0 {
+			err := fmt.Sprintf("s (%d) < 0", s)
+			panic(err)
+		}
+		if s-t > e.maxMatchOff {
+			err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
+			panic(err)
+		}
+		if len(src)-int(s) > maxCompressedBlockSize {
+			panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
+		}
+	}
+
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastBase) resetBase(d *dict, singleBlock bool) {
+	if e.blk == nil {
+		e.blk = &blockEnc{lowMem: e.lowMem}
+		e.blk.init()
+	} else {
+		e.blk.reset(nil)
+	}
+	e.blk.initNewEncode()
+	if e.crc == nil {
+		e.crc = xxhash.New()
+	} else {
+		e.crc.Reset()
+	}
+	if d != nil {
+		low := e.lowMem
+		if singleBlock {
+			e.lowMem = true
+		}
+		e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
+		e.lowMem = low
+	}
+
+	// We offset current position so everything will be out of reach.
+	// If above reset line, history will be purged.
+	if e.cur < bufferReset {
+		e.cur += e.maxMatchOff + int32(len(e.hist))
+	}
+	e.hist = e.hist[:0]
+	if d != nil {
+		// Set offsets (currently not used)
+		for i, off := range d.offsets {
+			e.blk.recentOffsets[i] = uint32(off)
+			e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
+		}
+		// Transfer litenc.
+		e.blk.dictLitEnc = d.litEnc
+		e.hist = append(e.hist, d.content...)
+	}
+}
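One detail worth calling out from WindowSize above: a known input size is rounded up to a power of two, never below 1 KiB, so small payloads get a proportionally small window. The rounding on its own, with an illustrative helper name:

```go
package main

import (
	"fmt"
	"math/bits"
)

// windowFor mirrors the "known input size" branch of fastBase.WindowSize.
func windowFor(size int) int32 {
	b := int32(1) << uint(bits.Len(uint(size)))
	// Keep minimum window.
	if b < 1024 {
		b = 1024
	}
	return b
}

func main() {
	for _, sz := range []int{100, 1000, 4096, 100000} {
		fmt.Println(sz, "->", windowFor(sz)) // 1024, 1024, 8192, 131072
	}
}
```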
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
new file mode 100644
index 0000000..dc1eed5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -0,0 +1,501 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"math/bits"
+)
+
+const (
+	bestLongTableBits = 20                     // Bits used in the long match table
+	bestLongTableSize = 1 << bestLongTableBits // Size of the table
+
+	// Note: Increasing the short table bits or making the hash shorter
+	// can actually lead to compression degradation since it will 'steal' more from the
+	// long match table and match offsets are quite big.
+	// This greatly depends on the type of input.
+	bestShortTableBits = 16                      // Bits used in the short match table
+	bestShortTableSize = 1 << bestShortTableBits // Size of the table
+)
+
+// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches.
+// The long match table contains the previous entry with the same hash,
+// effectively making it a "chain" of length 2.
+// When we find a long match we choose between the two values and select the longest.
+// When we find a short match, after checking the long, we check if we can find a long at n+1
+// and that it is longer (lazy matching).
+type bestFastEncoder struct {
+	fastBase
+	table         [bestShortTableSize]prevEntry
+	longTable     [bestLongTableSize]prevEntry
+	dictTable     []prevEntry
+	dictLongTable []prevEntry
+}
+
+// Encode encodes the block, spending extra effort to find the best possible matches.
+func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 4
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = prevEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = prevEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			v2 := e.table[i].prev
+			if v < minOff {
+				v = 0
+				v2 = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+				if v2 < minOff {
+					v2 = 0
+				} else {
+					v2 = v2 - e.cur + e.maxMatchOff
+				}
+			}
+			e.table[i] = prevEntry{
+				offset: v,
+				prev:   v2,
+			}
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			v2 := e.longTable[i].prev
+			if v < minOff {
+				v = 0
+				v2 = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+				if v2 < minOff {
+					v2 = 0
+				} else {
+					v2 = v2 - e.cur + e.maxMatchOff
+				}
+			}
+			e.longTable[i] = prevEntry{
+				offset: v,
+				prev:   v2,
+			}
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	const kSearchStrength = 10
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+	offset3 := int32(blk.recentOffsets[2])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	_ = addLiterals
+
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+
+		if debugAsserts && canRepeat && offset1 == 0 {
+			panic("offset0 was 0")
+		}
+
+		type match struct {
+			offset int32
+			s      int32
+			length int32
+			rep    int32
+		}
+		matchAt := func(offset int32, s int32, first uint32, rep int32) match {
+			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
+				return match{offset: offset, s: s}
+			}
+			return match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
+		}
+
+		bestOf := func(a, b match) match {
+			aScore := b.s - a.s + a.length
+			bScore := a.s - b.s + b.length
+			if a.rep < 0 {
+				aScore = aScore - int32(bits.Len32(uint32(a.offset)))/8
+			}
+			if b.rep < 0 {
+				bScore = bScore - int32(bits.Len32(uint32(b.offset)))/8
+			}
+			if aScore >= bScore {
+				return a
+			}
+			return b
+		}
+		const goodEnough = 100
+
+		nextHashL := hash8(cv, bestLongTableBits)
+		nextHashS := hash4x64(cv, bestShortTableBits)
+		candidateL := e.longTable[nextHashL]
+		candidateS := e.table[nextHashS]
+
+		best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
+		best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
+		best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
+		if canRepeat && best.length < goodEnough {
+			best = bestOf(best, matchAt(s-offset1+1, s+1, uint32(cv>>8), 1))
+			best = bestOf(best, matchAt(s-offset2+1, s+1, uint32(cv>>8), 2))
+			best = bestOf(best, matchAt(s-offset3+1, s+1, uint32(cv>>8), 3))
+			if best.length > 0 {
+				best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1))
+				best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2))
+				best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3))
+			}
+		}
+		// Load next and check...
+		e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
+		e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
+
+		// Look far ahead, unless we have a really long match already...
+		if best.length < goodEnough {
+			// No match found, move forward on input, no need to check forward...
+			if best.length < 4 {
+				s += 1 + (s-nextEmit)>>(kSearchStrength-1)
+				if s >= sLimit {
+					break encodeLoop
+				}
+				cv = load6432(src, s)
+				continue
+			}
+
+			s++
+			candidateS = e.table[hash4x64(cv>>8, bestShortTableBits)]
+			cv = load6432(src, s)
+			cv2 := load6432(src, s+1)
+			candidateL = e.longTable[hash8(cv, bestLongTableBits)]
+			candidateL2 := e.longTable[hash8(cv2, bestLongTableBits)]
+
+			best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
+			best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
+			best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
+			best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
+			best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
+
+			// See if we can find a better match by checking where the current best ends.
+			// Use that offset to see if we can find a better full match.
+			if sAt := best.s + best.length; sAt < sLimit {
+				nextHashL := hash8(load6432(src, sAt), bestLongTableBits)
+				candidateEnd := e.longTable[nextHashL]
+				if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
+					bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
+					if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 {
+						bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1))
+					}
+					best = bestEnd
+				}
+			}
+		}
+
+		// We have a match, we can store the forward value
+		if best.rep > 0 {
+			s = best.s
+			var seq seq
+			seq.matchLen = uint32(best.length - zstdMinMatch)
+
+			// We might be able to match backwards.
+			// Extend as long as we can.
+			start := best.s
+			// We end the search early, so we don't risk 0 literals
+			// and have to do special offset treatment.
+			startLimit := nextEmit + 1
+
+			tMin := s - e.maxMatchOff
+			if tMin < 0 {
+				tMin = 0
+			}
+			repIndex := best.offset
+			for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+				repIndex--
+				start--
+				seq.matchLen++
+			}
+			addLiterals(&seq, start)
+
+			// rep 0
+			seq.offset = uint32(best.rep)
+			if debugSequences {
+				println("repeat sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Index match start+1 (long) -> s - 1
+			index0 := s
+			s = best.s + best.length
+
+			nextEmit = s
+			if s >= sLimit {
+				if debug {
+					println("repeat ended", s, best.length)
+
+				}
+				break encodeLoop
+			}
+			// Index skipped...
+			off := index0 + e.cur
+			for index0 < s-1 {
+				cv0 := load6432(src, index0)
+				h0 := hash8(cv0, bestLongTableBits)
+				h1 := hash4x64(cv0, bestShortTableBits)
+				e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+				e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
+				off++
+				index0++
+			}
+			switch best.rep {
+			case 2:
+				offset1, offset2 = offset2, offset1
+			case 3:
+				offset1, offset2, offset3 = offset3, offset1, offset2
+			}
+			cv = load6432(src, s)
+			continue
+		}
+
+		// A 4-byte match has been found. Update recent offsets.
+		// We'll later see if more than 4 bytes.
+		s = best.s
+		t := best.offset
+		offset1, offset2, offset3 = s-t, offset1, offset2
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the n-byte match as long as possible.
+		l := best.length
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) -> s - 1
+		index0 := s - l + 1
+		// every entry
+		for index0 < s-1 {
+			cv0 := load6432(src, index0)
+			h0 := hash8(cv0, bestLongTableBits)
+			h1 := hash4x64(cv0, bestShortTableBits)
+			off := index0 + e.cur
+			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+			e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
+			index0++
+		}
+
+		cv = load6432(src, s)
+		if !canRepeat {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hash4x64(cv, bestShortTableBits)
+			nextHashL := hash8(cv, bestLongTableBits)
+
+			// We have at least 4 byte match.
+			// No need to check backwards. We come straight from a match
+			l := 4 + e.matchlen(s+4, o2+4, src)
+
+			e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
+			e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	blk.recentOffsets[2] = uint32(offset3)
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
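
The tail of Encode above writes offset1/offset2/offset3 back into blk.recentOffsets, and the repeat cases earlier rotate them (case 2 swaps the first two, case 3 promotes the third; a brand-new offset pushes the others down). As a rough, self-contained sketch of that rotation only — rotateRecent and its signature are invented for illustration, not the package's API:

```go
package main

import "fmt"

// rotateRecent mimics the recent-offset bookkeeping above: rep code 1
// reuses offset1, rep code 2 promotes offset2, rep code 3 promotes
// offset3, and a non-repeat match pushes the new offset to the front.
func rotateRecent(offsets [3]int32, rep int, newOffset int32) [3]int32 {
	switch rep {
	case 1:
		// offset1 reused; order unchanged.
	case 2:
		offsets[0], offsets[1] = offsets[1], offsets[0]
	case 3:
		offsets[0], offsets[1], offsets[2] = offsets[2], offsets[0], offsets[1]
	default:
		// Not a repeat: shift everything down and insert the new offset.
		offsets[0], offsets[1], offsets[2] = newOffset, offsets[0], offsets[1]
	}
	return offsets
}

func main() {
	r := [3]int32{8, 16, 32}
	fmt.Println(rotateRecent(r, 3, 0)) // [32 8 16]
	fmt.Println(rotateRecent(r, 0, 5)) // [5 8 16]
}
```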
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+	e.ensureHist(len(src))
+	e.Encode(blk, src)
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d == nil {
+		return
+	}
+	// Init or copy dict table
+	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+		if len(e.dictTable) != len(e.table) {
+			e.dictTable = make([]prevEntry, len(e.table))
+		}
+		end := int32(len(d.content)) - 8 + e.maxMatchOff
+		for i := e.maxMatchOff; i < end; i += 4 {
+			const hashLog = bestShortTableBits
+
+			cv := load6432(d.content, i-e.maxMatchOff)
+			nextHash := hash4x64(cv, hashLog)      // 0 -> 4
+			nextHash1 := hash4x64(cv>>8, hashLog)  // 1 -> 5
+			nextHash2 := hash4x64(cv>>16, hashLog) // 2 -> 6
+			nextHash3 := hash4x64(cv>>24, hashLog) // 3 -> 7
+			e.dictTable[nextHash] = prevEntry{
+				prev:   e.dictTable[nextHash].offset,
+				offset: i,
+			}
+			e.dictTable[nextHash1] = prevEntry{
+				prev:   e.dictTable[nextHash1].offset,
+				offset: i + 1,
+			}
+			e.dictTable[nextHash2] = prevEntry{
+				prev:   e.dictTable[nextHash2].offset,
+				offset: i + 2,
+			}
+			e.dictTable[nextHash3] = prevEntry{
+				prev:   e.dictTable[nextHash3].offset,
+				offset: i + 3,
+			}
+		}
+		e.lastDictID = d.id
+	}
+
+	// Init or copy dict long table
+	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+		if len(e.dictLongTable) != len(e.longTable) {
+			e.dictLongTable = make([]prevEntry, len(e.longTable))
+		}
+		if len(d.content) >= 8 {
+			cv := load6432(d.content, 0)
+			h := hash8(cv, bestLongTableBits)
+			e.dictLongTable[h] = prevEntry{
+				offset: e.maxMatchOff,
+				prev:   e.dictLongTable[h].offset,
+			}
+
+			end := int32(len(d.content)) - 8 + e.maxMatchOff
+			off := 8 // First to read
+			for i := e.maxMatchOff + 1; i < end; i++ {
+				cv = cv>>8 | (uint64(d.content[off]) << 56)
+				h := hash8(cv, bestLongTableBits)
+				e.dictLongTable[h] = prevEntry{
+					offset: i,
+					prev:   e.dictLongTable[h].offset,
+				}
+				off++
+			}
+		}
+		e.lastDictID = d.id
+	}
+	// Reset table to initial state
+	copy(e.longTable[:], e.dictLongTable)
+
+	e.cur = e.maxMatchOff
+	// Reset table to initial state
+	copy(e.table[:], e.dictTable)
+}
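
The Reset method above primes the dictionary tables by loading eight bytes at a time and recording one table entry per byte position for four consecutive positions. Below is a minimal stand-alone sketch of that pattern; hash4, primeDict and the map-based table are invented stand-ins, not the package's hash4x64/prevEntry tables.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// hash4 is a toy 16-bit multiplicative hash over a 4-byte value,
// standing in for the package's hash4x64.
func hash4(v uint32) uint32 { return (v * 2654435761) >> 16 }

// primeDict mirrors the priming loop above: load eight bytes, then
// record one entry per byte position for four consecutive positions.
// The map is a stand-in for the fixed-size hash table.
func primeDict(content []byte) map[uint32]int {
	table := make(map[uint32]int)
	for i := 0; i+8 <= len(content); i += 4 {
		cv := binary.LittleEndian.Uint64(content[i:])
		for j := 0; j < 4; j++ {
			table[hash4(uint32(cv>>(8*j)))] = i + j // 4-byte window starting at i+j
		}
	}
	return table
}

func main() {
	t := primeDict([]byte("example dictionary content for priming"))
	fmt.Println("primed entries:", len(t))
}
```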
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
new file mode 100644
index 0000000..6049542
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -0,0 +1,1235 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+const (
+	betterLongTableBits = 19                       // Bits used in the long match table
+	betterLongTableSize = 1 << betterLongTableBits // Size of the table
+
+	// Note: Increasing the short table bits or making the hash shorter
+	// can actually lead to compression degradation since it will 'steal' more from the
+	// long match table and match offsets are quite big.
+	// This greatly depends on the type of input.
+	betterShortTableBits = 13                        // Bits used in the short match table
+	betterShortTableSize = 1 << betterShortTableBits // Size of the table
+
+	betterLongTableShardCnt  = 1 << (betterLongTableBits - dictShardBits)    // Number of shards in the table
+	betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard
+
+	betterShortTableShardCnt  = 1 << (betterShortTableBits - dictShardBits)     // Number of shards in the table
+	betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard
+)
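
The shard constants above follow directly from the table bits: subtracting dictShardBits fixes how many shards a table has, which in turn fixes the shard size. A quick arithmetic check, with dictShardBits assumed to be 6 purely for illustration (the real constant is defined elsewhere in the package):

```go
package main

import "fmt"

func main() {
	const (
		betterShortTableBits = 13
		betterLongTableBits  = 19
		dictShardBits        = 6 // assumed for this sketch
	)
	shortSize := 1 << betterShortTableBits
	longSize := 1 << betterLongTableBits
	shortShards := 1 << (betterShortTableBits - dictShardBits)
	longShards := 1 << (betterLongTableBits - dictShardBits)
	fmt.Println("short table:", shortSize, "entries in", shortShards, "shards of", shortSize/shortShards)
	fmt.Println("long table: ", longSize, "entries in", longShards, "shards of", longSize/longShards)
}
```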
+
+type prevEntry struct {
+	offset int32
+	prev   int32
+}
+
+// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches.
+// The long match table contains the previous entry with the same hash,
+// effectively making it a "chain" of length 2.
+// When we find a long match we choose between the two values and select the longest.
+// When we find a short match, after checking the long, we check if we can find a long at n+1
+// and that it is longer (lazy matching).
+type betterFastEncoder struct {
+	fastBase
+	table     [betterShortTableSize]tableEntry
+	longTable [betterLongTableSize]prevEntry
+}
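
The comment above describes the long table as a chain of length 2: each bucket keeps the newest offset plus the one it displaced, and a lookup tries both and keeps the longer match. A simplified sketch of that idea follows; bucket, matchLen and bestOfTwo are toy helpers, not the package's prevEntry and matchlen.

```go
package main

import "fmt"

// bucket stores the newest candidate offset and the one it displaced.
type bucket struct{ offset, prev int }

// matchLen counts how many bytes match between positions a and b.
func matchLen(src []byte, a, b int) int {
	n := 0
	for a+n < len(src) && b+n < len(src) && src[a+n] == src[b+n] {
		n++
	}
	return n
}

// bestOfTwo checks both chain entries and keeps the longer match.
func bestOfTwo(src []byte, s int, b bucket) (offset, length int) {
	for _, cand := range []int{b.offset, b.prev} {
		if cand < 0 || cand >= s {
			continue
		}
		if l := matchLen(src, s, cand); l > length {
			offset, length = cand, l
		}
	}
	return offset, length
}

func main() {
	src := []byte("abcabcabcxyz")
	off, l := bestOfTwo(src, 6, bucket{offset: 3, prev: 0})
	fmt.Println("best candidate at", off, "match length", l)
}
```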
+
+type betterFastEncoderDict struct {
+	betterFastEncoder
+	dictTable            []tableEntry
+	dictLongTable        []prevEntry
+	shortTableShardDirty [betterShortTableShardCnt]bool
+	longTableShardDirty  [betterLongTableShardCnt]bool
+	allDirty             bool
+}
+
+// Encode improves compression...
+func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = prevEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			v2 := e.longTable[i].prev
+			if v < minOff {
+				v = 0
+				v2 = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+				if v2 < minOff {
+					v2 = 0
+				} else {
+					v2 = v2 - e.cur + e.maxMatchOff
+				}
+			}
+			e.longTable[i] = prevEntry{
+				offset: v,
+				prev:   v2,
+			}
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	const stepSize = 1
+
+	const kSearchStrength = 9
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+		var matched int32
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHashS := hash5(cv, betterShortTableBits)
+			nextHashL := hash8(cv, betterLongTableBits)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			off := s + e.cur
+			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
+			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+
+			if canRepeat {
+				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					// Index match start+1 (long) -> s - 1
+					index0 := s + repOff
+					s += length + repOff
+
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+					// Index skipped...
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hash8(cv0, betterLongTableBits)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					continue
+				}
+				const repOff2 = 1
+
+				// We deviate from the reference encoder and also check offset 2.
+				// Still slower and not much better, so disabled.
+				// repIndex = s - offset2 + repOff2
+				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
+					// Consider history as well.
+					var seq seq
+					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff2
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 2
+					seq.offset = 2
+					if debugSequences {
+						println("repeat sequence 2", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					index0 := s + repOff2
+					s += length + repOff2
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+
+					// Index skipped...
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hash8(cv0, betterLongTableBits)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					// Swap offsets
+					offset1, offset2 = offset2, offset1
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := candidateL.offset - e.cur
+			coffsetLP := candidateL.prev - e.cur
+
+			// Check if we have a long match.
+			if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+				// Found a long match, at least 8 bytes.
+				matched = e.matchlen(s+8, coffsetL+8, src) + 8
+				t = coffsetL
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+
+				if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+					// Found a long match, at least 8 bytes.
+					prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
+					if prevMatch > matched {
+						matched = prevMatch
+						t = coffsetLP
+					}
+					if debugAsserts && s <= t {
+						panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+					}
+					if debugAsserts && s-t > e.maxMatchOff {
+						panic("s - t >e.maxMatchOff")
+					}
+					if debugMatches {
+						println("long match")
+					}
+				}
+				break
+			}
+
+			// Check if we have a long match on prev.
+			if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+				// Found a long match, at least 8 bytes.
+				matched = e.matchlen(s+8, coffsetLP+8, src) + 8
+				t = coffsetLP
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			coffsetS := candidateS.offset - e.cur
+
+			// Check if we have a short match.
+			if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				matched = e.matchlen(s+4, coffsetS+4, src) + 4
+
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hash8(cv, betterLongTableBits)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = candidateL.offset - e.cur
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
+				if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+					// Found a long match, at least 8 bytes.
+					matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+					if matchedNext > matched {
+						t = coffsetL
+						s += checkAt
+						matched = matchedNext
+						if debugMatches {
+							println("long match (after short)")
+						}
+						break
+					}
+				}
+
+				// Check prev long...
+				coffsetL = candidateL.prev - e.cur
+				if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+					// Found a long match, at least 8 bytes.
+					matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+					if matchedNext > matched {
+						t = coffsetL
+						s += checkAt
+						matched = matchedNext
+						if debugMatches {
+							println("prev long match (after short)")
+						}
+						break
+					}
+				}
+				t = coffsetS
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				if debugMatches {
+					println("short match")
+				}
+				break
+			}
+
+			// No match found, move forward in input.
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+
+		// Try to find a better match by searching for a long match at the end of the current best match
+		if true && s+matched < sLimit {
+			nextHashL := hash8(load6432(src, s+matched), betterLongTableBits)
+			cv := load3232(src, s)
+			candidateL := e.longTable[nextHashL]
+			coffsetL := candidateL.offset - e.cur - matched
+			if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+				// Found a long match, at least 4 bytes.
+				matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+				if matchedNext > matched {
+					t = coffsetL
+					matched = matchedNext
+					if debugMatches {
+						println("long match at end-of-match")
+					}
+				}
+			}
+
+			// Check prev long...
+			if true {
+				coffsetL = candidateL.prev - e.cur - matched
+				if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+					// Found a long match, at least 4 bytes.
+					matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+					if matchedNext > matched {
+						t = coffsetL
+						matched = matchedNext
+						if debugMatches {
+							println("prev long match at end-of-match")
+						}
+					}
+				}
+			}
+		}
+		// A match has been found. Update recent offsets.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the n-byte match as long as possible.
+		l := matched
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) -> s - 1
+		index0 := s - l + 1
+		for index0 < s-1 {
+			cv0 := load6432(src, index0)
+			cv1 := cv0 >> 8
+			h0 := hash8(cv0, betterLongTableBits)
+			off := index0 + e.cur
+			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+			e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+			index0 += 2
+		}
+
+		cv = load6432(src, s)
+		if !canRepeat {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hash5(cv, betterShortTableBits)
+			nextHashL := hash8(cv, betterLongTableBits)
+
+			// We have at least a 4-byte match.
+			// No need to check backwards; we come straight from a match.
+			l := 4 + e.matchlen(s+4, o2+4, src)
+
+			e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
+			e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
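
Throughout Encode, seq.offset stores either a repeat code (1–3) or the literal match offset plus 3, as in `seq.offset = uint32(s-t) + 3` above. The sketch below round-trips that convention in isolation (it ignores the litLen==0 special case of the real format); encodeOffset and decodeOffset are illustrative names, not the package's API.

```go
package main

import "fmt"

// encodeOffset returns the sequence offset code: a rep code 1-3, or the
// real offset shifted past the rep codes. Real offsets are always >= 1,
// so offset+3 never collides with a rep code.
func encodeOffset(realOffset uint32, repCode int) uint32 {
	if repCode >= 1 && repCode <= 3 {
		return uint32(repCode) // repeat: reuse a recent offset
	}
	return realOffset + 3
}

// decodeOffset inverts the convention; codes start at 1.
func decodeOffset(code uint32, recent [3]uint32) uint32 {
	if code <= 3 {
		return recent[code-1] // rep 1..3 index into the recent offsets
	}
	return code - 3
}

func main() {
	recent := [3]uint32{17, 90, 1200}
	fmt.Println(decodeOffset(encodeOffset(0, 2), recent))  // 90
	fmt.Println(decodeOffset(encodeOffset(42, 0), recent)) // 42
}
```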
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+	e.ensureHist(len(src))
+	e.Encode(blk, src)
+}
+
+// Encode improves compression...
+func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = prevEntry{}
+			}
+			e.cur = e.maxMatchOff
+			e.allDirty = true
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			v2 := e.longTable[i].prev
+			if v < minOff {
+				v = 0
+				v2 = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+				if v2 < minOff {
+					v2 = 0
+				} else {
+					v2 = v2 - e.cur + e.maxMatchOff
+				}
+			}
+			e.longTable[i] = prevEntry{
+				offset: v,
+				prev:   v2,
+			}
+		}
+		e.allDirty = true
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	const stepSize = 1
+
+	const kSearchStrength = 9
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+		var matched int32
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHashS := hash5(cv, betterShortTableBits)
+			nextHashL := hash8(cv, betterLongTableBits)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			off := s + e.cur
+			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
+			e.markLongShardDirty(nextHashL)
+			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+			e.markShortShardDirty(nextHashS)
+
+			if canRepeat {
+				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					// Index match start+1 (long) -> s - 1
+					index0 := s + repOff
+					s += length + repOff
+
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+					// Index skipped...
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hash8(cv0, betterLongTableBits)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.markLongShardDirty(h0)
+						h1 := hash5(cv1, betterShortTableBits)
+						e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						e.markShortShardDirty(h1)
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					continue
+				}
+				const repOff2 = 1
+
+				// We deviate from the reference encoder and also check offset 2.
+				// Still slower and not much better, so disabled.
+				// repIndex = s - offset2 + repOff2
+				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
+					// Consider history as well.
+					var seq seq
+					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff2
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 2
+					seq.offset = 2
+					if debugSequences {
+						println("repeat sequence 2", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					index0 := s + repOff2
+					s += length + repOff2
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+
+					// Index skipped...
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hash8(cv0, betterLongTableBits)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.markLongShardDirty(h0)
+						h1 := hash5(cv1, betterShortTableBits)
+						e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						e.markShortShardDirty(h1)
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					// Swap offsets
+					offset1, offset2 = offset2, offset1
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := candidateL.offset - e.cur
+			coffsetLP := candidateL.prev - e.cur
+
+			// Check if we have a long match.
+			if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+				// Found a long match, at least 8 bytes.
+				matched = e.matchlen(s+8, coffsetL+8, src) + 8
+				t = coffsetL
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+
+				if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+					// Found a long match, at least 8 bytes.
+					prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
+					if prevMatch > matched {
+						matched = prevMatch
+						t = coffsetLP
+					}
+					if debugAsserts && s <= t {
+						panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+					}
+					if debugAsserts && s-t > e.maxMatchOff {
+						panic("s - t >e.maxMatchOff")
+					}
+					if debugMatches {
+						println("long match")
+					}
+				}
+				break
+			}
+
+			// Check if we have a long match on prev.
+			if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+				// Found a long match, at least 8 bytes.
+				matched = e.matchlen(s+8, coffsetLP+8, src) + 8
+				t = coffsetLP
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			coffsetS := candidateS.offset - e.cur
+
+			// Check if we have a short match.
+			if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				matched = e.matchlen(s+4, coffsetS+4, src) + 4
+
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hash8(cv, betterLongTableBits)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = candidateL.offset - e.cur
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
+				e.markLongShardDirty(nextHashL)
+				if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+					// Found a long match, at least 8 bytes.
+					matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+					if matchedNext > matched {
+						t = coffsetL
+						s += checkAt
+						matched = matchedNext
+						if debugMatches {
+							println("long match (after short)")
+						}
+						break
+					}
+				}
+
+				// Check prev long...
+				coffsetL = candidateL.prev - e.cur
+				if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+					// Found a long match, at least 8 bytes.
+					matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+					if matchedNext > matched {
+						t = coffsetL
+						s += checkAt
+						matched = matchedNext
+						if debugMatches {
+							println("prev long match (after short)")
+						}
+						break
+					}
+				}
+				t = coffsetS
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				if debugMatches {
+					println("short match")
+				}
+				break
+			}
+
+			// No match found, move forward in input.
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+		// Try to find a better match by searching for a long match at the end of the current best match
+		if s+matched < sLimit {
+			nextHashL := hash8(load6432(src, s+matched), betterLongTableBits)
+			cv := load3232(src, s)
+			candidateL := e.longTable[nextHashL]
+			coffsetL := candidateL.offset - e.cur - matched
+			if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+				// Found a long match, at least 4 bytes.
+				matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+				if matchedNext > matched {
+					t = coffsetL
+					matched = matchedNext
+					if debugMatches {
+						println("long match at end-of-match")
+					}
+				}
+			}
+
+			// Check prev long...
+			if true {
+				coffsetL = candidateL.prev - e.cur - matched
+				if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+					// Found a long match, at least 4 bytes.
+					matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+					if matchedNext > matched {
+						t = coffsetL
+						matched = matchedNext
+						if debugMatches {
+							println("prev long match at end-of-match")
+						}
+					}
+				}
+			}
+		}
+		// A match has been found. Update recent offsets.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the n-byte match as long as possible.
+		l := matched
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) -> s - 1
+		index0 := s - l + 1
+		for index0 < s-1 {
+			cv0 := load6432(src, index0)
+			cv1 := cv0 >> 8
+			h0 := hash8(cv0, betterLongTableBits)
+			off := index0 + e.cur
+			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+			e.markLongShardDirty(h0)
+			h1 := hash5(cv1, betterShortTableBits)
+			e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+			e.markShortShardDirty(h1)
+			index0 += 2
+		}
+
+		cv = load6432(src, s)
+		if !canRepeat {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hash5(cv, betterShortTableBits)
+			nextHashL := hash8(cv, betterLongTableBits)
+
+			// We have at least a 4-byte match.
+			// No need to check backwards; we come straight from a match.
+			l := 4 + e.matchlen(s+4, o2+4, src)
+
+			e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
+			e.markLongShardDirty(nextHashL)
+			e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.markShortShardDirty(nextHashS)
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// Reset will reset the encoder. This variant does not support a dictionary and will panic if one is provided.
+func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d != nil {
+		panic("betterFastEncoder: Reset with dict")
+	}
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d == nil {
+		return
+	}
+	// Init or copy dict table
+	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+		if len(e.dictTable) != len(e.table) {
+			e.dictTable = make([]tableEntry, len(e.table))
+		}
+		end := int32(len(d.content)) - 8 + e.maxMatchOff
+		for i := e.maxMatchOff; i < end; i += 4 {
+			const hashLog = betterShortTableBits
+
+			cv := load6432(d.content, i-e.maxMatchOff)
+			nextHash := hash5(cv, hashLog)      // 0 -> 4
+			nextHash1 := hash5(cv>>8, hashLog)  // 1 -> 5
+			nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6
+			nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7
+			e.dictTable[nextHash] = tableEntry{
+				val:    uint32(cv),
+				offset: i,
+			}
+			e.dictTable[nextHash1] = tableEntry{
+				val:    uint32(cv >> 8),
+				offset: i + 1,
+			}
+			e.dictTable[nextHash2] = tableEntry{
+				val:    uint32(cv >> 16),
+				offset: i + 2,
+			}
+			e.dictTable[nextHash3] = tableEntry{
+				val:    uint32(cv >> 24),
+				offset: i + 3,
+			}
+		}
+		e.lastDictID = d.id
+		e.allDirty = true
+	}
+
+	// Init or copy dict long table
+	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+		if len(e.dictLongTable) != len(e.longTable) {
+			e.dictLongTable = make([]prevEntry, len(e.longTable))
+		}
+		if len(d.content) >= 8 {
+			cv := load6432(d.content, 0)
+			h := hash8(cv, betterLongTableBits)
+			e.dictLongTable[h] = prevEntry{
+				offset: e.maxMatchOff,
+				prev:   e.dictLongTable[h].offset,
+			}
+
+			end := int32(len(d.content)) - 8 + e.maxMatchOff
+			off := 8 // First to read
+			for i := e.maxMatchOff + 1; i < end; i++ {
+				cv = cv>>8 | (uint64(d.content[off]) << 56)
+				h := hash8(cv, betterLongTableBits)
+				e.dictLongTable[h] = prevEntry{
+					offset: i,
+					prev:   e.dictLongTable[h].offset,
+				}
+				off++
+			}
+		}
+		e.lastDictID = d.id
+		e.allDirty = true
+	}
+
+	// Reset table to initial state
+	{
+		dirtyShardCnt := 0
+		if !e.allDirty {
+			for i := range e.shortTableShardDirty {
+				if e.shortTableShardDirty[i] {
+					dirtyShardCnt++
+				}
+			}
+		}
+		const shardCnt = betterShortTableShardCnt
+		const shardSize = betterShortTableShardSize
+		if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+			copy(e.table[:], e.dictTable)
+			for i := range e.shortTableShardDirty {
+				e.shortTableShardDirty[i] = false
+			}
+		} else {
+			for i := range e.shortTableShardDirty {
+				if !e.shortTableShardDirty[i] {
+					continue
+				}
+
+				copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+				e.shortTableShardDirty[i] = false
+			}
+		}
+	}
+	{
+		dirtyShardCnt := 0
+		if !e.allDirty {
+			for i := range e.longTableShardDirty {
+				if e.longTableShardDirty[i] {
+					dirtyShardCnt++
+				}
+			}
+		}
+		const shardCnt = betterLongTableShardCnt
+		const shardSize = betterLongTableShardSize
+		if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+			copy(e.longTable[:], e.dictLongTable)
+			for i := range e.longTableShardDirty {
+				e.longTableShardDirty[i] = false
+			}
+		} else {
+			for i := range e.longTableShardDirty {
+				if !e.longTableShardDirty[i] {
+					continue
+				}
+
+				copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize])
+				e.longTableShardDirty[i] = false
+			}
+		}
+	}
+	e.cur = e.maxMatchOff
+	e.allDirty = false
+}
+
+func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) {
+	e.longTableShardDirty[entryNum/betterLongTableShardSize] = true
+}
+
+func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) {
+	e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true
+}
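
markLongShardDirty and markShortShardDirty above record which table shard a write touched, so the dictionary Reset can copy back only the dirty shards, falling back to one big copy when more than roughly two thirds of them changed. A compact stand-alone sketch of that bookkeeping; the shard sizes and the shardedTable type are made up for the example and are not the package's constants or types.

```go
package main

import "fmt"

const (
	shardSize = 64 // assumed for this sketch
	shardCnt  = 16 // assumed for this sketch
)

type shardedTable struct {
	entries [shardSize * shardCnt]int32
	dirty   [shardCnt]bool
}

// set writes an entry and marks the shard that holds it dirty.
func (t *shardedTable) set(i int, v int32) {
	t.entries[i] = v
	t.dirty[i/shardSize] = true
}

// resetFrom restores entries from a clean copy: one big copy if most
// shards are dirty, otherwise only the dirty shards.
func (t *shardedTable) resetFrom(clean *[shardSize * shardCnt]int32) {
	dirtyCnt := 0
	for _, d := range t.dirty {
		if d {
			dirtyCnt++
		}
	}
	if dirtyCnt > shardCnt*4/6 {
		copy(t.entries[:], clean[:]) // mostly dirty: a single copy is cheaper
	} else {
		for i, d := range t.dirty {
			if d {
				copy(t.entries[i*shardSize:(i+1)*shardSize], clean[i*shardSize:(i+1)*shardSize])
			}
		}
	}
	t.dirty = [shardCnt]bool{}
}

func main() {
	var clean [shardSize * shardCnt]int32
	var t shardedTable
	t.set(3, 42)
	t.set(130, 7)
	t.resetFrom(&clean)
	fmt.Println(t.entries[3], t.entries[130], t.dirty[0]) // 0 0 false
}
```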
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
new file mode 100644
index 0000000..8629d43
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -0,0 +1,1121 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+const (
+	dFastLongTableBits = 17                      // Bits used in the long match table
+	dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
+	dFastLongTableMask = dFastLongTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+
+	dLongTableShardCnt  = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
+	dLongTableShardSize = dFastLongTableSize / tableShardCnt        // Size of an individual shard
+
+	dFastShortTableBits = tableBits                // Bits used in the short match table
+	dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
+	dFastShortTableMask = dFastShortTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+)
+
+type doubleFastEncoder struct {
+	fastEncoder
+	longTable [dFastLongTableSize]tableEntry
+}
+
+type doubleFastEncoderDict struct {
+	fastEncoderDict
+	longTable           [dFastLongTableSize]tableEntry
+	dictLongTable       []tableEntry
+	longTableShardDirty [dLongTableShardCnt]bool
+}
+
+// Encode mimics functionality in zstd_dfast.c
+func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = tableEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.longTable[i].offset = v
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	const stepSize = 1
+
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHashS := hash5(cv, dFastShortTableBits)
+			nextHashL := hash8(cv, dFastLongTableBits)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+
+			if canRepeat {
+				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+					s += length + repOff
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+					cv = load6432(src, s)
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := s - (candidateL.offset - e.cur)
+			coffsetS := s - (candidateS.offset - e.cur)
+
+			// Check if we have a long match.
+			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+				// Found a long match, likely at least 8 bytes.
+				// Reference encoder checks all 8 bytes, we only check 4,
+				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+				t = candidateL.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			// Check if we have a short match.
+			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hash8(cv, dFastLongTableBits)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+					// Found a long match, likely at least 8 bytes.
+					// Reference encoder checks all 8 bytes, we only check 4,
+					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+					t = candidateL.offset - e.cur
+					s += checkAt
+					if debugMatches {
+						println("long match (after short)")
+					}
+					break
+				}
+
+				t = candidateS.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				if debugMatches {
+					println("short match")
+				}
+				break
+			}
+
+			// No match found, move forward in input.
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+
+		// A 4-byte match has been found. Update recent offsets.
+		// We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the 4-byte match as long as possible.
+		l := e.matchlen(s+4, t+4, src) + 4
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) and start+2 (short)
+		index0 := s - l + 1
+		// Index match end-2 (long) and end-1 (short)
+		index1 := s - 2
+
+		cv0 := load6432(src, index0)
+		cv1 := load6432(src, index1)
+		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+		e.longTable[hash8(cv0, dFastLongTableBits)] = te0
+		e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+		cv0 >>= 8
+		cv1 >>= 8
+		te0.offset++
+		te1.offset++
+		te0.val = uint32(cv0)
+		te1.val = uint32(cv1)
+		e.table[hash5(cv0, dFastShortTableBits)] = te0
+		e.table[hash5(cv1, dFastShortTableBits)] = te1
+
+		cv = load6432(src, s)
+
+		if !canRepeat {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hash5(cv, dFastShortTableBits)
+			nextHashL := hash8(cv, dFastLongTableBits)
+
+			// We have at least a 4-byte match.
+			// No need to check backwards; we come straight from a match.
+			l := 4 + e.matchlen(s+4, o2+4, src)
+
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
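
The indexing block near the end of Encode reuses a single eight-byte load for two table updates: the full value feeds the long-table hash at the load position, and the value shifted right by one byte feeds the short-table hash at position+1. A rough sketch of that pattern follows; hashLong, hashShort and the map-backed tables are stand-ins, not the package's hash8/hash5 or fixed-size tables.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Toy multiplicative hashes standing in for hash8 (long) and hash5 (short).
func hashLong(v uint64) uint64  { return (v * 0x9E3779B185EBCA87) >> (64 - 17) }
func hashShort(v uint64) uint64 { return ((v << 24) * 0x9E3779B185EBCA87) >> (64 - 15) }

// indexPosition records two entries from one 8-byte load: the long hash
// at pos and the short hash at pos+1 (the value shifted by one byte).
func indexPosition(src []byte, pos int, long, short map[uint64]int) {
	cv := binary.LittleEndian.Uint64(src[pos:])
	long[hashLong(cv)] = pos
	short[hashShort(cv>>8)] = pos + 1
}

func main() {
	src := []byte("0123456789abcdef")
	long := map[uint64]int{}
	short := map[uint64]int{}
	indexPosition(src, 2, long, short)
	fmt.Println(len(long), len(short)) // 1 1
}
```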
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	if e.cur >= bufferReset {
+		for i := range e.table[:] {
+			e.table[i] = tableEntry{}
+		}
+		for i := range e.longTable[:] {
+			e.longTable[i] = tableEntry{}
+		}
+		e.cur = e.maxMatchOff
+	}
+
+	s := int32(0)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	const stepSize = 1
+
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		for {
+
+			nextHashS := hash5(cv, dFastShortTableBits)
+			nextHashL := hash8(cv, dFastLongTableBits)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+
+			if len(blk.sequences) > 2 {
+				if load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					//length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+					length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:]))
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+					s += length + repOff
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+					cv = load6432(src, s)
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := s - (candidateL.offset - e.cur)
+			coffsetS := s - (candidateS.offset - e.cur)
+
+			// Check if we have a long match.
+			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+				// Found a long match, likely at least 8 bytes.
+				// Reference encoder checks all 8 bytes, we only check 4,
+				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+				t = candidateL.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			// Check if we have a short match.
+			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hash8(cv, dFastLongTableBits)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+					// Found a long match, likely at least 8 bytes.
+					// Reference encoder checks all 8 bytes, we only check 4,
+					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+					t = candidateL.offset - e.cur
+					s += checkAt
+					if debugMatches {
+						println("long match (after short)")
+					}
+					break
+				}
+
+				t = candidateS.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				if debugMatches {
+					println("short match")
+				}
+				break
+			}
+
+			// No match found, move forward in input.
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+
+		// A 4-byte match has been found. Update recent offsets.
+		// We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		// Extend the 4-byte match as long as possible.
+		//l := e.matchlen(s+4, t+4, src) + 4
+		l := int32(matchLen(src[s+4:], src[t+4:])) + 4
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) and start+2 (short)
+		index0 := s - l + 1
+		// Index match end-2 (long) and end-1 (short)
+		index1 := s - 2
+
+		cv0 := load6432(src, index0)
+		cv1 := load6432(src, index1)
+		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+		e.longTable[hash8(cv0, dFastLongTableBits)] = te0
+		e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+		cv0 >>= 8
+		cv1 >>= 8
+		te0.offset++
+		te1.offset++
+		te0.val = uint32(cv0)
+		te1.val = uint32(cv1)
+		e.table[hash5(cv0, dFastShortTableBits)] = te0
+		e.table[hash5(cv1, dFastShortTableBits)] = te1
+
+		cv = load6432(src, s)
+
+		if len(blk.sequences) <= 2 {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hash5(cv, dFastShortTableBits)
+			nextHashL := hash8(cv, dFastLongTableBits)
+
+			// We have at least a 4-byte match.
+			// No need to check backwards; we come straight from a match.
+			//l := 4 + e.matchlen(s+4, o2+4, src)
+			l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+
+	// We do not store history, so we must offset e.cur to avoid false matches for next user.
+	if e.cur < bufferReset {
+		e.cur += int32(len(src))
+	}
+}
+
+// Encode will encode the content, with a dictionary if initialized for it.
+func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = tableEntry{}
+			}
+			e.markAllShardsDirty()
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.longTable[i].offset = v
+		}
+		e.markAllShardsDirty()
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	const stepSize = 1
+
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHashS := hash5(cv, dFastShortTableBits)
+			nextHashL := hash8(cv, dFastLongTableBits)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.markLongShardDirty(nextHashL)
+			e.table[nextHashS] = entry
+			e.markShardDirty(nextHashS)
+
+			if canRepeat {
+				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+					s += length + repOff
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+					cv = load6432(src, s)
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := s - (candidateL.offset - e.cur)
+			coffsetS := s - (candidateS.offset - e.cur)
+
+			// Check if we have a long match.
+			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+				// Found a long match, likely at least 8 bytes.
+				// Reference encoder checks all 8 bytes, we only check 4,
+				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+				t = candidateL.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			// Check if we have a short match.
+			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hash8(cv, dFastLongTableBits)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+				e.markLongShardDirty(nextHashL)
+				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+					// Found a long match, likely at least 8 bytes.
+					// Reference encoder checks all 8 bytes, we only check 4,
+					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+					t = candidateL.offset - e.cur
+					s += checkAt
+					if debugMatches {
+						println("long match (after short)")
+					}
+					break
+				}
+
+				t = candidateS.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				if debugMatches {
+					println("short match")
+				}
+				break
+			}
+
+			// No match found, move forward in input.
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+
+		// A 4-byte match has been found. Update recent offsets.
+		// We'll later see if it is longer than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the 4-byte match as long as possible.
+		l := e.matchlen(s+4, t+4, src) + 4
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) and start+2 (short)
+		index0 := s - l + 1
+		// Index match end-2 (long) and end-1 (short)
+		index1 := s - 2
+
+		cv0 := load6432(src, index0)
+		cv1 := load6432(src, index1)
+		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+		longHash1 := hash8(cv0, dFastLongTableBits)
+		longHash2 := hash8(cv1, dFastLongTableBits)
+		e.longTable[longHash1] = te0
+		e.longTable[longHash2] = te1
+		e.markLongShardDirty(longHash1)
+		e.markLongShardDirty(longHash2)
+		cv0 >>= 8
+		cv1 >>= 8
+		te0.offset++
+		te1.offset++
+		te0.val = uint32(cv0)
+		te1.val = uint32(cv1)
+		hashVal1 := hash5(cv0, dFastShortTableBits)
+		hashVal2 := hash5(cv1, dFastShortTableBits)
+		e.table[hashVal1] = te0
+		e.markShardDirty(hashVal1)
+		e.table[hashVal2] = te1
+		e.markShardDirty(hashVal2)
+
+		cv = load6432(src, s)
+
+		if !canRepeat {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hash5(cv, dFastShortTableBits)
+			nextHashL := hash8(cv, dFastLongTableBits)
+
+			// We have at least 4 byte match.
+			// No need to check backwards. We come straight from a match
+			l := 4 + e.matchlen(s+4, o2+4, src)
+
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.markLongShardDirty(nextHashL)
+			e.table[nextHashS] = entry
+			e.markShardDirty(nextHashS)
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+	// If we encoded more than 64K mark all dirty.
+	if len(src) > 64<<10 {
+		e.markAllShardsDirty()
+	}
+}
+
+// Reset will reset the encoder; dictionaries are not supported by this encoder.
+func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) {
+	e.fastEncoder.Reset(d, singleBlock)
+	if d != nil {
+		panic("doubleFastEncoder: Reset with dict not supported")
+	}
+}
+
+// Reset will reset the encoder and set a dictionary if not nil.
+func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
+	allDirty := e.allDirty
+	e.fastEncoderDict.Reset(d, singleBlock)
+	if d == nil {
+		return
+	}
+
+	// Init or copy dict table
+	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+		if len(e.dictLongTable) != len(e.longTable) {
+			e.dictLongTable = make([]tableEntry, len(e.longTable))
+		}
+		if len(d.content) >= 8 {
+			cv := load6432(d.content, 0)
+			e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+				val:    uint32(cv),
+				offset: e.maxMatchOff,
+			}
+			end := int32(len(d.content)) - 8 + e.maxMatchOff
+			for i := e.maxMatchOff + 1; i < end; i++ {
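+				// Slide the 8-byte window one byte at a time instead of reloading it.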
+				cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
+				e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+					val:    uint32(cv),
+					offset: i,
+				}
+			}
+		}
+		e.lastDictID = d.id
+		e.allDirty = true
+	}
+	// Reset table to initial state
+	e.cur = e.maxMatchOff
+
+	dirtyShardCnt := 0
+	if !allDirty {
+		for i := range e.longTableShardDirty {
+			if e.longTableShardDirty[i] {
+				dirtyShardCnt++
+			}
+		}
+	}
+
+	if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
+		copy(e.longTable[:], e.dictLongTable)
+		for i := range e.longTableShardDirty {
+			e.longTableShardDirty[i] = false
+		}
+		return
+	}
+	for i := range e.longTableShardDirty {
+		if !e.longTableShardDirty[i] {
+			continue
+		}
+
+		copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+		e.longTableShardDirty[i] = false
+	}
+}
+
+func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) {
+	e.longTableShardDirty[entryNum/dLongTableShardSize] = true
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
new file mode 100644
index 0000000..ba4a17e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -0,0 +1,1018 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"math"
+	"math/bits"
+)
+
+const (
+	tableBits      = 15                               // Bits used in the table
+	tableSize      = 1 << tableBits                   // Size of the table
+	tableShardCnt  = 1 << (tableBits - dictShardBits) // Number of shards in the table
+	tableShardSize = tableSize / tableShardCnt        // Size of an individual shard
+	tableMask      = tableSize - 1                    // Mask for table indices. Redundant, but can eliminate bounds checks.
+	maxMatchLength = 131074
+)
+
+type tableEntry struct {
+	val    uint32
+	offset int32
+}
+
+type fastEncoder struct {
+	fastBase
+	table [tableSize]tableEntry
+}
+
+type fastEncoderDict struct {
+	fastEncoder
+	dictTable       []tableEntry
+	tableShardDirty [tableShardCnt]bool
+	allDirty        bool
+}
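The table declared above is the whole search structure of the fast encoder: a position is keyed by a hash of its next eight bytes, and a stored entry is only trusted when its cached 32-bit `val` still matches the input and the back-reference stays within `maxMatchOff`. A minimal, self-contained sketch of that probe-and-replace pattern (toy hash and loader, not the package's real helpers) could look like this:

```go
package main

import "fmt"

const (
	tableBits   = 15 // toy constants with the same shape as the real table
	tableSize   = 1 << tableBits
	maxMatchOff = 1 << 16
	cur         = 1 // bias so the zero value never looks like a stored position
)

type tableEntry struct {
	val    uint32
	offset int32
}

// hashBytes is a toy stand-in for the package's hash6/hash8 helpers.
func hashBytes(v uint64, bits uint) uint32 {
	return uint32((v * 2654435761) >> (64 - bits))
}

// load64 reads 8 bytes little-endian, like load6432 does in the encoder.
func load64(src []byte, i int32) uint64 {
	var v uint64
	for j := 0; j < 8; j++ {
		v |= uint64(src[int(i)+j]) << (8 * uint(j))
	}
	return v
}

func main() {
	src := []byte("abcdefgh.......abcdefgh-tail....")
	var table [tableSize]tableEntry

	for s := int32(0); s+8 <= int32(len(src)); s++ {
		cv := load64(src, s)
		h := hashBytes(cv, tableBits)
		cand := table[h]
		// Store the current position before deciding, exactly like the encoder does.
		table[h] = tableEntry{offset: s + cur, val: uint32(cv)}
		if cand.offset != 0 && cand.val == uint32(cv) && s-(cand.offset-cur) < maxMatchOff {
			fmt.Printf("4-byte candidate at s=%d, earlier occurrence at %d\n", s, cand.offset-cur)
		}
	}
}
```

Running it reports the second occurrence of `abcdefgh` pointing back at offset 0, which is exactly the kind of candidate the real encoder then tries to extend forwards and backwards.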
+
+// Encode mimics the functionality in zstd_fast.c
+func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		inputMargin            = 8
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 2.
+	const stepSize = 2
+
+	// TEMPLATE
+	const hashLog = tableBits
+	// seems global, but would be nice to tweak.
+	const kSearchStrength = 7
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		// t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+		var t int32
+
+		// We will not use repeat offsets across blocks;
+		// this is ensured by not using them for the first 3 matches.
+		canRepeat := len(blk.sequences) > 2
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHash := hash6(cv, hashLog)
+			nextHash2 := hash6(cv>>8, hashLog)
+			candidate := e.table[nextHash]
+			candidate2 := e.table[nextHash2]
+			repIndex := s - offset1 + 2
+
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+				// Consider history as well.
+				var seq seq
+				var length int32
+				// length = 4 + e.matchlen(s+6, repIndex+4, src)
+				{
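+					// Inlined match-length loop: endI rounds len(a) down to a
+					// multiple of 8 so we can compare 8 bytes at a time; the first
+					// differing byte is found with TrailingZeros64 on the XOR.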
+					a := src[s+6:]
+					b := src[repIndex+4:]
+					endI := len(a) & (math.MaxInt32 - 7)
+					length = int32(endI) + 4
+					for i := 0; i < endI; i += 8 {
+						if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+							length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+							break
+						}
+					}
+				}
+
+				seq.matchLen = uint32(length - zstdMinMatch)
+
+				// We might be able to match backwards.
+				// Extend as long as we can.
+				start := s + 2
+				// We end the search early, so we don't risk 0 literals
+				// and have to do special offset treatment.
+				startLimit := nextEmit + 1
+
+				sMin := s - e.maxMatchOff
+				if sMin < 0 {
+					sMin = 0
+				}
+				for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
+					repIndex--
+					start--
+					seq.matchLen++
+				}
+				addLiterals(&seq, start)
+
+				// rep 0
+				seq.offset = 1
+				if debugSequences {
+					println("repeat sequence", seq, "next s:", s)
+				}
+				blk.sequences = append(blk.sequences, seq)
+				s += length + 2
+				nextEmit = s
+				if s >= sLimit {
+					if debug {
+						println("repeat ended", s, length)
+
+					}
+					break encodeLoop
+				}
+				cv = load6432(src, s)
+				continue
+			}
+			coffset0 := s - (candidate.offset - e.cur)
+			coffset1 := s - (candidate2.offset - e.cur) + 1
+			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+				// found a regular match
+				t = candidate.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				break
+			}
+
+			if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+				// found a regular match
+				t = candidate2.offset - e.cur
+				s++
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				break
+			}
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+		// A 4-byte match has been found. We'll later see if it is longer than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the 4-byte match as long as possible.
+		//l := e.matchlen(s+4, t+4, src) + 4
+		var l int32
+		{
+			a := src[s+4:]
+			b := src[t+4:]
+			endI := len(a) & (math.MaxInt32 - 7)
+			l = int32(endI) + 4
+			for i := 0; i < endI; i += 8 {
+				if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+					l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+					break
+				}
+			}
+		}
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence.
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		// Don't use repeat offsets
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+		cv = load6432(src, s)
+
+		// Check offset 2
+		if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
+			// We have at least 4 byte match.
+			// No need to check backwards. We come straight from a match
+			//l := 4 + e.matchlen(s+4, o2+4, src)
+			var l int32
+			{
+				a := src[s+4:]
+				b := src[o2+4:]
+				endI := len(a) & (math.MaxInt32 - 7)
+				l = int32(endI) + 4
+				for i := 0; i < endI; i += 8 {
+					if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+						l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+						break
+					}
+				}
+			}
+
+			// Store this, since we have it.
+			nextHash := hash6(cv, hashLog)
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				break encodeLoop
+			}
+			// Prepare next loop.
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// The most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+	const (
+		inputMargin            = 8
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+	)
+	if debug {
+		if len(src) > maxBlockSize {
+			panic("src too big")
+		}
+	}
+
+	// Protect against e.cur wraparound.
+	if e.cur >= bufferReset {
+		for i := range e.table[:] {
+			e.table[i] = tableEntry{}
+		}
+		e.cur = e.maxMatchOff
+	}
+
+	s := int32(0)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 2.
+	const stepSize = 2
+
+	// TEMPLATE
+	const hashLog = tableBits
+	// seems global, but would be nice to tweak.
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		// t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+		var t int32
+
+		// We will not use repeat offsets across blocks;
+		// this is ensured by not using them for the first 3 matches.
+
+		for {
+			nextHash := hash6(cv, hashLog)
+			nextHash2 := hash6(cv>>8, hashLog)
+			candidate := e.table[nextHash]
+			candidate2 := e.table[nextHash2]
+			repIndex := s - offset1 + 2
+
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+			if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
+				// Consider history as well.
+				var seq seq
+				// length := 4 + e.matchlen(s+6, repIndex+4, src)
+				// length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
+				var length int32
+				{
+					a := src[s+6:]
+					b := src[repIndex+4:]
+					endI := len(a) & (math.MaxInt32 - 7)
+					length = int32(endI) + 4
+					for i := 0; i < endI; i += 8 {
+						if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+							length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+							break
+						}
+					}
+				}
+
+				seq.matchLen = uint32(length - zstdMinMatch)
+
+				// We might be able to match backwards.
+				// Extend as long as we can.
+				start := s + 2
+				// We end the search early, so we don't risk 0 literals
+				// and have to do special offset treatment.
+				startLimit := nextEmit + 1
+
+				sMin := s - e.maxMatchOff
+				if sMin < 0 {
+					sMin = 0
+				}
+				for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] {
+					repIndex--
+					start--
+					seq.matchLen++
+				}
+				addLiterals(&seq, start)
+
+				// rep 0
+				seq.offset = 1
+				if debugSequences {
+					println("repeat sequence", seq, "next s:", s)
+				}
+				blk.sequences = append(blk.sequences, seq)
+				s += length + 2
+				nextEmit = s
+				if s >= sLimit {
+					if debug {
+						println("repeat ended", s, length)
+
+					}
+					break encodeLoop
+				}
+				cv = load6432(src, s)
+				continue
+			}
+			coffset0 := s - (candidate.offset - e.cur)
+			coffset1 := s - (candidate2.offset - e.cur) + 1
+			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+				// found a regular match
+				t = candidate.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff))
+				}
+				break
+			}
+
+			if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+				// found a regular match
+				t = candidate2.offset - e.cur
+				s++
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				break
+			}
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+		// A 4-byte match has been found. We'll later see if it is longer than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && t < 0 {
+			panic(fmt.Sprintf("t (%d) < 0 ", t))
+		}
+		// Extend the 4-byte match as long as possible.
+		//l := e.matchlenNoHist(s+4, t+4, src) + 4
+		// l := int32(matchLen(src[s+4:], src[t+4:])) + 4
+		var l int32
+		{
+			a := src[s+4:]
+			b := src[t+4:]
+			endI := len(a) & (math.MaxInt32 - 7)
+			l = int32(endI) + 4
+			for i := 0; i < endI; i += 8 {
+				if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+					l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+					break
+				}
+			}
+		}
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence.
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		// Don't use repeat offsets
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+		cv = load6432(src, s)
+
+		// Check offset 2
+		if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
+			// We have at least 4 byte match.
+			// No need to check backwards. We come straight from a match
+			//l := 4 + e.matchlenNoHist(s+4, o2+4, src)
+			// l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+			var l int32
+			{
+				a := src[s+4:]
+				b := src[o2+4:]
+				endI := len(a) & (math.MaxInt32 - 7)
+				l = int32(endI) + 4
+				for i := 0; i < endI; i += 8 {
+					if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+						l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+						break
+					}
+				}
+			}
+
+			// Store this, since we have it.
+			nextHash := hash6(cv, hashLog)
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				break encodeLoop
+			}
+			// Prepare next loop.
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+	// We do not store history, so we must offset e.cur to avoid false matches for next user.
+	if e.cur < bufferReset {
+		e.cur += int32(len(src))
+	}
+}
+
+// Encode will encode the content, with a dictionary if initialized for it.
+func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
+	const (
+		inputMargin            = 8
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+	)
+	if e.allDirty || len(src) > 32<<10 {
+		e.fastEncoder.Encode(blk, src)
+		e.allDirty = true
+		return
+	}
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 2.
+	const stepSize = 2
+
+	// TEMPLATE
+	const hashLog = tableBits
+	// seems global, but would be nice to tweak.
+	const kSearchStrength = 7
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		// t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+		var t int32
+
+		// We will not use repeat offsets across blocks;
+		// this is ensured by not using them for the first 3 matches.
+		canRepeat := len(blk.sequences) > 2
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHash := hash6(cv, hashLog)
+			nextHash2 := hash6(cv>>8, hashLog)
+			candidate := e.table[nextHash]
+			candidate2 := e.table[nextHash2]
+			repIndex := s - offset1 + 2
+
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.markShardDirty(nextHash)
+			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+			e.markShardDirty(nextHash2)
+
+			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+				// Consider history as well.
+				var seq seq
+				var length int32
+				// length = 4 + e.matchlen(s+6, repIndex+4, src)
+				{
+					a := src[s+6:]
+					b := src[repIndex+4:]
+					endI := len(a) & (math.MaxInt32 - 7)
+					length = int32(endI) + 4
+					for i := 0; i < endI; i += 8 {
+						if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+							length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+							break
+						}
+					}
+				}
+
+				seq.matchLen = uint32(length - zstdMinMatch)
+
+				// We might be able to match backwards.
+				// Extend as long as we can.
+				start := s + 2
+				// We end the search early, so we don't risk 0 literals
+				// and have to do special offset treatment.
+				startLimit := nextEmit + 1
+
+				sMin := s - e.maxMatchOff
+				if sMin < 0 {
+					sMin = 0
+				}
+				for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
+					repIndex--
+					start--
+					seq.matchLen++
+				}
+				addLiterals(&seq, start)
+
+				// rep 0
+				seq.offset = 1
+				if debugSequences {
+					println("repeat sequence", seq, "next s:", s)
+				}
+				blk.sequences = append(blk.sequences, seq)
+				s += length + 2
+				nextEmit = s
+				if s >= sLimit {
+					if debug {
+						println("repeat ended", s, length)
+
+					}
+					break encodeLoop
+				}
+				cv = load6432(src, s)
+				continue
+			}
+			coffset0 := s - (candidate.offset - e.cur)
+			coffset1 := s - (candidate2.offset - e.cur) + 1
+			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+				// found a regular match
+				t = candidate.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				break
+			}
+
+			if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+				// found a regular match
+				t = candidate2.offset - e.cur
+				s++
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				break
+			}
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+		// A 4-byte match has been found. We'll later see if it is longer than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		if debugAsserts && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the 4-byte match as long as possible.
+		//l := e.matchlen(s+4, t+4, src) + 4
+		var l int32
+		{
+			a := src[s+4:]
+			b := src[t+4:]
+			endI := len(a) & (math.MaxInt32 - 7)
+			l = int32(endI) + 4
+			for i := 0; i < endI; i += 8 {
+				if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+					l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+					break
+				}
+			}
+		}
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence.
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		// Don't use repeat offsets
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+		cv = load6432(src, s)
+
+		// Check offset 2
+		if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
+			// We have at least 4 byte match.
+			// No need to check backwards. We come straight from a match
+			//l := 4 + e.matchlen(s+4, o2+4, src)
+			var l int32
+			{
+				a := src[s+4:]
+				b := src[o2+4:]
+				endI := len(a) & (math.MaxInt32 - 7)
+				l = int32(endI) + 4
+				for i := 0; i < endI; i += 8 {
+					if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+						l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+						break
+					}
+				}
+			}
+
+			// Store this, since we have it.
+			nextHash := hash6(cv, hashLog)
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.markShardDirty(nextHash)
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				break encodeLoop
+			}
+			// Prepare next loop.
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// Reset will reset the encoder; dictionaries are not supported by this encoder.
+func (e *fastEncoder) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d != nil {
+		panic("fastEncoder: Reset with dict")
+	}
+}
+
+// Reset will reset the encoder and set a dictionary if not nil.
+func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d == nil {
+		return
+	}
+
+	// Init or copy dict table
+	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+		if len(e.dictTable) != len(e.table) {
+			e.dictTable = make([]tableEntry, len(e.table))
+		}
+		if true {
+			end := e.maxMatchOff + int32(len(d.content)) - 8
+			for i := e.maxMatchOff; i < end; i += 3 {
+				const hashLog = tableBits
+
+				cv := load6432(d.content, i-e.maxMatchOff)
+				nextHash := hash6(cv, hashLog)      // 0 -> 5
+				nextHash1 := hash6(cv>>8, hashLog)  // 1 -> 6
+				nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7
+				e.dictTable[nextHash] = tableEntry{
+					val:    uint32(cv),
+					offset: i,
+				}
+				e.dictTable[nextHash1] = tableEntry{
+					val:    uint32(cv >> 8),
+					offset: i + 1,
+				}
+				e.dictTable[nextHash2] = tableEntry{
+					val:    uint32(cv >> 16),
+					offset: i + 2,
+				}
+			}
+		}
+		e.lastDictID = d.id
+		e.allDirty = true
+	}
+
+	e.cur = e.maxMatchOff
+	dirtyShardCnt := 0
+	if !e.allDirty {
+		for i := range e.tableShardDirty {
+			if e.tableShardDirty[i] {
+				dirtyShardCnt++
+			}
+		}
+	}
+
+	const shardCnt = tableShardCnt
+	const shardSize = tableShardSize
+	if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+		copy(e.table[:], e.dictTable)
+		for i := range e.tableShardDirty {
+			e.tableShardDirty[i] = false
+		}
+		e.allDirty = false
+		return
+	}
+	for i := range e.tableShardDirty {
+		if !e.tableShardDirty[i] {
+			continue
+		}
+
+		copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		e.tableShardDirty[i] = false
+	}
+	e.allDirty = false
+}
+
+func (e *fastEncoderDict) markAllShardsDirty() {
+	e.allDirty = true
+}
+
+func (e *fastEncoderDict) markShardDirty(entryNum uint32) {
+	e.tableShardDirty[entryNum/tableShardSize] = true
+}
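The shard bookkeeping above exists so that `Reset` can restore only the parts of the table that encoding actually touched, instead of copying the full dictionary-derived table every time. A simplified stand-alone illustration of that mark-and-restore idea (toy sizes and types, not the package's) might be:

```go
package main

import "fmt"

const (
	tableSize = 1 << 6 // toy size
	shardCnt  = 4
	shardSize = tableSize / shardCnt
)

type shardedTable struct {
	table      [tableSize]int32
	pristine   [tableSize]int32 // snapshot built from the dictionary
	shardDirty [shardCnt]bool
}

func (t *shardedTable) set(i int, v int32) {
	t.table[i] = v
	t.shardDirty[i/shardSize] = true // cheap: one bool write per table update
}

// reset restores only the shards touched since the last reset.
func (t *shardedTable) reset() (copied int) {
	for s := range t.shardDirty {
		if !t.shardDirty[s] {
			continue
		}
		copy(t.table[s*shardSize:(s+1)*shardSize], t.pristine[s*shardSize:(s+1)*shardSize])
		t.shardDirty[s] = false
		copied++
	}
	return copied
}

func main() {
	var t shardedTable
	t.set(3, 42) // dirties shard 0
	t.set(50, 7) // dirties shard 3
	fmt.Println("shards restored:", t.reset()) // prints 2, not 4
}
```

The real encoder additionally falls back to a full copy when most shards are dirty, since per-shard copying only pays off for small inputs.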
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
new file mode 100644
index 0000000..4871dd0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -0,0 +1,576 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"crypto/rand"
+	"fmt"
+	"io"
+	rdebug "runtime/debug"
+	"sync"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+// Encoder provides encoding to Zstandard.
+// An Encoder can be used for either compressing a stream via the
+// io.WriteCloser interface supported by the Encoder or as multiple independent
+// tasks via the EncodeAll function.
+// For smaller inputs, the EncodeAll function is encouraged.
+// Use NewWriter to create a new instance.
+type Encoder struct {
+	o        encoderOptions
+	encoders chan encoder
+	state    encoderState
+	init     sync.Once
+}
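A minimal sketch of the streaming path described in the comment above, using only APIs defined in this file (`NewWriter`, `Write` driven by `io.Copy`, `Close`):

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func compressStream(dst io.Writer, src io.Reader) error {
	// Options such as zstd.WithEncoderCRC(true) could be passed here.
	enc, err := zstd.NewWriter(dst)
	if err != nil {
		return err
	}
	// io.Copy drives Write (and ReadFrom, when available) on the encoder.
	if _, err := io.Copy(enc, src); err != nil {
		enc.Close()
		return err
	}
	// Close flushes the final block and writes the CRC if enabled.
	return enc.Close()
}

func main() {
	if err := compressStream(os.Stdout, os.Stdin); err != nil {
		log.Fatal(err)
	}
}
```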
+
+type encoder interface {
+	Encode(blk *blockEnc, src []byte)
+	EncodeNoHist(blk *blockEnc, src []byte)
+	Block() *blockEnc
+	CRC() *xxhash.Digest
+	AppendCRC([]byte) []byte
+	WindowSize(size int) int32
+	UseBlock(*blockEnc)
+	Reset(d *dict, singleBlock bool)
+}
+
+type encoderState struct {
+	w                io.Writer
+	filling          []byte
+	current          []byte
+	previous         []byte
+	encoder          encoder
+	writing          *blockEnc
+	err              error
+	writeErr         error
+	nWritten         int64
+	headerWritten    bool
+	eofWritten       bool
+	fullFrameWritten bool
+
+	// This waitgroup indicates an encode is running.
+	wg sync.WaitGroup
+	// This waitgroup indicates we have a block encoding/writing.
+	wWg sync.WaitGroup
+}
+
+// NewWriter will create a new Zstandard encoder.
+// If the encoder will be used for encoding blocks a nil writer can be used.
+func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) {
+	initPredefined()
+	var e Encoder
+	e.o.setDefault()
+	for _, o := range opts {
+		err := o(&e.o)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if w != nil {
+		e.Reset(w)
+	}
+	return &e, nil
+}
+
+func (e *Encoder) initialize() {
+	if e.o.concurrent == 0 {
+		e.o.setDefault()
+	}
+	e.encoders = make(chan encoder, e.o.concurrent)
+	for i := 0; i < e.o.concurrent; i++ {
+		enc := e.o.encoder()
+		e.encoders <- enc
+	}
+}
+
+// Reset will re-initialize the writer and new writes will encode to the supplied writer
+// as a new, independent stream.
+func (e *Encoder) Reset(w io.Writer) {
+	s := &e.state
+	s.wg.Wait()
+	s.wWg.Wait()
+	if cap(s.filling) == 0 {
+		s.filling = make([]byte, 0, e.o.blockSize)
+	}
+	if cap(s.current) == 0 {
+		s.current = make([]byte, 0, e.o.blockSize)
+	}
+	if cap(s.previous) == 0 {
+		s.previous = make([]byte, 0, e.o.blockSize)
+	}
+	if s.encoder == nil {
+		s.encoder = e.o.encoder()
+	}
+	if s.writing == nil {
+		s.writing = &blockEnc{lowMem: e.o.lowMem}
+		s.writing.init()
+	}
+	s.writing.initNewEncode()
+	s.filling = s.filling[:0]
+	s.current = s.current[:0]
+	s.previous = s.previous[:0]
+	s.encoder.Reset(e.o.dict, false)
+	s.headerWritten = false
+	s.eofWritten = false
+	s.fullFrameWritten = false
+	s.w = w
+	s.err = nil
+	s.nWritten = 0
+	s.writeErr = nil
+}
+
+// Write data to the encoder.
+// Input data will be buffered and as the buffer fills up
+// content will be compressed and written to the output.
+// When done writing, use Close to flush the remaining output
+// and write CRC if requested.
+func (e *Encoder) Write(p []byte) (n int, err error) {
+	s := &e.state
+	for len(p) > 0 {
+		if len(p)+len(s.filling) < e.o.blockSize {
+			if e.o.crc {
+				_, _ = s.encoder.CRC().Write(p)
+			}
+			s.filling = append(s.filling, p...)
+			return n + len(p), nil
+		}
+		add := p
+		if len(p)+len(s.filling) > e.o.blockSize {
+			add = add[:e.o.blockSize-len(s.filling)]
+		}
+		if e.o.crc {
+			_, _ = s.encoder.CRC().Write(add)
+		}
+		s.filling = append(s.filling, add...)
+		p = p[len(add):]
+		n += len(add)
+		if len(s.filling) < e.o.blockSize {
+			return n, nil
+		}
+		err := e.nextBlock(false)
+		if err != nil {
+			return n, err
+		}
+		if debugAsserts && len(s.filling) > 0 {
+			panic(len(s.filling))
+		}
+	}
+	return n, nil
+}
+
+// nextBlock will synchronize and start compressing input in e.state.filling.
+// If an error has occurred during encoding it will be returned.
+func (e *Encoder) nextBlock(final bool) error {
+	s := &e.state
+	// Wait for current block.
+	s.wg.Wait()
+	if s.err != nil {
+		return s.err
+	}
+	if len(s.filling) > e.o.blockSize {
+		return fmt.Errorf("block > maxStoreBlockSize")
+	}
+	if !s.headerWritten {
+		// If we have a single block encode, do a sync compression.
+		if final && len(s.filling) == 0 && !e.o.fullZero {
+			s.headerWritten = true
+			s.fullFrameWritten = true
+			s.eofWritten = true
+			return nil
+		}
+		if final && len(s.filling) > 0 {
+			s.current = e.EncodeAll(s.filling, s.current[:0])
+			var n2 int
+			n2, s.err = s.w.Write(s.current)
+			if s.err != nil {
+				return s.err
+			}
+			s.nWritten += int64(n2)
+			s.current = s.current[:0]
+			s.filling = s.filling[:0]
+			s.headerWritten = true
+			s.fullFrameWritten = true
+			s.eofWritten = true
+			return nil
+		}
+
+		var tmp [maxHeaderSize]byte
+		fh := frameHeader{
+			ContentSize:   0,
+			WindowSize:    uint32(s.encoder.WindowSize(0)),
+			SingleSegment: false,
+			Checksum:      e.o.crc,
+			DictID:        e.o.dict.ID(),
+		}
+
+		dst, err := fh.appendTo(tmp[:0])
+		if err != nil {
+			return err
+		}
+		s.headerWritten = true
+		s.wWg.Wait()
+		var n2 int
+		n2, s.err = s.w.Write(dst)
+		if s.err != nil {
+			return s.err
+		}
+		s.nWritten += int64(n2)
+	}
+	if s.eofWritten {
+		// Ensure we only write it once.
+		final = false
+	}
+
+	if len(s.filling) == 0 {
+		// Final block, but no data.
+		if final {
+			enc := s.encoder
+			blk := enc.Block()
+			blk.reset(nil)
+			blk.last = true
+			blk.encodeRaw(nil)
+			s.wWg.Wait()
+			_, s.err = s.w.Write(blk.output)
+			s.nWritten += int64(len(blk.output))
+			s.eofWritten = true
+		}
+		return s.err
+	}
+
+	// Move blocks forward.
+	s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
+	s.wg.Add(1)
+	go func(src []byte) {
+		if debug {
+			println("Adding block,", len(src), "bytes, final:", final)
+		}
+		defer func() {
+			if r := recover(); r != nil {
+				s.err = fmt.Errorf("panic while encoding: %v", r)
+				rdebug.PrintStack()
+			}
+			s.wg.Done()
+		}()
+		enc := s.encoder
+		blk := enc.Block()
+		enc.Encode(blk, src)
+		blk.last = final
+		if final {
+			s.eofWritten = true
+		}
+		// Wait for pending writes.
+		s.wWg.Wait()
+		if s.writeErr != nil {
+			s.err = s.writeErr
+			return
+		}
+		// Transfer encoders from previous write block.
+		blk.swapEncoders(s.writing)
+		// Transfer recent offsets to next.
+		enc.UseBlock(s.writing)
+		s.writing = blk
+		s.wWg.Add(1)
+		go func() {
+			defer func() {
+				if r := recover(); r != nil {
+					s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r)
+					rdebug.PrintStack()
+				}
+				s.wWg.Done()
+			}()
+			err := errIncompressible
+			// If we got the exact same number of literals as input,
+			// assume the literals cannot be compressed.
+			if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
+				err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+			}
+			switch err {
+			case errIncompressible:
+				if debug {
+					println("Storing incompressible block as raw")
+				}
+				blk.encodeRaw(src)
+				// In fast mode, we do not transfer offsets, so we don't have to deal with changing them here.
+			case nil:
+			default:
+				s.writeErr = err
+				return
+			}
+			_, s.writeErr = s.w.Write(blk.output)
+			s.nWritten += int64(len(blk.output))
+		}()
+	}(s.current)
+	return nil
+}
+
+// ReadFrom reads data from r until EOF or error.
+// The return value n is the number of bytes read.
+// Any error except io.EOF encountered during the read is also returned.
+//
+// The Copy function uses ReaderFrom if available.
+func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
+	if debug {
+		println("Using ReadFrom")
+	}
+
+	// Flush any current writes.
+	if len(e.state.filling) > 0 {
+		if err := e.nextBlock(false); err != nil {
+			return 0, err
+		}
+	}
+	e.state.filling = e.state.filling[:e.o.blockSize]
+	src := e.state.filling
+	for {
+		n2, err := r.Read(src)
+		if e.o.crc {
+			_, _ = e.state.encoder.CRC().Write(src[:n2])
+		}
+		// src is now the unfilled part...
+		src = src[n2:]
+		n += int64(n2)
+		switch err {
+		case io.EOF:
+			e.state.filling = e.state.filling[:len(e.state.filling)-len(src)]
+			if debug {
+				println("ReadFrom: got EOF final block:", len(e.state.filling))
+			}
+			return n, nil
+		case nil:
+		default:
+			if debug {
+				println("ReadFrom: got error:", err)
+			}
+			e.state.err = err
+			return n, err
+		}
+		if len(src) > 0 {
+			if debug {
+				println("ReadFrom: got space left in source:", len(src))
+			}
+			continue
+		}
+		err = e.nextBlock(false)
+		if err != nil {
+			return n, err
+		}
+		e.state.filling = e.state.filling[:e.o.blockSize]
+		src = e.state.filling
+	}
+}
+
+// Flush will send the currently written data to output
+// and block until everything has been written.
+// This should only be used on rare occasions where pushing the currently queued data is critical.
+func (e *Encoder) Flush() error {
+	s := &e.state
+	if len(s.filling) > 0 {
+		err := e.nextBlock(false)
+		if err != nil {
+			return err
+		}
+	}
+	s.wg.Wait()
+	s.wWg.Wait()
+	if s.err != nil {
+		return s.err
+	}
+	return s.writeErr
+}
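A small sketch of when `Flush` is useful: making each logical message visible downstream before the next one arrives, at the cost of ending the current block early. Only `NewWriter`, `Write`, `Flush` and `Close` from this file are used:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		log.Fatal(err)
	}
	for _, msg := range []string{"first message", "second message"} {
		if _, err := enc.Write([]byte(msg)); err != nil {
			log.Fatal(err)
		}
		// Flush blocks until everything queued so far has been compressed and written.
		if err := enc.Flush(); err != nil {
			log.Fatal(err)
		}
		fmt.Println("bytes on the wire after flush:", buf.Len())
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
}
```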
+
+// Close will flush the final output and close the stream.
+// The function will block until everything has been written.
+// The Encoder can still be re-used after calling this.
+func (e *Encoder) Close() error {
+	s := &e.state
+	if s.encoder == nil {
+		return nil
+	}
+	err := e.nextBlock(true)
+	if err != nil {
+		return err
+	}
+	if e.state.fullFrameWritten {
+		return s.err
+	}
+	s.wg.Wait()
+	s.wWg.Wait()
+
+	if s.err != nil {
+		return s.err
+	}
+	if s.writeErr != nil {
+		return s.writeErr
+	}
+
+	// Write CRC
+	if e.o.crc && s.err == nil {
+		// heap alloc.
+		var tmp [4]byte
+		_, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0]))
+		s.nWritten += 4
+	}
+
+	// Add padding with content from crypto/rand.Reader
+	if s.err == nil && e.o.pad > 0 {
+		add := calcSkippableFrame(s.nWritten, int64(e.o.pad))
+		frame, err := skippableFrame(s.filling[:0], add, rand.Reader)
+		if err != nil {
+			return err
+		}
+		_, s.err = s.w.Write(frame)
+	}
+	return s.err
+}
+
+// EncodeAll will encode all input in src and append it to dst.
+// This function can be called concurrently, but each call will only run on a single goroutine.
+// If empty input is given, nothing is returned, unless WithZeroFrames is specified.
+// Encoded blocks can be concatenated and the result will be the combined input stream.
+// Data compressed with EncodeAll can be decoded with the Decoder,
+// using either a stream or DecodeAll.
+func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+	if len(src) == 0 {
+		if e.o.fullZero {
+			// Add frame header.
+			fh := frameHeader{
+				ContentSize:   0,
+				WindowSize:    MinWindowSize,
+				SingleSegment: true,
+				// Adding a checksum would be a waste of space.
+				Checksum: false,
+				DictID:   0,
+			}
+			dst, _ = fh.appendTo(dst)
+
+			// Write raw block as last one only.
+			var blk blockHeader
+			blk.setSize(0)
+			blk.setType(blockTypeRaw)
+			blk.setLast(true)
+			dst = blk.appendTo(dst)
+		}
+		return dst
+	}
+	e.init.Do(e.initialize)
+	enc := <-e.encoders
+	defer func() {
+		// Release encoder reference to last block.
+		// If a non-single block is needed the encoder will reset again.
+		e.encoders <- enc
+	}()
+	// Use single segments when above minimum window and below 1MB.
+	single := len(src) < 1<<20 && len(src) > MinWindowSize
+	if e.o.single != nil {
+		single = *e.o.single
+	}
+	fh := frameHeader{
+		ContentSize:   uint64(len(src)),
+		WindowSize:    uint32(enc.WindowSize(len(src))),
+		SingleSegment: single,
+		Checksum:      e.o.crc,
+		DictID:        e.o.dict.ID(),
+	}
+
+	// If less than 1MB, allocate a buffer up front.
+	if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
+		dst = make([]byte, 0, len(src))
+	}
+	dst, err := fh.appendTo(dst)
+	if err != nil {
+		panic(err)
+	}
+
+	// If we can do everything in one block, prefer that.
+	if len(src) <= maxCompressedBlockSize {
+		enc.Reset(e.o.dict, true)
+		// Slightly faster with no history and everything in one block.
+		if e.o.crc {
+			_, _ = enc.CRC().Write(src)
+		}
+		blk := enc.Block()
+		blk.last = true
+		if e.o.dict == nil {
+			enc.EncodeNoHist(blk, src)
+		} else {
+			enc.Encode(blk, src)
+		}
+
+		// If we got the exact same number of literals as input,
+		// assume the literals cannot be compressed.
+		err := errIncompressible
+		oldout := blk.output
+		if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
+			// Output directly to dst
+			blk.output = dst
+			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+		}
+
+		switch err {
+		case errIncompressible:
+			if debug {
+				println("Storing incompressible block as raw")
+			}
+			dst = blk.encodeRawTo(dst, src)
+		case nil:
+			dst = blk.output
+		default:
+			panic(err)
+		}
+		blk.output = oldout
+	} else {
+		enc.Reset(e.o.dict, false)
+		blk := enc.Block()
+		for len(src) > 0 {
+			todo := src
+			if len(todo) > e.o.blockSize {
+				todo = todo[:e.o.blockSize]
+			}
+			src = src[len(todo):]
+			if e.o.crc {
+				_, _ = enc.CRC().Write(todo)
+			}
+			blk.pushOffsets()
+			enc.Encode(blk, todo)
+			if len(src) == 0 {
+				blk.last = true
+			}
+			err := errIncompressible
+			// If we got the exact same number of literals as input,
+			// assume the literals cannot be compressed.
+			if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
+				err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
+			}
+
+			switch err {
+			case errIncompressible:
+				if debug {
+					println("Storing incompressible block as raw")
+				}
+				dst = blk.encodeRawTo(dst, todo)
+				blk.popOffsets()
+			case nil:
+				dst = append(dst, blk.output...)
+			default:
+				panic(err)
+			}
+			blk.reset(nil)
+		}
+	}
+	if e.o.crc {
+		dst = enc.AppendCRC(dst)
+	}
+	// Add padding with content from crypto/rand.Reader
+	if e.o.pad > 0 {
+		add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+		dst, err = skippableFrame(dst, add, rand.Reader)
+		if err != nil {
+			panic(err)
+		}
+	}
+	return dst
+}
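A round-trip sketch for `EncodeAll`; the decoding side (`NewReader`/`DecodeAll`) is assumed to be the package's Decoder referenced in the comment above and is not part of this hunk:

```go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A nil writer is fine when the encoder is only used via EncodeAll.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()

	payload := []byte("small payloads are a good fit for EncodeAll")
	compressed := enc.EncodeAll(payload, nil)

	// Decoder side, assumed to come from the same package as noted above.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	plain, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(plain))
}
```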
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
new file mode 100644
index 0000000..16d4ab6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -0,0 +1,312 @@
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+// EOption is an option for creating an encoder.
+type EOption func(*encoderOptions) error
+
+// options retains accumulated state of multiple options.
+type encoderOptions struct {
+	concurrent      int
+	level           EncoderLevel
+	single          *bool
+	pad             int
+	blockSize       int
+	windowSize      int
+	crc             bool
+	fullZero        bool
+	noEntropy       bool
+	allLitEntropy   bool
+	customWindow    bool
+	customALEntropy bool
+	lowMem          bool
+	dict            *dict
+}
+
+func (o *encoderOptions) setDefault() {
+	*o = encoderOptions{
+		concurrent:    runtime.GOMAXPROCS(0),
+		crc:           true,
+		single:        nil,
+		blockSize:     1 << 16,
+		windowSize:    8 << 20,
+		level:         SpeedDefault,
+		allLitEntropy: true,
+		lowMem:        false,
+	}
+}
+
+// encoder returns an encoder with the selected options.
+func (o encoderOptions) encoder() encoder {
+	switch o.level {
+	case SpeedFastest:
+		if o.dict != nil {
+			return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+		}
+		return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+
+	case SpeedDefault:
+		if o.dict != nil {
+			return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
+		}
+		return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+	case SpeedBetterCompression:
+		if o.dict != nil {
+			return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+		}
+		return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+	case SpeedBestCompression:
+		return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+	}
+	panic("unknown compression level")
+}
+
+// WithEncoderCRC will add CRC value to output.
+// Output will be 4 bytes larger.
+func WithEncoderCRC(b bool) EOption {
+	return func(o *encoderOptions) error { o.crc = b; return nil }
+}
+
+// WithEncoderConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WithEncoderConcurrency(n int) EOption {
+	return func(o *encoderOptions) error {
+		if n <= 0 {
+			return fmt.Errorf("concurrency must be at least 1")
+		}
+		o.concurrent = n
+		return nil
+	}
+}
+
+// WithWindowSize will set the maximum allowed back-reference distance.
+// The value must be a power of two between MinWindowSize and MaxWindowSize.
+// A larger value will enable better compression but allocate more memory and,
+// for above-default values, take considerably longer.
+// The default value is determined by the compression level.
+func WithWindowSize(n int) EOption {
+	return func(o *encoderOptions) error {
+		switch {
+		case n < MinWindowSize:
+			return fmt.Errorf("window size must be at least %d", MinWindowSize)
+		case n > MaxWindowSize:
+			return fmt.Errorf("window size must be at most %d", MaxWindowSize)
+		case (n & (n - 1)) != 0:
+			return errors.New("window size must be a power of 2")
+		}
+
+		o.windowSize = n
+		o.customWindow = true
+		if o.blockSize > o.windowSize {
+			o.blockSize = o.windowSize
+		}
+		return nil
+	}
+}
+
+// WithEncoderPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 1GB, 1<<30 bytes.
+// The padded area will be filled with data from crypto/rand.Reader.
+// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this.
+func WithEncoderPadding(n int) EOption {
+	return func(o *encoderOptions) error {
+		if n <= 0 {
+			return fmt.Errorf("padding must be at least 1")
+		}
+		// No need to waste our time.
+		if n == 1 {
+			o.pad = 0
+		}
+		if n > 1<<30 {
+			return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ")
+		}
+		o.pad = n
+		return nil
+	}
+}
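
A short sketch of the padding guarantee described above, again assuming the package's `NewWriter`/`EncodeAll` entry points; with an empty destination the output length comes out as a multiple of the padding value:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderPadding(4096))
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	out := enc.EncodeAll([]byte("tiny input"), nil)
	// The tail is a skippable frame filled from crypto/rand.Reader.
	fmt.Println(len(out), len(out)%4096 == 0) // 4096 true
}
```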
+
+// EncoderLevel predefines encoder compression levels.
+// Only use the constants made available, since the actual mapping
+// of these values is very likely to change, and your compression could change
+// unpredictably when upgrading the library.
+type EncoderLevel int
+
+const (
+	speedNotSet EncoderLevel = iota
+
+	// SpeedFastest will choose the fastest reasonable compression.
+	// This is roughly equivalent to the fastest Zstandard mode.
+	SpeedFastest
+
+	// SpeedDefault is the default "pretty fast" compression option.
+	// This is roughly equivalent to the default Zstandard mode (level 3).
+	SpeedDefault
+
+	// SpeedBetterCompression will yield better compression than the default.
+	// Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage.
+	// By using this, notice that CPU usage may go up in the future.
+	SpeedBetterCompression
+
+	// SpeedBestCompression will choose the best available compression option.
+	// This will offer the best compression no matter the CPU cost.
+	SpeedBestCompression
+
+	// speedLast should be kept as the last actual compression option.
+	// This is not for external usage, but is used to keep track of the valid options.
+	speedLast
+)
+
+// EncoderLevelFromString will convert a string representation of an encoding level back
+// to a compression level. The comparison is not case sensitive.
+// If the string wasn't recognized, (false, SpeedDefault) will be returned.
+func EncoderLevelFromString(s string) (bool, EncoderLevel) {
+	for l := speedNotSet + 1; l < speedLast; l++ {
+		if strings.EqualFold(s, l.String()) {
+			return true, l
+		}
+	}
+	return false, SpeedDefault
+}
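
A small sketch of mapping a user-supplied level name (for example from a CLI flag; the flag itself is made up) through `EncoderLevelFromString`:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	ok, level := zstd.EncoderLevelFromString("BETTER")
	fmt.Println(ok, level) // true better (matching is case insensitive)

	ok, level = zstd.EncoderLevelFromString("turbo")
	fmt.Println(ok, level) // false default (unknown names fall back to SpeedDefault)
}
```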
+
+// EncoderLevelFromZstd will return an encoder level that closest matches the compression
+// ratio of a specific zstd compression level.
+// Many input values will provide the same compression level.
+func EncoderLevelFromZstd(level int) EncoderLevel {
+	switch {
+	case level < 3:
+		return SpeedFastest
+	case level >= 3 && level < 6:
+		return SpeedDefault
+	case level >= 6 && level < 10:
+		return SpeedBetterCompression
+	case level >= 10:
+		return SpeedBetterCompression
+	}
+	return SpeedDefault
+}
+
+// String provides a string representation of the compression level.
+func (e EncoderLevel) String() string {
+	switch e {
+	case SpeedFastest:
+		return "fastest"
+	case SpeedDefault:
+		return "default"
+	case SpeedBetterCompression:
+		return "better"
+	case SpeedBestCompression:
+		return "best"
+	default:
+		return "invalid"
+	}
+}
+
+// WithEncoderLevel specifies a predefined compression level.
+func WithEncoderLevel(l EncoderLevel) EOption {
+	return func(o *encoderOptions) error {
+		switch {
+		case l <= speedNotSet || l >= speedLast:
+			return fmt.Errorf("unknown encoder level")
+		}
+		o.level = l
+		if !o.customWindow {
+			switch o.level {
+			case SpeedFastest:
+				o.windowSize = 4 << 20
+			case SpeedDefault:
+				o.windowSize = 8 << 20
+			case SpeedBetterCompression:
+				o.windowSize = 16 << 20
+			case SpeedBestCompression:
+				o.windowSize = 32 << 20
+			}
+		}
+		if !o.customALEntropy {
+			o.allLitEntropy = l > SpeedFastest
+		}
+
+		return nil
+	}
+}
+
+// WithZeroFrames will encode 0 length input as full frames.
+// This can be needed for compatibility with zstandard usage,
+// but is not needed for this package.
+func WithZeroFrames(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.fullZero = b
+		return nil
+	}
+}
+
+// WithAllLitEntropyCompression will apply entropy compression if no matches are found.
+// Disabling this will skip incompressible data faster, but in cases with no matches
+// and a skewed character distribution, compression is lost.
+// Default value depends on the compression level selected.
+func WithAllLitEntropyCompression(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.customALEntropy = true
+		o.allLitEntropy = b
+		return nil
+	}
+}
+
+// WithNoEntropyCompression will always skip entropy compression of literals.
+// This can be useful if the content has matches, but is unlikely to benefit from entropy
+// compression. Usually the slight speed improvement is not worth enabling this.
+func WithNoEntropyCompression(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.noEntropy = b
+		return nil
+	}
+}
+
+// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
+// If this flag is set, data must be regenerated within a single continuous memory segment.
+// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
+// As a consequence, the decoder must allocate a memory segment equal to or larger than the size of your content.
+// In order to preserve the decoder from unreasonable memory requirements,
+// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range.
+// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
+// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
+// If this is not specified, block encodes will automatically choose this based on the input size.
+// This setting has no effect on streamed encodes.
+func WithSingleSegment(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.single = &b
+		return nil
+	}
+}
+
+// WithLowerEncoderMem will, in some cases, trade lower memory usage for
+// slower encoding speed.
+// This will not change the window size, which is the primary setting for reducing
+// memory usage. See WithWindowSize.
+func WithLowerEncoderMem(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.lowMem = b
+		return nil
+	}
+}
+
+// WithEncoderDict registers a dictionary that will be used for encoding.
+// The encoder *may* choose to use no dictionary instead for certain payloads.
+func WithEncoderDict(dict []byte) EOption {
+	return func(o *encoderOptions) error {
+		d, err := loadDict(dict)
+		if err != nil {
+			return err
+		}
+		o.dict = d
+		return nil
+	}
+}
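
A hedged sketch of wiring a dictionary into the encoder; `my.dict` is a placeholder path to a zstd dictionary blob (for example one produced by `zstd --train`), and option application fails if the content is not a valid dictionary:

```go
package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Placeholder path; the file must contain a zstd dictionary.
	dict, err := os.ReadFile("my.dict")
	if err != nil {
		panic(err)
	}
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		panic(err) // returned if the blob fails dictionary validation
	}
	defer enc.Close()

	fmt.Println(len(enc.EncodeAll([]byte("sample"), nil)))
}
```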
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
new file mode 100644
index 0000000..693c5f0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -0,0 +1,494 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"hash"
+	"io"
+	"sync"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type frameDec struct {
+	o      decoderOptions
+	crc    hash.Hash64
+	offset int64
+
+	WindowSize uint64
+
+	// maxWindowSize is the maximum window size to support.
+	// should never be bigger than max-int.
+	maxWindowSize uint64
+
+	// In-order queue of blocks being decoded.
+	decoding chan *blockDec
+
+	// Frame history passed between blocks
+	history history
+
+	rawInput byteBuffer
+
+	// Byte buffer that can be reused for small input blocks.
+	bBuf byteBuf
+
+	FrameContentSize uint64
+	frameDone        sync.WaitGroup
+
+	DictionaryID  *uint32
+	HasCheckSum   bool
+	SingleSegment bool
+
+	// asyncRunning indicates whether the async routine processes input on 'decoding'.
+	asyncRunningMu sync.Mutex
+	asyncRunning   bool
+}
+
+const (
+	// The minimum Window_Size is 1 KB.
+	MinWindowSize = 1 << 10
+	MaxWindowSize = 1 << 29
+)
+
+var (
+	frameMagic          = []byte{0x28, 0xb5, 0x2f, 0xfd}
+	skippableFrameMagic = []byte{0x2a, 0x4d, 0x18}
+)
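
The two magic values drive the frame-skipping loop in reset below: a real frame starts with frameMagic, while skippable frames start with a byte in 0x50..0x5f followed by the three skippable magic bytes. A standalone sketch of the same classification:

```go
package main

import (
	"bytes"
	"fmt"
)

// classify reports whether the first four bytes of b look like a zstd
// frame, a skippable frame, or something else, mirroring the check in
// frameDec.reset.
func classify(b []byte) string {
	frameMagic := []byte{0x28, 0xb5, 0x2f, 0xfd}
	skippableMagic := []byte{0x2a, 0x4d, 0x18} // bytes 1..3; byte 0 is 0x50..0x5f
	switch {
	case len(b) < 4:
		return "short"
	case bytes.Equal(b[:4], frameMagic):
		return "zstd frame"
	case b[0]&0xf0 == 0x50 && bytes.Equal(b[1:4], skippableMagic):
		return "skippable frame"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(classify([]byte{0x28, 0xb5, 0x2f, 0xfd})) // zstd frame
	fmt.Println(classify([]byte{0x5a, 0x2a, 0x4d, 0x18})) // skippable frame
}
```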
+
+func newFrameDec(o decoderOptions) *frameDec {
+	d := frameDec{
+		o:             o,
+		maxWindowSize: MaxWindowSize,
+	}
+	if d.maxWindowSize > o.maxDecodedSize {
+		d.maxWindowSize = o.maxDecodedSize
+	}
+	return &d
+}
+
+// reset will read the frame header and prepare for block decoding.
+// If nothing can be read from the input, io.EOF will be returned.
+// Any other error indicates that the stream contained data, but
+// there was a problem.
+func (d *frameDec) reset(br byteBuffer) error {
+	d.HasCheckSum = false
+	d.WindowSize = 0
+	var b []byte
+	for {
+		b = br.readSmall(4)
+		if b == nil {
+			return io.EOF
+		}
+		if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
+			if debug {
+				println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic))
+			}
+			// Break if not skippable frame.
+			break
+		}
+		// Read size to skip
+		b = br.readSmall(4)
+		if b == nil {
+			println("Reading Frame Size EOF")
+			return io.ErrUnexpectedEOF
+		}
+		n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+		println("Skipping frame with", n, "bytes.")
+		err := br.skipN(int(n))
+		if err != nil {
+			if debug {
+				println("Reading discarded frame", err)
+			}
+			return err
+		}
+	}
+	if !bytes.Equal(b, frameMagic) {
+		println("Got magic numbers: ", b, "want:", frameMagic)
+		return ErrMagicMismatch
+	}
+
+	// Read Frame_Header_Descriptor
+	fhd, err := br.readByte()
+	if err != nil {
+		println("Reading Frame_Header_Descriptor", err)
+		return err
+	}
+	d.SingleSegment = fhd&(1<<5) != 0
+
+	if fhd&(1<<3) != 0 {
+		return errors.New("reserved bit set on frame header")
+	}
+
+	// Read Window_Descriptor
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
+	d.WindowSize = 0
+	if !d.SingleSegment {
+		wd, err := br.readByte()
+		if err != nil {
+			println("Reading Window_Descriptor", err)
+			return err
+		}
+		printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+		windowLog := 10 + (wd >> 3)
+		windowBase := uint64(1) << windowLog
+		windowAdd := (windowBase / 8) * uint64(wd&0x7)
+		d.WindowSize = windowBase + windowAdd
+	}
+
+	// Read Dictionary_ID
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
+	d.DictionaryID = nil
+	if size := fhd & 3; size != 0 {
+		if size == 3 {
+			size = 4
+		}
+		b = br.readSmall(int(size))
+		if b == nil {
+			if debug {
+				println("Reading Dictionary_ID", io.ErrUnexpectedEOF)
+			}
+			return io.ErrUnexpectedEOF
+		}
+		var id uint32
+		switch size {
+		case 1:
+			id = uint32(b[0])
+		case 2:
+			id = uint32(b[0]) | (uint32(b[1]) << 8)
+		case 4:
+			id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+		}
+		if debug {
+			println("Dict size", size, "ID:", id)
+		}
+		if id > 0 {
+			// ID 0 means "sorry, no dictionary anyway".
+			// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
+			d.DictionaryID = &id
+		}
+	}
+
+	// Read Frame_Content_Size
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size
+	var fcsSize int
+	v := fhd >> 6
+	switch v {
+	case 0:
+		if d.SingleSegment {
+			fcsSize = 1
+		}
+	default:
+		fcsSize = 1 << v
+	}
+	d.FrameContentSize = 0
+	if fcsSize > 0 {
+		b := br.readSmall(fcsSize)
+		if b == nil {
+			println("Reading Frame content", io.ErrUnexpectedEOF)
+			return io.ErrUnexpectedEOF
+		}
+		switch fcsSize {
+		case 1:
+			d.FrameContentSize = uint64(b[0])
+		case 2:
+			// When FCS_Field_Size is 2, the offset of 256 is added.
+			d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256
+		case 4:
+			d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
+		case 8:
+			d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+			d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
+			d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
+		}
+		if debug {
+			println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
+		}
+	}
+	// Move this to shared.
+	d.HasCheckSum = fhd&(1<<2) != 0
+	if d.HasCheckSum {
+		if d.crc == nil {
+			d.crc = xxhash.New()
+		}
+		d.crc.Reset()
+	}
+
+	if d.WindowSize == 0 && d.SingleSegment {
+		// We may not need window in this case.
+		d.WindowSize = d.FrameContentSize
+		if d.WindowSize < MinWindowSize {
+			d.WindowSize = MinWindowSize
+		}
+	}
+
+	if d.WindowSize > d.maxWindowSize {
+		printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize)
+		return ErrWindowSizeExceeded
+	}
+	// The minimum Window_Size is 1 KB.
+	if d.WindowSize < MinWindowSize {
+		println("got window size: ", d.WindowSize)
+		return ErrWindowSizeTooSmall
+	}
+	d.history.windowSize = int(d.WindowSize)
+	if d.o.lowMem && d.history.windowSize < maxBlockSize {
+		d.history.maxSize = d.history.windowSize * 2
+	} else {
+		d.history.maxSize = d.history.windowSize + maxBlockSize
+	}
+	// history contains input - maybe we do something
+	d.rawInput = br
+	return nil
+}
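
The Window_Descriptor byte read above packs an exponent (upper five bits, offset by 10) and a 1/8-step mantissa (lower three bits). A small standalone sketch of the same arithmetic with a few worked values:

```go
package main

import "fmt"

// windowSize mirrors the Window_Descriptor math in frameDec.reset:
// exponent in the upper 5 bits (offset by 10), 1/8-step mantissa in the
// lower 3 bits.
func windowSize(wd byte) uint64 {
	windowLog := 10 + uint64(wd>>3)
	windowBase := uint64(1) << windowLog
	windowAdd := (windowBase / 8) * uint64(wd&0x7)
	return windowBase + windowAdd
}

func main() {
	fmt.Println(windowSize(0x00)) // 1024: the 1 KB minimum
	fmt.Println(windowSize(0x0a)) // 2560: 2 KB base plus 2 * 256 B
	fmt.Println(windowSize(0x88)) // 134217728: 128 MB, mantissa 0
}
```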
+
+// next will start decoding the next block from stream.
+func (d *frameDec) next(block *blockDec) error {
+	if debug {
+		printf("decoding new block %p:%p", block, block.data)
+	}
+	err := block.reset(d.rawInput, d.WindowSize)
+	if err != nil {
+		println("block error:", err)
+		// Signal the frame decoder we have a problem.
+		d.sendErr(block, err)
+		return err
+	}
+	block.input <- struct{}{}
+	if debug {
+		println("next block:", block)
+	}
+	d.asyncRunningMu.Lock()
+	defer d.asyncRunningMu.Unlock()
+	if !d.asyncRunning {
+		return nil
+	}
+	if block.Last {
+		// We indicate the frame is done by sending io.EOF
+		d.decoding <- block
+		return io.EOF
+	}
+	d.decoding <- block
+	return nil
+}
+
+// sendErr will queue an error block on the frame.
+// This will cause the frame decoder to return when it encounters the block.
+// Returns true if the block was queued.
+func (d *frameDec) sendErr(block *blockDec, err error) bool {
+	d.asyncRunningMu.Lock()
+	defer d.asyncRunningMu.Unlock()
+	if !d.asyncRunning {
+		return false
+	}
+
+	println("sending error", err.Error())
+	block.sendErr(err)
+	d.decoding <- block
+	return true
+}
+
+// checkCRC will check the checksum if the frame has one.
+// Will return ErrCRCMismatch if the CRC check fails, otherwise nil.
+func (d *frameDec) checkCRC() error {
+	if !d.HasCheckSum {
+		return nil
+	}
+	var tmp [4]byte
+	got := d.crc.Sum64()
+	// Flip to match file order.
+	tmp[0] = byte(got >> 0)
+	tmp[1] = byte(got >> 8)
+	tmp[2] = byte(got >> 16)
+	tmp[3] = byte(got >> 24)
+
+	// We can overwrite upper tmp now
+	want := d.rawInput.readSmall(4)
+	if want == nil {
+		println("CRC missing?")
+		return io.ErrUnexpectedEOF
+	}
+
+	if !bytes.Equal(tmp[:], want) {
+		if debug {
+			println("CRC Check Failed:", tmp[:], "!=", want)
+		}
+		return ErrCRCMismatch
+	}
+	if debug {
+		println("CRC ok", tmp[:])
+	}
+	return nil
+}
+
+func (d *frameDec) initAsync() {
+	if !d.o.lowMem && !d.SingleSegment {
+		// set max extra size history to 10MB.
+		d.history.maxSize = d.history.windowSize + maxBlockSize*5
+	}
+	// re-alloc if more than one extra block size.
+	if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
+		d.history.b = make([]byte, 0, d.history.maxSize)
+	}
+	if cap(d.history.b) < d.history.maxSize {
+		d.history.b = make([]byte, 0, d.history.maxSize)
+	}
+	if cap(d.decoding) < d.o.concurrent {
+		d.decoding = make(chan *blockDec, d.o.concurrent)
+	}
+	if debug {
+		h := d.history
+		printf("history init. len: %d, cap: %d", len(h.b), cap(h.b))
+	}
+	d.asyncRunningMu.Lock()
+	d.asyncRunning = true
+	d.asyncRunningMu.Unlock()
+}
+
+// startDecoder will start decoding blocks and write them to the writer.
+// The decoder will stop as soon as an error occurs or at end of frame.
+// When the frame has finished decoding,
+// frameDec.frameDone is signalled.
+func (d *frameDec) startDecoder(output chan decodeOutput) {
+	written := int64(0)
+
+	defer func() {
+		d.asyncRunningMu.Lock()
+		d.asyncRunning = false
+		d.asyncRunningMu.Unlock()
+
+		// Drain the currently decoding.
+		d.history.error = true
+	flushdone:
+		for {
+			select {
+			case b := <-d.decoding:
+				b.history <- &d.history
+				output <- <-b.result
+			default:
+				break flushdone
+			}
+		}
+		println("frame decoder done, signalling done")
+		d.frameDone.Done()
+	}()
+	// Get decoder for first block.
+	block := <-d.decoding
+	block.history <- &d.history
+	for {
+		var next *blockDec
+		// Get result
+		r := <-block.result
+		if r.err != nil {
+			println("Result contained error", r.err)
+			output <- r
+			return
+		}
+		if debug {
+			println("got result, from ", d.offset, "to", d.offset+int64(len(r.b)))
+			d.offset += int64(len(r.b))
+		}
+		if !block.Last {
+			// Send history to next block
+			select {
+			case next = <-d.decoding:
+				if debug {
+					println("Sending ", len(d.history.b), "bytes as history")
+				}
+				next.history <- &d.history
+			default:
+				// Wait until we have sent the block, so
+				// other decoders can potentially get the decoder.
+				next = nil
+			}
+		}
+
+		// Add checksum, async to decoding.
+		if d.HasCheckSum {
+			n, err := d.crc.Write(r.b)
+			if err != nil {
+				r.err = err
+				if n != len(r.b) {
+					r.err = io.ErrShortWrite
+				}
+				output <- r
+				return
+			}
+		}
+		written += int64(len(r.b))
+		if d.SingleSegment && uint64(written) > d.FrameContentSize {
+			println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
+			r.err = ErrFrameSizeExceeded
+			output <- r
+			return
+		}
+		if block.Last {
+			r.err = d.checkCRC()
+			output <- r
+			return
+		}
+		output <- r
+		if next == nil {
+			// There was no decoder available, we wait for one now that we have sent to the writer.
+			if debug {
+				println("Sending ", len(d.history.b), " bytes as history")
+			}
+			next = <-d.decoding
+			next.history <- &d.history
+		}
+		block = next
+	}
+}
+
+// runDecoder will create a sync decoder that will decode a block of data.
+func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
+	saved := d.history.b
+
+	// We use the history for output to avoid copying it.
+	d.history.b = dst
+	// Store input length, so we only check new data.
+	crcStart := len(dst)
+	var err error
+	for {
+		err = dec.reset(d.rawInput, d.WindowSize)
+		if err != nil {
+			break
+		}
+		if debug {
+			println("next block:", dec)
+		}
+		err = dec.decodeBuf(&d.history)
+		if err != nil || dec.Last {
+			break
+		}
+		if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+			err = ErrDecoderSizeExceeded
+			break
+		}
+		if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
+			println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
+			err = ErrFrameSizeExceeded
+			break
+		}
+	}
+	dst = d.history.b
+	if err == nil {
+		if d.HasCheckSum {
+			var n int
+			n, err = d.crc.Write(dst[crcStart:])
+			if err == nil {
+				if n != len(dst)-crcStart {
+					err = io.ErrShortWrite
+				} else {
+					err = d.checkCRC()
+				}
+			}
+		}
+	}
+	d.history.b = saved
+	return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
new file mode 100644
index 0000000..4ef7f5a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -0,0 +1,137 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+	"math/bits"
+)
+
+type frameHeader struct {
+	ContentSize   uint64
+	WindowSize    uint32
+	SingleSegment bool
+	Checksum      bool
+	DictID        uint32
+}
+
+const maxHeaderSize = 14
+
+func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+	dst = append(dst, frameMagic...)
+	var fhd uint8
+	if f.Checksum {
+		fhd |= 1 << 2
+	}
+	if f.SingleSegment {
+		fhd |= 1 << 5
+	}
+
+	var dictIDContent []byte
+	if f.DictID > 0 {
+		var tmp [4]byte
+		if f.DictID < 256 {
+			fhd |= 1
+			tmp[0] = uint8(f.DictID)
+			dictIDContent = tmp[:1]
+		} else if f.DictID < 1<<16 {
+			fhd |= 2
+			binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID))
+			dictIDContent = tmp[:2]
+		} else {
+			fhd |= 3
+			binary.LittleEndian.PutUint32(tmp[:4], f.DictID)
+			dictIDContent = tmp[:4]
+		}
+	}
+	var fcs uint8
+	if f.ContentSize >= 256 {
+		fcs++
+	}
+	if f.ContentSize >= 65536+256 {
+		fcs++
+	}
+	if f.ContentSize >= 0xffffffff {
+		fcs++
+	}
+
+	fhd |= fcs << 6
+
+	dst = append(dst, fhd)
+	if !f.SingleSegment {
+		const winLogMin = 10
+		windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
+		dst = append(dst, uint8(windowLog))
+	}
+	if f.DictID > 0 {
+		dst = append(dst, dictIDContent...)
+	}
+	switch fcs {
+	case 0:
+		if f.SingleSegment {
+			dst = append(dst, uint8(f.ContentSize))
+		}
+		// Unless SingleSegment is set, frame sizes < 256 are not stored.
+	case 1:
+		f.ContentSize -= 256
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
+	case 2:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
+	case 3:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
+			uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
+	default:
+		panic("invalid fcs")
+	}
+	return dst, nil
+}
+
+const skippableFrameHeader = 4 + 4
+
+// calcSkippableFrame will return the total size to be added so that written
+// becomes divisible by wantMultiple.
+// The returned value is either 0 or at least skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+	if wantMultiple <= 0 {
+		panic("wantMultiple <= 0")
+	}
+	if written < 0 {
+		panic("written < 0")
+	}
+	leftOver := written % wantMultiple
+	if leftOver == 0 {
+		return 0
+	}
+	toAdd := wantMultiple - leftOver
+	for toAdd < skippableFrameHeader {
+		toAdd += wantMultiple
+	}
+	return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of 'total' bytes.
+// total should be >= skippableFrameHeader and < math.MaxUint32.
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+	if total == 0 {
+		return dst, nil
+	}
+	if total < skippableFrameHeader {
+		return dst, fmt.Errorf("requested skippable frame (%d) < 8", total)
+	}
+	if int64(total) > math.MaxUint32 {
+		return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total)
+	}
+	dst = append(dst, 0x50, 0x2a, 0x4d, 0x18)
+	f := uint32(total - skippableFrameHeader)
+	dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24))
+	start := len(dst)
+	dst = append(dst, make([]byte, f)...)
+	_, err := io.ReadFull(r, dst[start:])
+	return dst, err
+}
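
A standalone sketch of the padding arithmetic above: the written size is rounded up to the requested multiple, skipping ahead by one extra multiple whenever the leftover cannot hold the 8-byte skippable-frame header.

```go
package main

import "fmt"

// padTo mirrors calcSkippableFrame: the number of bytes to append so that
// written becomes a multiple of wantMultiple, never smaller than the
// 8-byte skippable frame header (4 bytes magic + 4 bytes length).
func padTo(written, wantMultiple int64) int64 {
	leftOver := written % wantMultiple
	if leftOver == 0 {
		return 0
	}
	toAdd := wantMultiple - leftOver
	for toAdd < 8 {
		toAdd += wantMultiple
	}
	return toAdd
}

func main() {
	fmt.Println(padTo(1000, 512)) // 24: 1000+24 = 1024
	fmt.Println(padTo(1022, 512)) // 514: 2 bytes cannot hold the header, so skip one more multiple
	fmt.Println(padTo(1024, 512)) // 0: already aligned
}
```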
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
new file mode 100644
index 0000000..e6d3d49
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -0,0 +1,385 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 9
+)
+
+const (
+	/*!MEMORY_USAGE :
+	 *  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+	 *  Increasing memory usage improves compression ratio
+	 *  Reduced memory usage can improve speed, due to cache effect
+	 *  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+	maxMemoryUsage = tablelogAbsoluteMax + 2
+
+	maxTableLog    = maxMemoryUsage - 2
+	maxTablesize   = 1 << maxTableLog
+	maxTableMask   = (1 << maxTableLog) - 1
+	minTablelog    = 5
+	maxSymbolValue = 255
+)
+
+// fseDecoder provides temporary storage for FSE decompression.
+type fseDecoder struct {
+	dt             [maxTablesize]decSymbol // Decompression table.
+	symbolLen      uint16                  // Length of active part of the symbol table.
+	actualTableLog uint8                   // Selected tablelog.
+	maxBits        uint8                   // Maximum number of additional bits
+
+	// used for table creation to avoid allocations.
+	stateTable [256]uint16
+	norm       [maxSymbolValue + 1]int16
+	preDefined bool
+}
+
+// tableStep returns the next table index.
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
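
For the table sizes used here (at least 1<<minTablelog) the step returned above is odd, so the `(position + step) & mask` walk in buildDtable visits every cell of a power-of-two table exactly once before returning to zero. A small standalone check of that property:

```go
package main

import "fmt"

func main() {
	// Check that the spreading walk used by buildDtable touches every
	// cell of a power-of-two table exactly once.
	const tableSize = 1 << 6 // any power of two >= 16
	step := (tableSize >> 1) + (tableSize >> 3) + 3
	mask := tableSize - 1

	seen := make([]bool, tableSize)
	pos := 0
	for i := 0; i < tableSize; i++ {
		seen[pos] = true
		pos = (pos + step) & mask
	}
	all := true
	for _, s := range seen {
		all = all && s
	}
	fmt.Println("every position visited:", all, "final position:", pos) // true 0
}
```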
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
+	var (
+		charnum   uint16
+		previous0 bool
+	)
+	if b.remain() < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32NC()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		println("Invalid tablelog:", nbBits)
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 && charnum <= maxSymbol {
+		if previous0 {
+			//println("prev0")
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				//println("24 x 0")
+				n0 += 24
+				if r := b.remain(); r > 5 {
+					b.advance(2)
+					// The check above should make sure we can read 32 bits
+					bitStream = b.Uint32NC() >> bitCount
+				} else {
+					// end of bit stream
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			//printf("bitstream: %d, 0b%b", bitStream&3, bitStream)
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			//println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0)
+			for charnum < n0 {
+				s.norm[uint8(charnum)] = 0
+				charnum++
+			}
+
+			if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				// The check above should make sure we can read 32 bits
+				bitStream = b.Uint32NC() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*threshold - 1) - remaining
+		var count int32
+
+		if int32(bitStream)&(threshold-1) < max {
+			count = int32(bitStream) & (threshold - 1)
+			if debugAsserts && nbBits < 1 {
+				panic("nbBits underflow")
+			}
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		// extra accuracy
+		count--
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+			// The check above should make sure we can read 32 bits
+			bitStream = b.Uint32NC() >> (bitCount & 31)
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+			bitStream = b.Uint32() >> (bitCount & 31)
+		}
+	}
+	s.symbolLen = charnum
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	// println(s.norm[:s.symbolLen], s.symbolLen)
+	return s.buildDtable()
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+	return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+	return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+	return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+	return uint16(d >> 16)
+}
+
+func (d decSymbol) baseline() uint32 {
+	return uint32(d >> 32)
+}
+
+func (d decSymbol) baselineInt() int {
+	return int(d >> 32)
+}
+
+func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
+	*d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+	const mask = 0xffffffffffffff00
+	*d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+	const mask = 0xffffffffffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+	const mask = 0xffffffff0000ffff
+	*d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setBaseline(baseline uint32) {
+	const mask = 0xffffffff
+	*d = (*d & mask) | decSymbol(baseline)<<32
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+	const mask = 0xffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
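
decSymbol packs four fields into one uint64 because that is faster than a struct with separate members. A quick round-trip sketch of the bit layout used by newDecSymbol and the accessors above:

```go
package main

import "fmt"

// pack mirrors newDecSymbol: nbBits in byte 0, addBits in byte 1,
// newState in bytes 2-3 and baseline in bytes 4-7.
func pack(nbits, addBits uint8, newState uint16, baseline uint32) uint64 {
	return uint64(nbits) | uint64(addBits)<<8 | uint64(newState)<<16 | uint64(baseline)<<32
}

func main() {
	v := pack(5, 3, 0x1234, 0xdeadbeef)
	fmt.Printf("nbBits=%d addBits=%d newState=%#x baseline=%#x\n",
		uint8(v), uint8(v>>8), uint16(v>>16), uint32(v>>32))
	// Output: nbBits=5 addBits=3 newState=0x1234 baseline=0xdeadbeef
}
```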
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+	if int(symb) >= len(t) {
+		return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+	}
+	lu := t[symb]
+	return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder to RLE mode.
+func (s *fseDecoder) setRLE(symbol decSymbol) {
+	s.actualTableLog = 0
+	s.maxBits = symbol.addBits()
+	s.dt[0] = symbol
+}
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	symbolNext := s.stateTable[:256]
+
+	// Init, lay down lowprob symbols
+	{
+		for i, v := range s.norm[:s.symbolLen] {
+			if v == -1 {
+				s.dt[highThreshold].setAddBits(uint8(i))
+				highThreshold--
+				symbolNext[i] = 1
+			} else {
+				symbolNext[i] = uint16(v)
+			}
+		}
+	}
+	// Spread symbols
+	{
+		tableMask := tableSize - 1
+		step := tableStep(tableSize)
+		position := uint32(0)
+		for ss, v := range s.norm[:s.symbolLen] {
+			for i := 0; i < int(v); i++ {
+				s.dt[position].setAddBits(uint8(ss))
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					// lowprob area
+					position = (position + step) & tableMask
+				}
+			}
+		}
+		if position != 0 {
+			// position must reach all cells once, otherwise normalizedCounter is incorrect
+			return errors.New("corrupted input (position != 0)")
+		}
+	}
+
+	// Build Decoding table
+	{
+		tableSize := uint16(1 << s.actualTableLog)
+		for u, v := range s.dt[:tableSize] {
+			symbol := v.addBits()
+			nextState := symbolNext[symbol]
+			symbolNext[symbol] = nextState + 1
+			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+			s.dt[u&maxTableMask].setNBits(nBits)
+			newState := (nextState << nBits) - tableSize
+			if newState > tableSize {
+				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+			}
+			if newState == uint16(u) && nBits == 0 {
+				// Seems weird that this is possible with nbits > 0.
+				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+			}
+			s.dt[u&maxTableMask].setNewState(newState)
+		}
+	}
+	return nil
+}
+
+// transform will transform the decoder table into a table usable for
+// decoding without having to apply the transformation while decoding.
+// The state will contain the base value and the number of bits to read.
+func (s *fseDecoder) transform(t []baseOffset) error {
+	tableSize := uint16(1 << s.actualTableLog)
+	s.maxBits = 0
+	for i, v := range s.dt[:tableSize] {
+		add := v.addBits()
+		if int(add) >= len(t) {
+			return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t))
+		}
+		lu := t[add]
+		if lu.addBits > s.maxBits {
+			s.maxBits = lu.addBits
+		}
+		v.setExt(lu.addBits, lu.baseLine)
+		s.dt[i] = v
+	}
+	return nil
+}
+
+type fseState struct {
+	dt    []decSymbol
+	state decSymbol
+}
+
+// init initializes the decoder state with the first state read from the bit reader.
+func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
+	s.dt = dt
+	br.fill()
+	s.state = dt[br.getBits(tableLog)]
+}
+
+// next returns the current symbol and sets the next state.
+// At least tablelog bits must be available in the bit reader.
+func (s *fseState) next(br *bitReader) {
+	lowBits := uint16(br.getBits(s.state.nbBits()))
+	s.state = s.dt[s.state.newState()+lowBits]
+}
+
+// finished returns true if all bits have been read from the bitstream
+// and the next state would require reading bits from the input.
+func (s *fseState) finished(br *bitReader) bool {
+	return br.finished() && s.state.nbBits() > 0
+}
+
+// final returns the current state symbol without decoding the next.
+func (s *fseState) final() (int, uint8) {
+	return s.state.baselineInt(), s.state.addBits()
+}
+
+// final returns the current state symbol without decoding the next.
+func (s decSymbol) final() (int, uint8) {
+	return s.baselineInt(), s.addBits()
+}
+
+// nextFast returns the next symbol and sets the next state.
+// This can only be used if no symbols are 0 bits.
+// At least tablelog bits must be available in the bit reader.
+func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
+	lowBits := uint16(br.getBitsFast(s.state.nbBits()))
+	s.state = s.dt[s.state.newState()+lowBits]
+	return s.state.baseline(), s.state.addBits()
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
new file mode 100644
index 0000000..c74681b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -0,0 +1,725 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"math"
+)
+
+const (
+	// For encoding we only support tables up to maxEncTableLog (8 bits).
+	maxEncTableLog    = 8
+	maxEncTablesize   = 1 << maxTableLog
+	maxEncTableMask   = (1 << maxTableLog) - 1
+	minEncTablelog    = 5
+	maxEncSymbolValue = maxMatchLengthSymbol
+)
+
+// fseEncoder provides temporary storage for FSE compression.
+type fseEncoder struct {
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	ct             cTable // Compression tables.
+	maxCount       int    // count of the most probable symbol
+	zeroBits       bool   // a symbol has prob > 50%, so 0-bit output is possible.
+	clearCount     bool   // clear count
+	useRLE         bool   // This encoder is for RLE
+	preDefined     bool   // This encoder is predefined.
+	reUsed         bool   // Set to know when the encoder has been reused.
+	rleVal         uint8  // RLE Symbol
+	maxBits        uint8  // Maximum output bits after transform.
+
+	// TODO: Technically zstd should be fine with 64 bytes.
+	count [256]uint32
+	norm  [256]int16
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaNbBits    uint32
+	deltaFindState int16
+	outBits        uint8
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits)
+}
+
+// Histogram returns the histogram so it can be populated, skipping that step during compression,
+// or inspected once compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *fseEncoder) Histogram() []uint32 {
+	return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *fseEncoder) prepare() (*fseEncoder, error) {
+	if s == nil {
+		s = &fseEncoder{}
+	}
+	s.useRLE = false
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	return s, nil
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *fseEncoder) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < tableSize {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *fseEncoder) buildCTable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	var cumul [256]int16
+
+	s.allocCtable()
+	tableSymbol := s.ct.tableSymbol[:tableSize]
+	// symbol start positions
+	{
+		cumul[0] = 0
+		for ui, v := range s.norm[:s.symbolLen-1] {
+			u := byte(ui) // one less than reference
+			if v == -1 {
+				// Low proba symbol
+				cumul[u+1] = cumul[u] + 1
+				tableSymbol[highThreshold] = u
+				highThreshold--
+			} else {
+				cumul[u+1] = cumul[u] + v
+			}
+		}
+		// Encode last symbol separately to avoid overflowing u
+		u := int(s.symbolLen - 1)
+		v := s.norm[s.symbolLen-1]
+		if v == -1 {
+			// Low proba symbol
+			cumul[u+1] = cumul[u] + 1
+			tableSymbol[highThreshold] = byte(u)
+			highThreshold--
+		} else {
+			cumul[u+1] = cumul[u] + v
+		}
+		if uint32(cumul[s.symbolLen]) != tableSize {
+			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+		}
+		cumul[s.symbolLen] = int16(tableSize) + 1
+	}
+	// Spread symbols
+	s.zeroBits = false
+	{
+		step := tableStep(tableSize)
+		tableMask := tableSize - 1
+		var position uint32
+		// if any symbol > largeLimit, we may have 0 bits output.
+		largeLimit := int16(1 << (s.actualTableLog - 1))
+		for ui, v := range s.norm[:s.symbolLen] {
+			symbol := byte(ui)
+			if v > largeLimit {
+				s.zeroBits = true
+			}
+			for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
+				tableSymbol[position] = symbol
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					position = (position + step) & tableMask
+				} /* Low proba area */
+			}
+		}
+
+		// Check if we have gone through all positions
+		if position != 0 {
+			return errors.New("position!=0")
+		}
+	}
+
+	// Build table
+	table := s.ct.stateTable
+	{
+		tsi := int(tableSize)
+		for u, v := range tableSymbol {
+			// TableU16 : sorted by symbol order; gives next state value
+			table[cumul[v]] = uint16(tsi + u)
+			cumul[v]++
+		}
+	}
+
+	// Build Symbol Transformation Table
+	{
+		total := int16(0)
+		symbolTT := s.ct.symbolTT[:s.symbolLen]
+		tableLog := s.actualTableLog
+		tl := (uint32(tableLog) << 16) - (1 << tableLog)
+		for i, v := range s.norm[:s.symbolLen] {
+			switch v {
+			case 0:
+			case -1, 1:
+				symbolTT[i].deltaNbBits = tl
+				symbolTT[i].deltaFindState = total - 1
+				total++
+			default:
+				maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
+				minStatePlus := uint32(v) << maxBitsOut
+				symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
+				symbolTT[i].deltaFindState = total - v
+				total += v
+			}
+		}
+		if total != int16(tableSize) {
+			return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
+		}
+	}
+	return nil
+}
+
+var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
+
+func (s *fseEncoder) setRLE(val byte) {
+	s.allocCtable()
+	s.actualTableLog = 0
+	s.ct.stateTable = s.ct.stateTable[:1]
+	s.ct.symbolTT[val] = symbolTransform{
+		deltaFindState: 0,
+		deltaNbBits:    0,
+	}
+	if debug {
+		println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val])
+	}
+	s.rleVal = val
+	s.useRLE = true
+}
+
+// setBits will set output bits for the transform.
+// if nil is provided, the number of bits is equal to the index.
+func (s *fseEncoder) setBits(transform []byte) {
+	if s.reUsed || s.preDefined {
+		return
+	}
+	if s.useRLE {
+		if transform == nil {
+			s.ct.symbolTT[s.rleVal].outBits = s.rleVal
+			s.maxBits = s.rleVal
+			return
+		}
+		s.maxBits = transform[s.rleVal]
+		s.ct.symbolTT[s.rleVal].outBits = s.maxBits
+		return
+	}
+	if transform == nil {
+		for i := range s.ct.symbolTT[:s.symbolLen] {
+			s.ct.symbolTT[i].outBits = uint8(i)
+		}
+		s.maxBits = uint8(s.symbolLen - 1)
+		return
+	}
+	s.maxBits = 0
+	for i, v := range transform[:s.symbolLen] {
+		s.ct.symbolTT[i].outBits = v
+		if v > s.maxBits {
+			// We could assume bits are always increasing, but we play it safe.
+			s.maxBits = v
+		}
+	}
+}
+
+// normalizeCount will normalize the count of the symbols so
+// the total is equal to the table size.
+// If successful, compression tables will also be made ready.
+func (s *fseEncoder) normalizeCount(length int) error {
+	if s.reUsed {
+		return nil
+	}
+	s.optimalTableLog(length)
+	var (
+		tableLog          = s.actualTableLog
+		scale             = 62 - uint64(tableLog)
+		step              = (1 << 62) / uint64(length)
+		vStep             = uint64(1) << (scale - 20)
+		stillToDistribute = int16(1 << tableLog)
+		largest           int
+		largestP          int16
+		lowThreshold      = (uint32)(length >> tableLog)
+	)
+	if s.maxCount == length {
+		s.useRLE = true
+		return nil
+	}
+	s.useRLE = false
+	for i, cnt := range s.count[:s.symbolLen] {
+		// already handled
+		// if (count[s] == s.length) return 0;   /* rle special case */
+
+		if cnt == 0 {
+			s.norm[i] = 0
+			continue
+		}
+		if cnt <= lowThreshold {
+			s.norm[i] = -1
+			stillToDistribute--
+		} else {
+			proba := (int16)((uint64(cnt) * step) >> scale)
+			if proba < 8 {
+				restToBeat := vStep * uint64(rtbTable[proba])
+				v := uint64(cnt)*step - (uint64(proba) << scale)
+				if v > restToBeat {
+					proba++
+				}
+			}
+			if proba > largestP {
+				largestP = proba
+				largest = i
+			}
+			s.norm[i] = proba
+			stillToDistribute -= proba
+		}
+	}
+
+	if -stillToDistribute >= (s.norm[largest] >> 1) {
+		// corner case, need another normalization method
+		err := s.normalizeCount2(length)
+		if err != nil {
+			return err
+		}
+		if debugAsserts {
+			err = s.validateNorm()
+			if err != nil {
+				return err
+			}
+		}
+		return s.buildCTable()
+	}
+	s.norm[largest] += stillToDistribute
+	if debugAsserts {
+		err := s.validateNorm()
+		if err != nil {
+			return err
+		}
+	}
+	return s.buildCTable()
+}
+
+// Secondary normalization method.
+// To be used when primary method fails.
+func (s *fseEncoder) normalizeCount2(length int) error {
+	const notYetAssigned = -2
+	var (
+		distributed  uint32
+		total        = uint32(length)
+		tableLog     = s.actualTableLog
+		lowThreshold = total >> tableLog
+		lowOne       = (total * 3) >> (tableLog + 1)
+	)
+	for i, cnt := range s.count[:s.symbolLen] {
+		if cnt == 0 {
+			s.norm[i] = 0
+			continue
+		}
+		if cnt <= lowThreshold {
+			s.norm[i] = -1
+			distributed++
+			total -= cnt
+			continue
+		}
+		if cnt <= lowOne {
+			s.norm[i] = 1
+			distributed++
+			total -= cnt
+			continue
+		}
+		s.norm[i] = notYetAssigned
+	}
+	toDistribute := (1 << tableLog) - distributed
+
+	if (total / toDistribute) > lowOne {
+		// risk of rounding to zero
+		lowOne = (total * 3) / (toDistribute * 2)
+		for i, cnt := range s.count[:s.symbolLen] {
+			if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
+				s.norm[i] = 1
+				distributed++
+				total -= cnt
+				continue
+			}
+		}
+		toDistribute = (1 << tableLog) - distributed
+	}
+	if distributed == uint32(s.symbolLen)+1 {
+		// all values are pretty poor;
+		//   probably incompressible data (should have already been detected);
+		//   find max, then give all remaining points to max
+		var maxV int
+		var maxC uint32
+		for i, cnt := range s.count[:s.symbolLen] {
+			if cnt > maxC {
+				maxV = i
+				maxC = cnt
+			}
+		}
+		s.norm[maxV] += int16(toDistribute)
+		return nil
+	}
+
+	if total == 0 {
+		// all of the symbols were low enough for the lowOne or lowThreshold
+		for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) {
+			if s.norm[i] > 0 {
+				toDistribute--
+				s.norm[i]++
+			}
+		}
+		return nil
+	}
+
+	var (
+		vStepLog = 62 - uint64(tableLog)
+		mid      = uint64((1 << (vStepLog - 1)) - 1)
+		rStep    = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining
+		tmpTotal = mid
+	)
+	for i, cnt := range s.count[:s.symbolLen] {
+		if s.norm[i] == notYetAssigned {
+			var (
+				end    = tmpTotal + uint64(cnt)*rStep
+				sStart = uint32(tmpTotal >> vStepLog)
+				sEnd   = uint32(end >> vStepLog)
+				weight = sEnd - sStart
+			)
+			if weight < 1 {
+				return errors.New("weight < 1")
+			}
+			s.norm[i] = int16(weight)
+			tmpTotal = end
+		}
+	}
+	return nil
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *fseEncoder) optimalTableLog(length int) {
+	tableLog := uint8(maxEncTableLog)
+	minBitsSrc := highBit(uint32(length)) + 1
+	minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2
+	minBits := uint8(minBitsSymbols)
+	if minBitsSrc < minBitsSymbols {
+		minBits = uint8(minBitsSrc)
+	}
+
+	maxBitsSrc := uint8(highBit(uint32(length-1))) - 2
+	if maxBitsSrc < tableLog {
+		// Accuracy can be reduced
+		tableLog = maxBitsSrc
+	}
+	if minBits > tableLog {
+		tableLog = minBits
+	}
+	// Need a minimum to safely represent all symbol values
+	if tableLog < minEncTablelog {
+		tableLog = minEncTablelog
+	}
+	if tableLog > maxEncTableLog {
+		tableLog = maxEncTableLog
+	}
+	s.actualTableLog = tableLog
+}
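
A standalone sketch of the same table-log heuristic, using math/bits instead of the package's highBit helper; it clamps between a minimum driven by the alphabet size and a maximum driven by the input length (the sample values are only illustrative):

```go
package main

import (
	"fmt"
	"math/bits"
)

// tableLogFor mirrors the clamping in optimalTableLog for the encoder's
// 5..8 bit range: enough accuracy for the alphabet, but no more than the
// input length can support.
func tableLogFor(length, symbols int) int {
	const minLog, maxLog = 5, 8
	tableLog := maxLog
	minBitsSrc := bits.Len32(uint32(length))            // highBit(length) + 1
	minBitsSymbols := bits.Len32(uint32(symbols-1)) + 1 // highBit(symbols-1) + 2
	minBits := minBitsSymbols
	if minBitsSrc < minBitsSymbols {
		minBits = minBitsSrc
	}
	maxBitsSrc := bits.Len32(uint32(length-1)) - 3 // highBit(length-1) - 2
	if maxBitsSrc < tableLog {
		tableLog = maxBitsSrc
	}
	if minBits > tableLog {
		tableLog = minBits
	}
	if tableLog < minLog {
		tableLog = minLog
	}
	if tableLog > maxLog {
		tableLog = maxLog
	}
	return tableLog
}

func main() {
	fmt.Println(tableLogFor(100, 36))   // 7: a short block reduces accuracy, the alphabet pulls it back up
	fmt.Println(tableLogFor(1<<16, 36)) // 8: large blocks are capped at maxEncTableLog
}
```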
+
+// validateNorm validates the normalized histogram table.
+func (s *fseEncoder) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.count[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+	if s.useRLE {
+		return append(out, s.rleVal), nil
+	}
+	if s.preDefined || s.reUsed {
+		// Never write predefined.
+		return out, nil
+	}
+
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		// maximum header size plus 2 extra bytes for final output if bitCount == 0.
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minEncTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+		outP      = len(out)
+	)
+	if cap(out) < outP+maxHeaderSize {
+		out = append(out, make([]byte, maxHeaderSize*3)...)
+		out = out[:len(out)-maxHeaderSize*3]
+	}
+	out = out[:outP+maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return nil, errors.New("internal error: remaining < 1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	if outP+2 > len(out) {
+		return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen])
+	}
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += int((bitCount + 7) / 8)
+
+	if charnum > s.symbolLen {
+		return nil, errors.New("internal error: charnum > s.symbolLen")
+	}
+	return out[:outP], nil
+}
+
+// bitCost returns the approximate symbol cost, as a fractional value, using a fixed-point format (accuracyLog fractional bits).
+// note 1: assumes symbolValue is valid (<= maxSymbolValue).
+// note 2: if freq[symbolValue]==0, returns a fake cost of tableLog+1 bits.
+func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
+	minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
+	threshold := (minNbBits + 1) << 16
+	if debugAsserts {
+		if !(s.actualTableLog < 16) {
+			panic("!s.actualTableLog < 16")
+		}
+		// ensure enough room for renormalization double shift
+		if !(uint8(accuracyLog) < 31-s.actualTableLog) {
+			panic("!uint8(accuracyLog) < 31-s.actualTableLog")
+		}
+	}
+	tableSize := uint32(1) << s.actualTableLog
+	deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize)
+	// linear interpolation (very approximate)
+	normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
+	bitMultiplier := uint32(1) << accuracyLog
+	if debugAsserts {
+		if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
+			panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
+		}
+		if normalizedDeltaFromThreshold > bitMultiplier {
+			panic("normalizedDeltaFromThreshold > bitMultiplier")
+		}
+	}
+	return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold
+}
+
+// Returns the cost in bits of encoding the distribution in count using ctable.
+// Histogram should only be up to the last non-zero symbol.
+// Returns math.MaxUint32 if the ctable cannot represent all the symbols in count.
+func (s *fseEncoder) approxSize(hist []uint32) uint32 {
+	if int(s.symbolLen) < len(hist) {
+		// More symbols than we have.
+		return math.MaxUint32
+	}
+	if s.useRLE {
+		// We will never reuse RLE encoders.
+		return math.MaxUint32
+	}
+	const kAccuracyLog = 8
+	badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog
+	var cost uint32
+	for i, v := range hist {
+		if v == 0 {
+			continue
+		}
+		if s.norm[i] == 0 {
+			return math.MaxUint32
+		}
+		bitCost := s.bitCost(uint8(i), kAccuracyLog)
+		if bitCost > badCost {
+			return math.MaxUint32
+		}
+		cost += v * bitCost
+	}
+	return cost >> kAccuracyLog
+}
+
+// maxHeaderSize returns the maximum header size in bits.
+// This is not the exact size, but we want a penalty for new tables anyway.
+func (s *fseEncoder) maxHeaderSize() uint32 {
+	if s.preDefined {
+		return 0
+	}
+	if s.useRLE {
+		return 8
+	}
+	return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8
+}
+
+// cState contains the compression state of a stream.
+type cState struct {
+	bw         *bitWriter
+	stateTable []uint16
+	state      uint16
+}
+
+// init will initialize the compression state to the first symbol of the stream.
+func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
+	c.bw = bw
+	c.stateTable = ct.stateTable
+	if len(c.stateTable) == 1 {
+		// RLE
+		c.stateTable[0] = uint16(0)
+		c.state = 0
+		return
+	}
+	nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
+	im := int32((nbBitsOut << 16) - first.deltaNbBits)
+	lu := (im >> nbBitsOut) + int32(first.deltaFindState)
+	c.state = c.stateTable[lu]
+}
+
+// encode the output symbol provided and write it to the bitstream.
+func (c *cState) encode(symbolTT symbolTransform) {
+	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
+	dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
+	c.bw.addBits16NC(c.state, uint8(nbBitsOut))
+	c.state = c.stateTable[dstState]
+}
+
+// flush will write the tablelog to the output and flush the remaining full bytes.
+func (c *cState) flush(tableLog uint8) {
+	c.bw.flush32()
+	c.bw.addBits16NC(c.state, tableLog)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
new file mode 100644
index 0000000..474cb77
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
@@ -0,0 +1,158 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"math"
+	"sync"
+)
+
+var (
+	// fsePredef are the predefined fse tables as defined here:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+	// These values are already transformed.
+	fsePredef [3]fseDecoder
+
+	// fsePredefEnc are the predefined encoder based on fse tables as defined here:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+	// These values are already transformed.
+	fsePredefEnc [3]fseEncoder
+
+	// symbolTableX contains the transformations needed for each type as defined in
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
+	symbolTableX [3][]baseOffset
+
+	// maxTableSymbol is the biggest supported symbol for each table type
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
+	maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol}
+
+	// bitTables is the bits table for each table.
+	bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]}
+)
+
+type tableIndex uint8
+
+const (
+	// indexes for fsePredef and symbolTableX
+	tableLiteralLengths tableIndex = 0
+	tableOffsets        tableIndex = 1
+	tableMatchLengths   tableIndex = 2
+
+	maxLiteralLengthSymbol = 35
+	maxOffsetLengthSymbol  = 30
+	maxMatchLengthSymbol   = 52
+)
+
+// baseOffset is used for calculating transformations.
+type baseOffset struct {
+	baseLine uint32
+	addBits  uint8
+}
+
+// fillBase will precalculate base offsets with the given bit distributions.
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) {
+	if len(bits) != len(dst) {
+		panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits)))
+	}
+	for i, bit := range bits {
+		if base > math.MaxInt32 {
+			panic("invalid decoding table, base overflows int32")
+		}
+
+		dst[i] = baseOffset{
+			baseLine: base,
+			addBits:  bit,
+		}
+		base += 1 << bit
+	}
+}
+
+var predef sync.Once
+
+func initPredefined() {
+	predef.Do(func() {
+		// Literals length codes
+		tmp := make([]baseOffset, 36)
+		for i := range tmp[:16] {
+			tmp[i] = baseOffset{
+				baseLine: uint32(i),
+				addBits:  0,
+			}
+		}
+		fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+		symbolTableX[tableLiteralLengths] = tmp
+
+		// Match length codes
+		tmp = make([]baseOffset, 53)
+		for i := range tmp[:32] {
+			tmp[i] = baseOffset{
+				// The transformation adds the minimum match length of 3.
+				baseLine: uint32(i) + 3,
+				addBits:  0,
+			}
+		}
+		fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+		symbolTableX[tableMatchLengths] = tmp
+
+		// Offset codes
+		tmp = make([]baseOffset, maxOffsetBits+1)
+		tmp[1] = baseOffset{
+			baseLine: 1,
+			addBits:  1,
+		}
+		fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)
+		symbolTableX[tableOffsets] = tmp
+
+		// Fill predefined tables and transform them.
+		// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+		for i := range fsePredef[:] {
+			f := &fsePredef[i]
+			switch tableIndex(i) {
+			case tableLiteralLengths:
+				// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243
+				f.actualTableLog = 6
+				copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+					2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+					-1, -1, -1, -1})
+				f.symbolLen = 36
+			case tableOffsets:
+				// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281
+				f.actualTableLog = 5
+				copy(f.norm[:], []int16{
+					1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1})
+				f.symbolLen = 29
+			case tableMatchLengths:
+				//https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304
+				f.actualTableLog = 6
+				copy(f.norm[:], []int16{
+					1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
+					-1, -1, -1, -1, -1})
+				f.symbolLen = 53
+			}
+			if err := f.buildDtable(); err != nil {
+				panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+			}
+			if err := f.transform(symbolTableX[i]); err != nil {
+				panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+			}
+			f.preDefined = true
+
+			// Create encoder as well
+			enc := &fsePredefEnc[i]
+			copy(enc.norm[:], f.norm[:])
+			enc.symbolLen = f.symbolLen
+			enc.actualTableLog = f.actualTableLog
+			if err := enc.buildCTable(); err != nil {
+				panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err))
+			}
+			enc.setBits(bitTables[i])
+			enc.preDefined = true
+		}
+	})
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go
new file mode 100644
index 0000000..4a75206
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/hash.go
@@ -0,0 +1,77 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+const (
+	prime3bytes = 506832829
+	prime4bytes = 2654435761
+	prime5bytes = 889523592379
+	prime6bytes = 227718039650203
+	prime7bytes = 58295818150454627
+	prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+// hashLen returns a hash of the lowest mls bytes of u to fit in a hash table with hashLog bits.
+// mls must be >= 4 and <= 8; any other value returns the hash of the lowest 4 bytes.
+// hashLog should always be < 32.
+// Preferably hashLog and mls should be constants.
+// FIXME: the switch does NOT get resolved even if 'mls' is constant,
+// so this cannot be used.
+func hashLen(u uint64, hashLog, mls uint8) uint32 {
+	switch mls {
+	case 5:
+		return hash5(u, hashLog)
+	case 6:
+		return hash6(u, hashLog)
+	case 7:
+		return hash7(u, hashLog)
+	case 8:
+		return hash8(u, hashLog)
+	default:
+		return hash4x64(u, hashLog)
+	}
+}
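+
+// For example (illustrative): hashLen(u, 17, 6) hashes the low 6 bytes of u into a
+// table of 1<<17 entries by dispatching to hash6.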
+
+// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash3(u uint32, h uint8) uint32 {
+	return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
+}
+
+// hash4 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4(u uint32, h uint8) uint32 {
+	return (u * prime4bytes) >> ((32 - h) & 31)
+}
+
+// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4x64(u uint64, h uint8) uint32 {
+	return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
+}
+
+// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash5(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63))
+}
+
+// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash6(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
+}
+
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
+}
+
+// hash8 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash8(u uint64, h uint8) uint32 {
+	return uint32((u * prime8bytes) >> ((64 - h) & 63))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
new file mode 100644
index 0000000..f783e32
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -0,0 +1,89 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"github.com/klauspost/compress/huff0"
+)
+
+// history contains the information transferred between blocks.
+type history struct {
+	b             []byte
+	huffTree      *huff0.Scratch
+	recentOffsets [3]int
+	decoders      sequenceDecs
+	windowSize    int
+	maxSize       int
+	error         bool
+	dict          *dict
+}
+
+// reset will reset the history to the initial state of a frame.
+// The history must already have been initialized to the desired size.
+func (h *history) reset() {
+	h.b = h.b[:0]
+	h.error = false
+	h.recentOffsets = [3]int{1, 4, 8}
+	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+	}
+	if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+	}
+	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+	}
+	h.decoders = sequenceDecs{}
+	if h.huffTree != nil {
+		if h.dict == nil || h.dict.litEnc != h.huffTree {
+			huffDecoderPool.Put(h.huffTree)
+		}
+	}
+	h.huffTree = nil
+	h.dict = nil
+	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
+}
+
+func (h *history) setDict(dict *dict) {
+	if dict == nil {
+		return
+	}
+	h.dict = dict
+	h.decoders.litLengths = dict.llDec
+	h.decoders.offsets = dict.ofDec
+	h.decoders.matchLengths = dict.mlDec
+	h.recentOffsets = dict.offsets
+	h.huffTree = dict.litEnc
+}
+
+// append appends b to the history.
+// It makes room for the new bytes by discarding the oldest history,
+// provided the buffer has been allocated with enough extra capacity.
+func (h *history) append(b []byte) {
+	if len(b) >= h.windowSize {
+		// Discard all history by simply overwriting
+		h.b = h.b[:h.windowSize]
+		copy(h.b, b[len(b)-h.windowSize:])
+		return
+	}
+
+	// If there is space, append it.
+	if len(b) < cap(h.b)-len(h.b) {
+		h.b = append(h.b, b...)
+		return
+	}
+
+	// Move data down so we only have window size left.
+	// We know we have less than window size in b at this point.
+	discard := len(b) + len(h.b) - h.windowSize
+	copy(h.b, h.b[discard:])
+	h.b = h.b[:h.windowSize]
+	copy(h.b[h.windowSize-len(b):], b)
+}
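+
+// For example (illustrative): with windowSize == 8, cap(h.b) == 8 and 8 bytes of
+// history stored, appending 3 new bytes discards the oldest 3 and keeps the most
+// recent 8 bytes.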
+
+// appendKeep appends b to the history without ever discarding anything.
+func (h *history) appendKeep(b []byte) {
+	h.b = append(h.b, b...)
+}
diff --git a/vendor/github.com/cespare/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
similarity index 100%
rename from vendor/github.com/cespare/xxhash/LICENSE.txt
rename to vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
new file mode 100644
index 0000000..69aa3bb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
@@ -0,0 +1,58 @@
+# xxhash
+
+VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
+
+
+[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
+[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
+
+xxhash is a Go implementation of the 64-bit
+[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+    func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
+
+This package provides a fast pure-Go implementation and an even faster
+assembly implementation for amd64.
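+
+A minimal usage sketch (not part of the original README), assuming the upstream
+`github.com/cespare/xxhash` import path:
+
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/cespare/xxhash"
+)
+
+func main() {
+	b := []byte("hello, xxhash")
+	fmt.Printf("%016x\n", xxhash.Sum64(b)) // one-shot hash
+
+	d := xxhash.New()
+	d.Write(b) // Write always returns len(b), nil
+	fmt.Printf("%016x\n", d.Sum64()) // streaming hash, same value
+}
+```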
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| --- | --- | --- |
+| 5 B   |  979.66 MB/s |  1291.17 MB/s  |
+| 100 B | 7475.26 MB/s | 7973.40 MB/s  |
+| 4 KB  | 17573.46 MB/s | 17602.65 MB/s |
+| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+
+These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
+the following commands under Go 1.11.2:
+
+```
+$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
+$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [FreeCache](https://github.com/coocood/freecache)
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
new file mode 100644
index 0000000..426b9ca
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
@@ -0,0 +1,238 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package.
+
+package xxhash
+
+import (
+	"encoding/binary"
+	"errors"
+	"math/bits"
+)
+
+const (
+	prime1 uint64 = 11400714785074694791
+	prime2 uint64 = 14029467366897019727
+	prime3 uint64 = 1609587929392839161
+	prime4 uint64 = 9650029242287828579
+	prime5 uint64 = 2870177450012600261
+)
+
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// possible in the Go code is worth a small (but measurable) performance boost
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// convenience in the Go code in a few places where we need to intentionally
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+// result overflows a uint64).
+var (
+	prime1v = prime1
+	prime2v = prime2
+	prime3v = prime3
+	prime4v = prime4
+	prime5v = prime5
+)
+
+// Digest implements hash.Hash64.
+type Digest struct {
+	v1    uint64
+	v2    uint64
+	v3    uint64
+	v4    uint64
+	total uint64
+	mem   [32]byte
+	n     int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+	var d Digest
+	d.Reset()
+	return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+	d.v1 = prime1v + prime2
+	d.v2 = prime2
+	d.v3 = 0
+	d.v4 = -prime1v
+	d.total = 0
+	d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+	n = len(b)
+	d.total += uint64(n)
+
+	if d.n+n < 32 {
+		// This new data doesn't even fill the current block.
+		copy(d.mem[d.n:], b)
+		d.n += n
+		return
+	}
+
+	if d.n > 0 {
+		// Finish off the partial block.
+		copy(d.mem[d.n:], b)
+		d.v1 = round(d.v1, u64(d.mem[0:8]))
+		d.v2 = round(d.v2, u64(d.mem[8:16]))
+		d.v3 = round(d.v3, u64(d.mem[16:24]))
+		d.v4 = round(d.v4, u64(d.mem[24:32]))
+		b = b[32-d.n:]
+		d.n = 0
+	}
+
+	if len(b) >= 32 {
+		// One or more full blocks left.
+		nw := writeBlocks(d, b)
+		b = b[nw:]
+	}
+
+	// Store any remaining partial block.
+	copy(d.mem[:], b)
+	d.n = len(b)
+
+	return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+	s := d.Sum64()
+	return append(
+		b,
+		byte(s>>56),
+		byte(s>>48),
+		byte(s>>40),
+		byte(s>>32),
+		byte(s>>24),
+		byte(s>>16),
+		byte(s>>8),
+		byte(s),
+	)
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+	var h uint64
+
+	if d.total >= 32 {
+		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		h = d.v3 + prime5
+	}
+
+	h += d.total
+
+	i, end := 0, d.n
+	for ; i+8 <= end; i += 8 {
+		k1 := round(0, u64(d.mem[i:i+8]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if i+4 <= end {
+		h ^= uint64(u32(d.mem[i:i+4])) * prime1
+		h = rol23(h)*prime2 + prime3
+		i += 4
+	}
+	for i < end {
+		h ^= uint64(d.mem[i]) * prime5
+		h = rol11(h) * prime1
+		i++
+	}
+
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+const (
+	magic         = "xxh\x06"
+	marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+	b := make([]byte, 0, marshaledSize)
+	b = append(b, magic...)
+	b = appendUint64(b, d.v1)
+	b = appendUint64(b, d.v2)
+	b = appendUint64(b, d.v3)
+	b = appendUint64(b, d.v4)
+	b = appendUint64(b, d.total)
+	b = append(b, d.mem[:d.n]...)
+	b = b[:len(b)+len(d.mem)-d.n]
+	return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+		return errors.New("xxhash: invalid hash state identifier")
+	}
+	if len(b) != marshaledSize {
+		return errors.New("xxhash: invalid hash state size")
+	}
+	b = b[len(magic):]
+	b, d.v1 = consumeUint64(b)
+	b, d.v2 = consumeUint64(b)
+	b, d.v3 = consumeUint64(b)
+	b, d.v4 = consumeUint64(b)
+	b, d.total = consumeUint64(b)
+	copy(d.mem[:], b)
+	b = b[len(d.mem):]
+	d.n = int(d.total % uint64(len(d.mem)))
+	return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+	var a [8]byte
+	binary.LittleEndian.PutUint64(a[:], x)
+	return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+	x := u64(b)
+	return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+	acc += input * prime2
+	acc = rol31(acc)
+	acc *= prime1
+	return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+	val = round(0, val)
+	acc ^= val
+	acc = acc*prime1 + prime4
+	return acc
+}
+
+func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
similarity index 75%
rename from vendor/github.com/cespare/xxhash/xxhash_amd64.go
rename to vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
index d617652..35318d7 100644
--- a/vendor/github.com/cespare/xxhash/xxhash_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
@@ -9,4 +9,5 @@
 //go:noescape
 func Sum64(b []byte) uint64
 
-func writeBlocks(x *xxh, b []byte) []byte
+//go:noescape
+func writeBlocks(*Digest, []byte) int
diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
similarity index 77%
rename from vendor/github.com/cespare/xxhash/xxhash_amd64.s
rename to vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
index 757f201..2c9c535 100644
--- a/vendor/github.com/cespare/xxhash/xxhash_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -170,23 +170,22 @@
 	RET
 
 // writeBlocks uses the same registers as above except that it uses AX to store
-// the x pointer.
+// the d pointer.
 
-// func writeBlocks(x *xxh, b []byte) []byte
-TEXT ·writeBlocks(SB), NOSPLIT, $0-56
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT, $0-40
 	// Load fixed primes needed for round.
 	MOVQ ·prime1v(SB), R13
 	MOVQ ·prime2v(SB), R14
 
 	// Load slice.
-	MOVQ b_base+8(FP), CX
-	MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below
-	MOVQ b_len+16(FP), DX
+	MOVQ arg1_base+8(FP), CX
+	MOVQ arg1_len+16(FP), DX
 	LEAQ (CX)(DX*1), BX
 	SUBQ $32, BX
 
-	// Load vN from x.
-	MOVQ x+0(FP), AX
+	// Load vN from d.
+	MOVQ arg+0(FP), AX
 	MOVQ 0(AX), R8   // v1
 	MOVQ 8(AX), R9   // v2
 	MOVQ 16(AX), R10 // v3
@@ -203,31 +202,14 @@
 	CMPQ CX, BX
 	JLE  blockLoop
 
-	// Copy vN back to x.
+	// Copy vN back to d.
 	MOVQ R8, 0(AX)
 	MOVQ R9, 8(AX)
 	MOVQ R10, 16(AX)
 	MOVQ R11, 24(AX)
 
-	// Construct return slice.
-	// NOTE: It's important that we don't construct a slice that has a base
-	// pointer off the end of the original slice, as in Go 1.7+ this will
-	// cause runtime crashes. (See discussion in, for example,
-	// https://github.com/golang/go/issues/16772.)
-	// Therefore, we calculate the length/cap first, and if they're zero, we
-	// keep the old base. This is what the compiler does as well if you
-	// write code like
-	//   b = b[len(b):]
-
-	// New length is 32 - (CX - BX) -> BX+32 - CX.
-	ADDQ $32, BX
-	SUBQ CX, BX
-	JZ   afterSetBase
-
-	MOVQ CX, ret_base+32(FP)
-
-afterSetBase:
-	MOVQ BX, ret_len+40(FP)
-	MOVQ BX, ret_cap+48(FP) // set cap == len
+	// The number of bytes written is CX minus the old base pointer.
+	SUBQ arg1_base+8(FP), CX
+	MOVQ CX, ret+32(FP)
 
 	RET
diff --git a/vendor/github.com/cespare/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
similarity index 85%
rename from vendor/github.com/cespare/xxhash/xxhash_other.go
rename to vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
index c68d13f..4a5a821 100644
--- a/vendor/github.com/cespare/xxhash/xxhash_other.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
@@ -5,9 +5,9 @@
 // Sum64 computes the 64-bit xxHash digest of b.
 func Sum64(b []byte) uint64 {
 	// A simpler version would be
-	//   x := New()
-	//   x.Write(b)
-	//   return x.Sum64()
+	//   d := New()
+	//   d.Write(b)
+	//   return d.Sum64()
 	// but this is faster, particularly for small inputs.
 
 	n := len(b)
@@ -61,8 +61,9 @@
 	return h
 }
 
-func writeBlocks(x *xxh, b []byte) []byte {
-	v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
+func writeBlocks(d *Digest, b []byte) int {
+	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+	n := len(b)
 	for len(b) >= 32 {
 		v1 = round(v1, u64(b[0:8:len(b)]))
 		v2 = round(v2, u64(b[8:16:len(b)]))
@@ -70,6 +71,6 @@
 		v4 = round(v4, u64(b[24:32:len(b)]))
 		b = b[32:len(b):len(b)]
 	}
-	x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4
-	return b
+	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+	return n - len(b)
 }
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
new file mode 100644
index 0000000..6f3b0cb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
@@ -0,0 +1,11 @@
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s.
+func Sum64String(s string) uint64 {
+	return Sum64([]byte(s))
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+	return d.Write([]byte(s))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
new file mode 100644
index 0000000..1dd39e6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -0,0 +1,492 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+type seq struct {
+	litLen   uint32
+	matchLen uint32
+	offset   uint32
+
+	// Codes are stored here for the encoder
+	// so they only have to be looked up once.
+	llCode, mlCode, ofCode uint8
+}
+
+func (s seq) String() string {
+	if s.offset <= 3 {
+		if s.offset == 0 {
+			return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)")
+		}
+		return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)")
+	}
+	return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)")
+}
+
+type seqCompMode uint8
+
+const (
+	compModePredefined seqCompMode = iota
+	compModeRLE
+	compModeFSE
+	compModeRepeat
+)
+
+type sequenceDec struct {
+	// decoder keeps track of the current state and updates it from the bitstream.
+	fse    *fseDecoder
+	state  fseState
+	repeat bool
+}
+
+// init the state of the decoder with input from stream.
+func (s *sequenceDec) init(br *bitReader) error {
+	if s.fse == nil {
+		return errors.New("sequence decoder not defined")
+	}
+	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
+	return nil
+}
+
+// sequenceDecs contains all 3 sequence decoders and their state.
+type sequenceDecs struct {
+	litLengths   sequenceDec
+	offsets      sequenceDec
+	matchLengths sequenceDec
+	prevOffset   [3]int
+	hist         []byte
+	dict         []byte
+	literals     []byte
+	out          []byte
+	windowSize   int
+	maxBits      uint8
+}
+
+// initialize all 3 decoders from the stream input.
+func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error {
+	if err := s.litLengths.init(br); err != nil {
+		return errors.New("litLengths:" + err.Error())
+	}
+	if err := s.offsets.init(br); err != nil {
+		return errors.New("offsets:" + err.Error())
+	}
+	if err := s.matchLengths.init(br); err != nil {
+		return errors.New("matchLengths:" + err.Error())
+	}
+	s.literals = literals
+	s.hist = hist.b
+	s.prevOffset = hist.recentOffsets
+	s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
+	s.windowSize = hist.windowSize
+	s.out = out
+	s.dict = nil
+	if hist.dict != nil {
+		s.dict = hist.dict.content
+	}
+	return nil
+}
+
+// decode sequences from the stream with the provided history.
+func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
+	startSize := len(s.out)
+	// Grab full sizes tables, to avoid bounds checks.
+	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+
+	for i := seqs - 1; i >= 0; i-- {
+		if br.overread() {
+			printf("reading sequence %d, exceeded available data\n", seqs-i)
+			return io.ErrUnexpectedEOF
+		}
+		var ll, mo, ml int
+		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+			// inlined function:
+			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+			// Final will not read from stream.
+			var llB, mlB, moB uint8
+			ll, llB = llState.final()
+			ml, mlB = mlState.final()
+			mo, moB = ofState.final()
+
+			// extra bits are stored in reverse order.
+			br.fillFast()
+			mo += br.getBits(moB)
+			if s.maxBits > 32 {
+				br.fillFast()
+			}
+			ml += br.getBits(mlB)
+			ll += br.getBits(llB)
+
+			if moB > 1 {
+				s.prevOffset[2] = s.prevOffset[1]
+				s.prevOffset[1] = s.prevOffset[0]
+				s.prevOffset[0] = mo
+			} else {
+				// mo = s.adjustOffset(mo, ll, moB)
+				// Inlined for rather big speedup
+				if ll == 0 {
+					// There is an exception though, when current sequence's literals_length = 0.
+					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+					mo++
+				}
+
+				if mo == 0 {
+					mo = s.prevOffset[0]
+				} else {
+					var temp int
+					if mo == 3 {
+						temp = s.prevOffset[0] - 1
+					} else {
+						temp = s.prevOffset[mo]
+					}
+
+					if temp == 0 {
+						// 0 is not valid; input is corrupted; force offset to 1
+						println("temp was 0")
+						temp = 1
+					}
+
+					if mo != 1 {
+						s.prevOffset[2] = s.prevOffset[1]
+					}
+					s.prevOffset[1] = s.prevOffset[0]
+					s.prevOffset[0] = temp
+					mo = temp
+				}
+			}
+			br.fillFast()
+		} else {
+			ll, mo, ml = s.next(br, llState, mlState, ofState)
+			br.fill()
+		}
+
+		if debugSequences {
+			println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+		}
+
+		if ll > len(s.literals) {
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
+		}
+		size := ll + ml + len(s.out)
+		if size-startSize > maxBlockSize {
+			return fmt.Errorf("output (%d) bigger than max block size", size)
+		}
+		if size > cap(s.out) {
+			// Not enough capacity; this can happen under high-volume block streaming conditions,
+			// but also if the destination slice is too small for sync operations.
+			// Over-allocating here can create a large amount of GC pressure, so we try to keep
+			// it as contained as possible.
+			used := len(s.out) - startSize
+			addBytes := 256 + ll + ml + used>>2
+			// Clamp to max block size.
+			if used+addBytes > maxBlockSize {
+				addBytes = maxBlockSize - used
+			}
+			s.out = append(s.out, make([]byte, addBytes)...)
+			s.out = s.out[:len(s.out)-addBytes]
+		}
+		if ml > maxMatchLen {
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+		}
+
+		// Add literals
+		s.out = append(s.out, s.literals[:ll]...)
+		s.literals = s.literals[ll:]
+		out := s.out
+
+		if mo == 0 && ml > 0 {
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+		}
+
+		if mo > len(s.out)+len(hist) || mo > s.windowSize {
+			if len(s.dict) == 0 {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+			}
+
+			// we may be in dictionary.
+			dictO := len(s.dict) - (mo - (len(s.out) + len(hist)))
+			if dictO < 0 || dictO >= len(s.dict) {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+			}
+			end := dictO + ml
+			if end > len(s.dict) {
+				out = append(out, s.dict[dictO:]...)
+				mo -= len(s.dict) - dictO
+				ml -= len(s.dict) - dictO
+			} else {
+				out = append(out, s.dict[dictO:end]...)
+				mo = 0
+				ml = 0
+			}
+		}
+
+		// Copy from history.
+		// TODO: Blocks without history could be made to ignore this completely.
+		if v := mo - len(s.out); v > 0 {
+			// v is the start position in history from end.
+			start := len(s.hist) - v
+			if ml > v {
+				// Some goes into current block.
+				// Copy remainder of history
+				out = append(out, s.hist[start:]...)
+				mo -= v
+				ml -= v
+			} else {
+				out = append(out, s.hist[start:start+ml]...)
+				ml = 0
+			}
+		}
+		// We must be in current buffer now
+		if ml > 0 {
+			start := len(s.out) - mo
+			if ml <= len(s.out)-start {
+				// No overlap
+				out = append(out, s.out[start:start+ml]...)
+			} else {
+				// Overlapping copy
+				// Extend destination slice and copy one byte at a time.
+				out = out[:len(out)+ml]
+				src := out[start : start+ml]
+				// Destination is the space we just added.
+				dst := out[len(out)-ml:]
+				dst = dst[:len(src)]
+				for i := range src {
+					dst[i] = src[i]
+				}
+			}
+		}
+		s.out = out
+		if i == 0 {
+			// This is the last sequence, so we shouldn't update state.
+			break
+		}
+
+		// Manually inlined, ~ 5-20% faster
+		// Update all 3 states at once. Approx 20% faster.
+		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+		if nBits == 0 {
+			llState = llTable[llState.newState()&maxTableMask]
+			mlState = mlTable[mlState.newState()&maxTableMask]
+			ofState = ofTable[ofState.newState()&maxTableMask]
+		} else {
+			bits := br.getBitsFast(nBits)
+			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+			lowBits = uint16(bits >> (ofState.nbBits() & 31))
+			lowBits &= bitMask[mlState.nbBits()&15]
+			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
+		}
+	}
+
+	// Add final literals
+	s.out = append(s.out, s.literals...)
+	return nil
+}
+
+// update states, at least 27 bits must be available.
+func (s *sequenceDecs) update(br *bitReader) {
+	// Max 8 bits
+	s.litLengths.state.next(br)
+	// Max 9 bits
+	s.matchLengths.state.next(br)
+	// Max 8 bits
+	s.offsets.state.next(br)
+}
+
+var bitMask [16]uint16
+
+func init() {
+	for i := range bitMask[:] {
+		bitMask[i] = uint16((1 << uint(i)) - 1)
+	}
+}
+
+// update states, at least 27 bits must be available.
+func (s *sequenceDecs) updateAlt(br *bitReader) {
+	// Update all 3 states at once. Approx 20% faster.
+	a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+
+	nBits := a.nbBits() + b.nbBits() + c.nbBits()
+	if nBits == 0 {
+		s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
+		s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
+		s.offsets.state.state = s.offsets.state.dt[c.newState()]
+		return
+	}
+	bits := br.getBitsFast(nBits)
+	lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
+	s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
+
+	lowBits = uint16(bits >> (c.nbBits() & 31))
+	lowBits &= bitMask[b.nbBits()&15]
+	s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
+
+	lowBits = uint16(bits) & bitMask[c.nbBits()&15]
+	s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
+}
+
+// nextFast returns the next ll, mo and ml values; it may only be used when at least 4 unused bytes are left on the stream when done.
+func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
+	// Final will not read from stream.
+	ll, llB := llState.final()
+	ml, mlB := mlState.final()
+	mo, moB := ofState.final()
+
+	// extra bits are stored in reverse order.
+	br.fillFast()
+	mo += br.getBits(moB)
+	if s.maxBits > 32 {
+		br.fillFast()
+	}
+	ml += br.getBits(mlB)
+	ll += br.getBits(llB)
+
+	if moB > 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+		s.prevOffset[1] = s.prevOffset[0]
+		s.prevOffset[0] = mo
+		return
+	}
+	// mo = s.adjustOffset(mo, ll, moB)
+	// Inlined for rather big speedup
+	if ll == 0 {
+		// There is an exception though, when current sequence's literals_length = 0.
+		// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+		// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+		mo++
+	}
+
+	if mo == 0 {
+		mo = s.prevOffset[0]
+		return
+	}
+	var temp int
+	if mo == 3 {
+		temp = s.prevOffset[0] - 1
+	} else {
+		temp = s.prevOffset[mo]
+	}
+
+	if temp == 0 {
+		// 0 is not valid; input is corrupted; force offset to 1
+		println("temp was 0")
+		temp = 1
+	}
+
+	if mo != 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+	}
+	s.prevOffset[1] = s.prevOffset[0]
+	s.prevOffset[0] = temp
+	mo = temp
+	return
+}
+
+func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
+	// Final will not read from stream.
+	ll, llB := llState.final()
+	ml, mlB := mlState.final()
+	mo, moB := ofState.final()
+
+	// extra bits are stored in reverse order.
+	br.fill()
+	if s.maxBits <= 32 {
+		mo += br.getBits(moB)
+		ml += br.getBits(mlB)
+		ll += br.getBits(llB)
+	} else {
+		mo += br.getBits(moB)
+		br.fill()
+		// matchlength+literal length, max 32 bits
+		ml += br.getBits(mlB)
+		ll += br.getBits(llB)
+
+	}
+	mo = s.adjustOffset(mo, ll, moB)
+	return
+}
+
+func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
+	if offsetB > 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+		s.prevOffset[1] = s.prevOffset[0]
+		s.prevOffset[0] = offset
+		return offset
+	}
+
+	if litLen == 0 {
+		// There is an exception though, when current sequence's literals_length = 0.
+		// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+		// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+		offset++
+	}
+
+	if offset == 0 {
+		return s.prevOffset[0]
+	}
+	var temp int
+	if offset == 3 {
+		temp = s.prevOffset[0] - 1
+	} else {
+		temp = s.prevOffset[offset]
+	}
+
+	if temp == 0 {
+		// 0 is not valid; input is corrupted; force offset to 1
+		println("temp was 0")
+		temp = 1
+	}
+
+	if offset != 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+	}
+	s.prevOffset[1] = s.prevOffset[0]
+	s.prevOffset[0] = temp
+	return temp
+}
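+
+// For example (illustrative): with prevOffset = {8, 4, 2} and litLen > 0, a repeat
+// offset value of 0 returns 8, 1 returns 4 (and swaps the first two entries),
+// 2 returns 2, and 3 returns 8-1 = 7. With litLen == 0 the values are shifted by
+// one, so 0 returns 4, 1 returns 2 and 2 returns 7.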
+
+// mergeHistory merges the decoder tables from s into hist, reusing the history tables where a repeat is requested, and returns the updated history.
+func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) {
+	for i := uint(0); i < 3; i++ {
+		var sNew, sHist *sequenceDec
+		switch i {
+		default:
+			// same as "case 0":
+			sNew = &s.litLengths
+			sHist = &hist.litLengths
+		case 1:
+			sNew = &s.offsets
+			sHist = &hist.offsets
+		case 2:
+			sNew = &s.matchLengths
+			sHist = &hist.matchLengths
+		}
+		if sNew.repeat {
+			if sHist.fse == nil {
+				return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i)
+			}
+			continue
+		}
+		if sNew.fse == nil {
+			return nil, fmt.Errorf("sequence stream %d, no fse found", i)
+		}
+		if sHist.fse != nil && !sHist.fse.preDefined {
+			fseDecoderPool.Put(sHist.fse)
+		}
+		sHist.fse = sNew.fse
+	}
+	return hist, nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go
new file mode 100644
index 0000000..8014174
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go
@@ -0,0 +1,114 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "math/bits"
+
+type seqCoders struct {
+	llEnc, ofEnc, mlEnc    *fseEncoder
+	llPrev, ofPrev, mlPrev *fseEncoder
+}
+
+// swap coders with another (block).
+func (s *seqCoders) swap(other *seqCoders) {
+	*s, *other = *other, *s
+}
+
+// setPrev will update the previous encoders to the actually used ones
+// and make sure a fresh one is in the main slot.
+func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) {
+	compareSwap := func(used *fseEncoder, current, prev **fseEncoder) {
+		// We used the new one; move current to history and reuse the previous history.
+		if *current == used {
+			*prev, *current = *current, *prev
+			c := *current
+			p := *prev
+			c.reUsed = false
+			p.reUsed = true
+			return
+		}
+		if used == *prev {
+			return
+		}
+		// Ensure we cannot reuse by accident
+		prevEnc := *prev
+		prevEnc.symbolLen = 0
+	}
+	compareSwap(ll, &s.llEnc, &s.llPrev)
+	compareSwap(ml, &s.mlEnc, &s.mlPrev)
+	compareSwap(of, &s.ofEnc, &s.ofPrev)
+}
+
+func highBit(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
+
+var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7,
+	8, 9, 10, 11, 12, 13, 14, 15,
+	16, 16, 17, 17, 18, 18, 19, 19,
+	20, 20, 20, 20, 21, 21, 21, 21,
+	22, 22, 22, 22, 22, 22, 22, 22,
+	23, 23, 23, 23, 23, 23, 23, 23,
+	24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24}
+
+// Up to 6 bits
+const maxLLCode = 35
+
+// llBitsTable translates from ll code to number of bits.
+var llBitsTable = [maxLLCode + 1]byte{
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 2, 2, 3, 3,
+	4, 6, 7, 8, 9, 10, 11, 12,
+	13, 14, 15, 16}
+
+// llCode returns the code that represents the literal length requested.
+func llCode(litLength uint32) uint8 {
+	const llDeltaCode = 19
+	if litLength <= 63 {
+		// Compiler insists on bounds check (Go 1.12)
+		return llCodeTable[litLength&63]
+	}
+	return uint8(highBit(litLength)) + llDeltaCode
+}
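+
+// For example (illustrative): llCode(100) == 25, since literal lengths 64-127 all
+// share code 25 and differ only in their 6 extra bits.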
+
+var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+	16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+	32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+	38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+	40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+	41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+	42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+	42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42}
+
+// Up to 6 bits
+const maxMLCode = 52
+
+// mlBitsTable translates from ml code to number of bits.
+var mlBitsTable = [maxMLCode + 1]byte{
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 2, 2, 3, 3,
+	4, 4, 5, 7, 8, 9, 10, 11,
+	12, 13, 14, 15, 16}
+
+// mlCode returns the code for the given match length base.
+// Note: mlBase = matchLength - zstdMinMatch, because that is the format the sequences store lengths in.
+func mlCode(mlBase uint32) uint8 {
+	const mlDeltaCode = 36
+	if mlBase <= 127 {
+		// Compiler insists on bounds check (Go 1.12)
+		return mlCodeTable[mlBase&127]
+	}
+	return uint8(highBit(mlBase)) + mlDeltaCode
+}
+
+func ofCode(offset uint32) uint8 {
+	// A valid offset will always be > 0.
+	return uint8(bits.Len32(offset) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
new file mode 100644
index 0000000..9d9d1d5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -0,0 +1,435 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"errors"
+	"hash/crc32"
+	"io"
+
+	"github.com/golang/snappy"
+	"github.com/klauspost/compress/huff0"
+)
+
+const (
+	snappyTagLiteral = 0x00
+	snappyTagCopy1   = 0x01
+	snappyTagCopy2   = 0x02
+	snappyTagCopy4   = 0x03
+)
+
+const (
+	snappyChecksumSize = 4
+	snappyMagicBody    = "sNaPpY"
+
+	// snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not
+	// part of the wire format per se, but some parts of the encoder assume
+	// that an offset fits into a uint16.
+	//
+	// Also, for the framing format (Writer type instead of Encode function),
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536
+	// bytes".
+	snappyMaxBlockSize = 65536
+
+	// snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is
+	// hard coded to be a const instead of a variable, so that obufLen can also
+	// be a const. Their equivalence is confirmed by
+	// TestMaxEncodedLenOfMaxBlockSize.
+	snappyMaxEncodedLenOfMaxBlockSize = 76490
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var (
+	// ErrSnappyCorrupt reports that the input is invalid.
+	ErrSnappyCorrupt = errors.New("snappy: corrupt input")
+	// ErrSnappyTooLarge reports that the uncompressed length is too large.
+	ErrSnappyTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrSnappyUnsupported reports that the input isn't supported.
+	ErrSnappyUnsupported = errors.New("snappy: unsupported input")
+
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// SnappyConverter can read Snappy-compressed streams and convert them to zstd.
+// Conversion is done by converting the stream directly from Snappy without intermediate
+// full decoding.
+// Therefore the compression ratio is much less than what can be done by a full decompression
+// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without
+// any errors being generated.
+// No CRC value is being generated and not all CRC values of the Snappy stream are checked.
+// However, it provides really fast recompression of Snappy streams.
+// The converter can be reused to avoid allocations, even after errors.
+type SnappyConverter struct {
+	r     io.Reader
+	err   error
+	buf   []byte
+	block *blockEnc
+}
+
+// Convert reads the Snappy stream supplied in 'in' and writes the Zstandard stream to 'w'.
+// If any error is detected on the Snappy stream it is returned.
+// The number of bytes written is returned.
+func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
+	initPredefined()
+	r.err = nil
+	r.r = in
+	if r.block == nil {
+		r.block = &blockEnc{}
+		r.block.init()
+	}
+	r.block.initNewEncode()
+	if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize {
+		r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize)
+	}
+	r.block.litEnc.Reuse = huff0.ReusePolicyNone
+	var written int64
+	var readHeader bool
+	{
+		var header []byte
+		var n int
+		header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+
+		n, r.err = w.Write(header)
+		if r.err != nil {
+			return written, r.err
+		}
+		written += int64(n)
+	}
+
+	for {
+		if !r.readFull(r.buf[:4], true) {
+			// Add empty last block
+			r.block.reset(nil)
+			r.block.last = true
+			err := r.block.encodeLits(r.block.literals, false)
+			if err != nil {
+				return written, err
+			}
+			n, err := w.Write(r.block.output)
+			if err != nil {
+				return written, err
+			}
+			written += int64(n)
+
+			return written, r.err
+		}
+		chunkType := r.buf[0]
+		if !readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				println("chunkType != chunkTypeStreamIdentifier", chunkType)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			println("chunkLen > len(r.buf)", chunkType)
+			r.err = ErrSnappyUnsupported
+			return written, r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < snappyChecksumSize {
+				println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf, false) {
+				return written, r.err
+			}
+			//checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[snappyChecksumSize:]
+
+			n, hdr, err := snappyDecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return written, r.err
+			}
+			buf = buf[hdr:]
+			if n > snappyMaxBlockSize {
+				println("n > snappyMaxBlockSize", n, snappyMaxBlockSize)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			r.block.reset(nil)
+			r.block.pushOffsets()
+			if err := decodeSnappy(r.block, buf); err != nil {
+				r.err = err
+				return written, r.err
+			}
+			if r.block.size+r.block.extraLits != n {
+				printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			err = r.block.encode(nil, false, false)
+			switch err {
+			case errIncompressible:
+				r.block.popOffsets()
+				r.block.reset(nil)
+				r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen])
+				if err != nil {
+					return written, err
+				}
+				err = r.block.encodeLits(r.block.literals, false)
+				if err != nil {
+					return written, err
+				}
+			case nil:
+			default:
+				return written, err
+			}
+
+			n, r.err = w.Write(r.block.output)
+			if r.err != nil {
+				return written, err
+			}
+			written += int64(n)
+			continue
+		case chunkTypeUncompressedData:
+			if debug {
+				println("Uncompressed, chunklen", chunkLen)
+			}
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < snappyChecksumSize {
+				println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			r.block.reset(nil)
+			buf := r.buf[:snappyChecksumSize]
+			if !r.readFull(buf, false) {
+				return written, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
+			n := chunkLen - snappyChecksumSize
+			if n > snappyMaxBlockSize {
+				println("n > snappyMaxBlockSize", n, snappyMaxBlockSize)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			r.block.literals = r.block.literals[:n]
+			if !r.readFull(r.block.literals, false) {
+				return written, r.err
+			}
+			if snappyCRC(r.block.literals) != checksum {
+				println("literals crc mismatch")
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			err := r.block.encodeLits(r.block.literals, false)
+			if err != nil {
+				return written, err
+			}
+			n, r.err = w.Write(r.block.output)
+			if r.err != nil {
+				return written, err
+			}
+			written += int64(n)
+			continue
+
+		case chunkTypeStreamIdentifier:
+			if debug {
+				println("stream id", chunkLen, len(snappyMagicBody))
+			}
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(snappyMagicBody) {
+				println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody))
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			if !r.readFull(r.buf[:len(snappyMagicBody)], false) {
+				return written, r.err
+			}
+			for i := 0; i < len(snappyMagicBody); i++ {
+				if r.buf[i] != snappyMagicBody[i] {
+					println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i)
+					r.err = ErrSnappyCorrupt
+					return written, r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			println("chunkType <= 0x7f")
+			r.err = ErrSnappyUnsupported
+			return written, r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if !r.readFull(r.buf[:chunkLen], false) {
+			return written, r.err
+		}
+	}
+}
+
+// decodeSnappy decodes src into sequences and literals on blk. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read.
+func decodeSnappy(blk *blockEnc, src []byte) error {
+	//decodeRef(make([]byte, snappyMaxBlockSize), src)
+	var s, length int
+	lits := blk.extraLits
+	var offset uint32
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case snappyTagLiteral:
+			x := uint32(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					println("uint(s) > uint(len(src)", s, src)
+					return ErrSnappyCorrupt
+				}
+				x = uint32(src[s-1])
+			case x == 61:
+				s += 3
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					println("uint(s) > uint(len(src)", s, src)
+					return ErrSnappyCorrupt
+				}
+				x = uint32(src[s-2]) | uint32(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					println("uint(s) > uint(len(src)", s, src)
+					return ErrSnappyCorrupt
+				}
+				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					println("uint(s) > uint(len(src)", s, src)
+					return ErrSnappyCorrupt
+				}
+				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+			}
+			if x > snappyMaxBlockSize {
+				println("x > snappyMaxBlockSize", x, snappyMaxBlockSize)
+				return ErrSnappyCorrupt
+			}
+			length = int(x) + 1
+			if length <= 0 {
+				println("length <= 0 ", length)
+
+				return errUnsupportedLiteralLength
+			}
+			//if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s {
+			//	return ErrSnappyCorrupt
+			//}
+
+			blk.literals = append(blk.literals, src[s:s+length]...)
+			//println(length, "litLen")
+			lits += length
+			s += length
+			continue
+
+		case snappyTagCopy1:
+			s += 2
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				println("uint(s) > uint(len(src)", s, len(src))
+				return ErrSnappyCorrupt
+			}
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])
+
+		case snappyTagCopy2:
+			s += 3
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				println("uint(s) > uint(len(src)", s, len(src))
+				return ErrSnappyCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = uint32(src[s-2]) | uint32(src[s-1])<<8
+
+		case snappyTagCopy4:
+			s += 5
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				println("uint(s) > uint(len(src)", s, len(src))
+				return ErrSnappyCorrupt
+			}
+			length = 1 + int(src[s-5])>>2
+			offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+		}
+
+		if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ {
+			println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits)
+
+			return ErrSnappyCorrupt
+		}
+
+		// Check if offset is one of the recent offsets.
+		// Adjusts the output offset accordingly.
+		// Gives a tiny bit of compression, typically around 1%.
+		if false {
+			offset = blk.matchOffset(offset, uint32(lits))
+		} else {
+			offset += 3
+		}
+
+		blk.sequences = append(blk.sequences, seq{
+			litLen:   uint32(lits),
+			offset:   offset,
+			matchLen: uint32(length) - zstdMinMatch,
+		})
+		blk.size += length + lits
+		lits = 0
+	}
+	blk.extraLits = lits
+	return nil
+}
+
+func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrSnappyCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// snappyCRC implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func snappyCRC(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return c>>15 | c<<17 + 0xa282ead8
+}
+
+// snappyDecodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n <= 0 || v > 0xffffffff {
+		return 0, 0, ErrSnappyCorrupt
+	}
+
+	const wordSize = 32 << (^uint(0) >> 32 & 1)
+	if wordSize == 32 && v > 0x7fffffff {
+		return 0, 0, ErrSnappyTooLarge
+	}
+	return int(v), n, nil
+}
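+
+// snappyToZstdExample is an illustrative sketch (not part of the upstream package):
+// it transcodes a Snappy-framed stream into a zstd stream using a reusable
+// SnappyConverter.
+func snappyToZstdExample(in io.Reader, out io.Writer) (int64, error) {
+	var c SnappyConverter
+	return c.Convert(in, out)
+}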
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
new file mode 100644
index 0000000..e35a0a2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -0,0 +1,120 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip.
+// See https://www.winzip.com/win/en/comp_info.html
+const ZipMethodWinZip = 93
+
+// ZipMethodPKWare is the method number used by PKWARE to indicate Zstandard compression.
+// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.7.TXT
+const ZipMethodPKWare = 20
+
+var zipReaderPool sync.Pool
+
+// newZipReader cannot be used since we would leak goroutines...
+func newZipReader(r io.Reader) io.ReadCloser {
+	dec, ok := zipReaderPool.Get().(*Decoder)
+	if ok {
+		dec.Reset(r)
+	} else {
+		d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
+		if err != nil {
+			panic(err)
+		}
+		dec = d
+	}
+	return &pooledZipReader{dec: dec}
+}
+
+type pooledZipReader struct {
+	mu  sync.Mutex // guards Close and Read
+	dec *Decoder
+}
+
+func (r *pooledZipReader) Read(p []byte) (n int, err error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.dec == nil {
+		return 0, errors.New("Read after Close")
+	}
+	dec, err := r.dec.Read(p)
+
+	return dec, err
+}
+
+func (r *pooledZipReader) Close() error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	var err error
+	if r.dec != nil {
+		err = r.dec.Reset(nil)
+		zipReaderPool.Put(r.dec)
+		r.dec = nil
+	}
+	return err
+}
+
+type pooledZipWriter struct {
+	mu   sync.Mutex // guards Close and Write
+	enc  *Encoder
+	pool *sync.Pool // pool the encoder is returned to on Close
+}
+
+func (w *pooledZipWriter) Write(p []byte) (n int, err error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	if w.enc == nil {
+		return 0, errors.New("Write after Close")
+	}
+	return w.enc.Write(p)
+}
+
+func (w *pooledZipWriter) Close() error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	var err error
+	if w.enc != nil {
+		err = w.enc.Close()
+		// Return the encoder to the compressor's own pool; zipReaderPool
+		// holds *Decoder values and would never hand the encoder back.
+		w.pool.Put(w.enc)
+		w.enc = nil
+	}
+	return err
+}
+
+// ZipCompressor returns a compressor that can be registered with zip libraries.
+// The provided encoder options will be used on all encodes.
+func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
+	var pool sync.Pool
+	return func(w io.Writer) (io.WriteCloser, error) {
+		enc, ok := pool.Get().(*Encoder)
+		if ok {
+			enc.Reset(w)
+		} else {
+			var err error
+			enc, err = NewWriter(w, opts...)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return &pooledZipWriter{enc: enc, pool: &pool}, nil
+	}
+}
+
+// ZipDecompressor returns a decompressor that can be registered with zip libraries.
+// See ZipCompressor for an example.
+func ZipDecompressor() func(r io.Reader) io.ReadCloser {
+	return func(r io.Reader) io.ReadCloser {
+		d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
+		if err != nil {
+			panic(err)
+		}
+		return d.IOReadCloser()
+	}
+}
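These hooks are meant to be handed to the standard library's archive/zip via RegisterCompressor/RegisterDecompressor. A hedged round-trip sketch (the entry name and payload are made up) using the WinZip method id defined above:

```go
package main

import (
	"archive/zip"
	"bytes"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// Write a zip archive whose entries are zstd-compressed.
	zw := zip.NewWriter(&buf)
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
	f, err := zw.CreateHeader(&zip.FileHeader{Name: "data.txt", Method: zstd.ZipMethodWinZip})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := f.Write([]byte("hello zstd-in-zip")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Read it back with the matching decompressor registered.
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		log.Fatal(err)
	}
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
	rc, err := zr.File[0].Open()
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	if _, err := io.Copy(io.Discard, rc); err != nil {
		log.Fatal(err)
	}
}
```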
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
new file mode 100644
index 0000000..1ba308c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -0,0 +1,146 @@
+// Package zstd provides compression and decompression of Zstandard-encoded data.
+//
+// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd
+package zstd
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"log"
+	"math"
+	"math/bits"
+)
+
+// enable debug printing
+const debug = false
+
+// Enable extra assertions.
+const debugAsserts = debug || false
+
+// print sequence details
+const debugSequences = false
+
+// print detailed matching information
+const debugMatches = false
+
+// force encoder to use predefined tables.
+const forcePreDef = false
+
+// zstdMinMatch is the minimum zstd match length.
+const zstdMinMatch = 3
+
+// Reset the buffer offset when reaching this.
+const bufferReset = math.MaxInt32 - MaxWindowSize
+
+var (
+	// ErrReservedBlockType is returned when a reserved block type is found.
+	// Typically this indicates wrong or corrupted input.
+	ErrReservedBlockType = errors.New("invalid input: reserved block type encountered")
+
+	// ErrCompressedSizeTooBig is returned when a block is bigger than allowed.
+	// Typically this indicates wrong or corrupted input.
+	ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big")
+
+	// ErrBlockTooSmall is returned when a block is too small to be decoded.
+	// Typically returned on invalid input.
+	ErrBlockTooSmall = errors.New("block too small")
+
+	// ErrMagicMismatch is returned when a "magic" number isn't what is expected.
+	// Typically this indicates wrong or corrupted input.
+	ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
+
+	// ErrWindowSizeExceeded is returned when a reference exceeds the valid window size.
+	// Typically this indicates wrong or corrupted input.
+	ErrWindowSizeExceeded = errors.New("window size exceeded")
+
+	// ErrWindowSizeTooSmall is returned when no window size is specified.
+	// Typically this indicates wrong or corrupted input.
+	ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small")
+
+	// ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit.
+	ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
+
+	// ErrUnknownDictionary is returned if the dictionary ID is unknown.
+	// For the time being dictionaries are not supported.
+	ErrUnknownDictionary = errors.New("unknown dictionary")
+
+	// ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
+	// This is only returned if SingleSegment is specified on the frame.
+	ErrFrameSizeExceeded = errors.New("frame size exceeded")
+
+	// ErrCRCMismatch is returned if CRC mismatches.
+	ErrCRCMismatch = errors.New("CRC check failed")
+
+	// ErrDecoderClosed will be returned if the Decoder was used after
+	// Close has been called.
+	ErrDecoderClosed = errors.New("decoder used after Close")
+
+	// ErrDecoderNilInput is returned when a nil Reader was provided
+	// and an operation other than Reset/DecodeAll/Close was attempted.
+	ErrDecoderNilInput = errors.New("nil input provided as reader")
+)
+
+func println(a ...interface{}) {
+	if debug {
+		log.Println(a...)
+	}
+}
+
+func printf(format string, a ...interface{}) {
+	if debug {
+		log.Printf(format, a...)
+	}
+}
+
+// matchLenFast compares a and b eight bytes at a time and never inspects the
+// trailing len(a)%8 bytes, so it may report up to 7 bytes less than the true match length.
+func matchLenFast(a, b []byte) int {
+	endI := len(a) & (math.MaxInt32 - 7)
+	for i := 0; i < endI; i += 8 {
+		if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+			return i + bits.TrailingZeros64(diff)>>3
+		}
+	}
+	return endI
+}
+
+// matchLen returns the number of leading bytes that match in a and b.
+// a must be the shorter of the two slices.
+func matchLen(a, b []byte) int {
+	b = b[:len(a)]
+	for i := 0; i < len(a)-7; i += 8 {
+		if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+			return i + (bits.TrailingZeros64(diff) >> 3)
+		}
+	}
+
+	checked := (len(a) >> 3) << 3
+	a = a[checked:]
+	b = b[checked:]
+	for i := range a {
+		if a[i] != b[i] {
+			return i + checked
+		}
+	}
+	return len(a) + checked
+}
+
+func load3232(b []byte, i int32) uint32 {
+	return binary.LittleEndian.Uint32(b[i:])
+}
+
+func load6432(b []byte, i int32) uint64 {
+	return binary.LittleEndian.Uint64(b[i:])
+}
+
+func load64(b []byte, i int) uint64 {
+	return binary.LittleEndian.Uint64(b[i:])
+}
+
+type byter interface {
+	Bytes() []byte
+	Len() int
+}
+
+var _ byter = &bytes.Buffer{}
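zstd.go carries the package's shared helpers, debug switches and sentinel errors. For orientation, a small sketch of the buffer-level round trip those errors guard, using the documented nil-reader/nil-writer pattern for EncodeAll/DecodeAll; the payload is made up:

```go
package main

import (
	"errors"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil) // nil writer is fine when only EncodeAll is used
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()

	dec, err := zstd.NewReader(nil) // nil reader is fine when only DecodeAll is used
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	compressed := enc.EncodeAll([]byte("some payload"), nil)

	plain, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		// Corrupted input typically surfaces as one of the sentinel errors
		// above, e.g. ErrMagicMismatch for data that is not zstd at all.
		if errors.Is(err, zstd.ErrMagicMismatch) {
			log.Fatal("not a zstd stream")
		}
		log.Fatal(err)
	}
	log.Printf("decoded %d bytes", len(plain))
}
```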
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/adapterif/adapter_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/adapterif/adapter_proxy_if.go
deleted file mode 100644
index c514d6d..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/adapterif/adapter_proxy_if.go
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package adapterif
-
-import (
-	"context"
-
-	"github.com/golang/protobuf/proto"
-	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
-)
-
-// AdapterProxy interface for AdapterProxy implementation.
-type AdapterProxy interface {
-	SendInterAdapterMessage(ctx context.Context,
-		msg proto.Message,
-		msgType ic.InterAdapterMessageType_Types,
-		fromAdapter string,
-		toAdapter string,
-		toDeviceID string,
-		proxyDeviceID string,
-		messageID string) error
-	TechProfileInstanceRequest(ctx context.Context,
-		tpPath string,
-		ponIntfID uint32,
-		onuID uint32,
-		uniID uint32,
-		fromAdapter string,
-		toAdapter string,
-		toDeviceID string,
-		proxyDeviceID string) (*ic.InterAdapterTechProfileDownloadMessage, error)
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/adapterif/core_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/adapterif/core_proxy_if.go
deleted file mode 100644
index 36939bd..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/adapterif/core_proxy_if.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package adapterif
-
-import (
-	"context"
-
-	"github.com/opencord/voltha-protos/v4/go/voltha"
-)
-
-// CoreProxy interface for voltha-go coreproxy.
-type CoreProxy interface {
-	UpdateCoreReference(deviceID string, coreReference string)
-	DeleteCoreReference(deviceID string)
-	RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error
-	DeviceUpdate(ctx context.Context, device *voltha.Device) error
-	PortCreated(ctx context.Context, deviceID string, port *voltha.Port) error
-	PortsStateUpdate(ctx context.Context, deviceID string, portTypeFilter uint32, operStatus voltha.OperStatus_Types) error
-	DeleteAllPorts(ctx context.Context, deviceID string) error
-	GetDevicePort(ctx context.Context, deviceID string, portNo uint32) (*voltha.Port, error)
-	ListDevicePorts(ctx context.Context, deviceID string) ([]*voltha.Port, error)
-	DeviceStateUpdate(ctx context.Context, deviceID string,
-		connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error
-
-	DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error
-	ChildDeviceDetected(ctx context.Context, parentDeviceID string, parentPortNo int,
-		childDeviceType string, channelID int, vendorID string, serialNumber string, onuID int64) (*voltha.Device, error)
-
-	ChildDevicesLost(ctx context.Context, parentDeviceID string) error
-	ChildDevicesDetected(ctx context.Context, parentDeviceID string) error
-	GetDevice(ctx context.Context, parentDeviceID string, deviceID string) (*voltha.Device, error)
-	GetChildDevice(ctx context.Context, parentDeviceID string, kwargs map[string]interface{}) (*voltha.Device, error)
-	GetChildDevices(ctx context.Context, parentDeviceID string) (*voltha.Devices, error)
-	SendPacketIn(ctx context.Context, deviceID string, port uint32, pktPayload []byte) error
-	DeviceReasonUpdate(ctx context.Context, deviceID string, deviceReason string) error
-	PortStateUpdate(ctx context.Context, deviceID string, pType voltha.Port_PortType, portNo uint32,
-		operStatus voltha.OperStatus_Types) error
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/adapter_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/adapter_proxy.go
deleted file mode 100644
index 2811472..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/adapter_proxy.go
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package common
-
-import (
-	"context"
-	"github.com/opencord/voltha-lib-go/v6/pkg/db"
-	"google.golang.org/grpc/status"
-
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
-	"github.com/golang/protobuf/ptypes/any"
-	"github.com/google/uuid"
-	"github.com/opencord/voltha-lib-go/v6/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
-)
-
-type AdapterProxy struct {
-	kafkaICProxy kafka.InterContainerProxy
-	coreTopic    string
-	endpointMgr  kafka.EndpointManager
-}
-
-func NewAdapterProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, coreTopic string, backend *db.Backend) *AdapterProxy {
-	proxy := AdapterProxy{
-		kafkaICProxy: kafkaProxy,
-		coreTopic:    coreTopic,
-		endpointMgr:  kafka.NewEndpointManager(backend),
-	}
-	logger.Debugw(ctx, "topics", log.Fields{"core": proxy.coreTopic})
-	return &proxy
-}
-
-func (ap *AdapterProxy) SendInterAdapterMessage(ctx context.Context,
-	msg proto.Message,
-	msgType ic.InterAdapterMessageType_Types,
-	fromAdapter string,
-	toAdapter string,
-	toDeviceId string,
-	proxyDeviceId string,
-	messageId string) error {
-	logger.Debugw(ctx, "sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
-		"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
-
-	//Marshal the message
-	var marshalledMsg *any.Any
-	var err error
-	if marshalledMsg, err = ptypes.MarshalAny(msg); err != nil {
-		logger.Warnw(ctx, "cannot-marshal-msg", log.Fields{"error": err})
-		return err
-	}
-
-	// Set up the required rpc arguments
-	endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
-	if err != nil {
-		return err
-	}
-
-	//Build the inter adapter message
-	header := &ic.InterAdapterHeader{
-		Type:          msgType,
-		FromTopic:     fromAdapter,
-		ToTopic:       string(endpoint),
-		ToDeviceId:    toDeviceId,
-		ProxyDeviceId: proxyDeviceId,
-	}
-	if messageId != "" {
-		header.Id = messageId
-	} else {
-		header.Id = uuid.New().String()
-	}
-	header.Timestamp = ptypes.TimestampNow()
-	iaMsg := &ic.InterAdapterMessage{
-		Header: header,
-		Body:   marshalledMsg,
-	}
-	args := make([]*kafka.KVArg, 1)
-	args[0] = &kafka.KVArg{
-		Key:   "msg",
-		Value: iaMsg,
-	}
-
-	topic := kafka.Topic{Name: string(endpoint)}
-	replyToTopic := kafka.Topic{Name: fromAdapter}
-	rpc := "process_inter_adapter_message"
-
-	// Add a indication in context to differentiate this Inter Adapter message during Span processing in Kafka IC proxy
-	ctx = context.WithValue(ctx, "inter-adapter-msg-type", msgType)
-	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
-	logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
-	return unPackResponse(ctx, rpc, "", success, result)
-}
-
-func (ap *AdapterProxy) TechProfileInstanceRequest(ctx context.Context,
-	tpPath string,
-	parentPonPort uint32,
-	onuID uint32,
-	uniID uint32,
-	fromAdapter string,
-	toAdapter string,
-	toDeviceId string,
-	proxyDeviceId string) (*ic.InterAdapterTechProfileDownloadMessage, error) {
-	logger.Debugw(ctx, "sending-tech-profile-instance-request-message", log.Fields{"from": fromAdapter,
-		"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
-
-	// Set up the required rpc arguments
-	endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
-	if err != nil {
-		return nil, err
-	}
-
-	//Build the inter adapter message
-	tpReqMsg := &ic.InterAdapterTechProfileInstanceRequestMessage{
-		TpInstancePath: tpPath,
-		ParentDeviceId: toDeviceId,
-		ParentPonPort:  parentPonPort,
-		OnuId:          onuID,
-		UniId:          uniID,
-	}
-
-	args := make([]*kafka.KVArg, 1)
-	args[0] = &kafka.KVArg{
-		Key:   "msg",
-		Value: tpReqMsg,
-	}
-
-	topic := kafka.Topic{Name: string(endpoint)}
-	replyToTopic := kafka.Topic{Name: fromAdapter}
-	rpc := "process_tech_profile_instance_request"
-
-	ctx = context.WithValue(ctx, "inter-adapter-tp-req-msg", tpPath)
-	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
-	logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
-	if success {
-		tpDwnldMsg := &ic.InterAdapterTechProfileDownloadMessage{}
-		if err := ptypes.UnmarshalAny(result, tpDwnldMsg); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-			return nil, err
-		}
-		return tpDwnldMsg, nil
-	} else {
-		unpackResult := &ic.Error{}
-		var err error
-		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-		}
-		logger.Debugw(ctx, "TechProfileInstanceRequest-return", log.Fields{"tpPath": tpPath, "success": success, "error": err})
-
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
-	}
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/common.go b/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/common.go
deleted file mode 100644
index b907040..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/common.go
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright 2020-present Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package common
-
-import (
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-)
-
-var logger log.CLogger
-
-func init() {
-	// Setup this package so that it's log level can be modified at run time
-	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
-	if err != nil {
-		panic(err)
-	}
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/core_proxy.go
deleted file mode 100644
index ddbc867..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/core_proxy.go
+++ /dev/null
@@ -1,689 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package common
-
-import (
-	"context"
-	"sync"
-
-	"github.com/golang/protobuf/ptypes"
-	a "github.com/golang/protobuf/ptypes/any"
-	"github.com/opencord/voltha-lib-go/v6/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
-	"github.com/opencord/voltha-protos/v4/go/voltha"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-type CoreProxy struct {
-	kafkaICProxy        kafka.InterContainerProxy
-	adapterTopic        string
-	coreTopic           string
-	deviceIdCoreMap     map[string]string
-	lockDeviceIdCoreMap sync.RWMutex
-}
-
-func NewCoreProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string) *CoreProxy {
-	var proxy CoreProxy
-	proxy.kafkaICProxy = kafkaProxy
-	proxy.adapterTopic = adapterTopic
-	proxy.coreTopic = coreTopic
-	proxy.deviceIdCoreMap = make(map[string]string)
-	proxy.lockDeviceIdCoreMap = sync.RWMutex{}
-	logger.Debugw(ctx, "TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
-
-	return &proxy
-}
-
-func unPackResponse(ctx context.Context, rpc string, deviceId string, success bool, response *a.Any) error {
-	if success {
-		return nil
-	} else {
-		unpackResult := &ic.Error{}
-		var err error
-		if err = ptypes.UnmarshalAny(response, unpackResult); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-		}
-		logger.Debugw(ctx, "response", log.Fields{"rpc": rpc, "device-id": deviceId, "success": success, "error": err})
-		// TODO:  Need to get the real error code
-		return status.Errorf(codes.Canceled, "%s", unpackResult.Reason)
-	}
-}
-
-// UpdateCoreReference adds or update a core reference (really the topic name) for a given device Id
-func (ap *CoreProxy) UpdateCoreReference(deviceId string, coreReference string) {
-	ap.lockDeviceIdCoreMap.Lock()
-	defer ap.lockDeviceIdCoreMap.Unlock()
-	ap.deviceIdCoreMap[deviceId] = coreReference
-}
-
-// DeleteCoreReference removes a core reference (really the topic name) for a given device Id
-func (ap *CoreProxy) DeleteCoreReference(deviceId string) {
-	ap.lockDeviceIdCoreMap.Lock()
-	defer ap.lockDeviceIdCoreMap.Unlock()
-	delete(ap.deviceIdCoreMap, deviceId)
-}
-
-func (ap *CoreProxy) getCoreTopic(deviceId string) kafka.Topic {
-	ap.lockDeviceIdCoreMap.Lock()
-	defer ap.lockDeviceIdCoreMap.Unlock()
-
-	if t, exist := ap.deviceIdCoreMap[deviceId]; exist {
-		return kafka.Topic{Name: t}
-	}
-
-	return kafka.Topic{Name: ap.coreTopic}
-}
-
-func (ap *CoreProxy) getAdapterTopic(args ...string) kafka.Topic {
-	return kafka.Topic{Name: ap.adapterTopic}
-}
-
-func (ap *CoreProxy) RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error {
-	logger.Debugw(ctx, "registering-adapter", log.Fields{"coreTopic": ap.coreTopic, "adapterTopic": ap.adapterTopic})
-	rpc := "Register"
-	topic := kafka.Topic{Name: ap.coreTopic}
-	replyToTopic := ap.getAdapterTopic()
-	args := make([]*kafka.KVArg, 2)
-
-	if adapter.TotalReplicas == 0 && adapter.CurrentReplica != 0 {
-		logger.Fatal(ctx, "totalReplicas can't be 0, since you're here you have at least one")
-	}
-
-	if adapter.CurrentReplica == 0 && adapter.TotalReplicas != 0 {
-		logger.Fatal(ctx, "currentReplica can't be 0, it has to start from 1")
-	}
-
-	if adapter.CurrentReplica == 0 && adapter.TotalReplicas == 0 {
-		// if the adapter is not setting these fields they default to 0,
-		// in that case it means the adapter is not ready to be scaled and thus it defaults
-		// to a single instance
-		adapter.CurrentReplica = 1
-		adapter.TotalReplicas = 1
-	}
-
-	if adapter.CurrentReplica > adapter.TotalReplicas {
-		logger.Fatalf(ctx, "CurrentReplica (%d) can't be greater than TotalReplicas (%d)",
-			adapter.CurrentReplica, adapter.TotalReplicas)
-	}
-
-	args[0] = &kafka.KVArg{
-		Key:   "adapter",
-		Value: adapter,
-	}
-	args[1] = &kafka.KVArg{
-		Key:   "deviceTypes",
-		Value: deviceTypes,
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, "", args...)
-	logger.Debugw(ctx, "Register-Adapter-response", log.Fields{"replyTopic": replyToTopic, "success": success})
-	return unPackResponse(ctx, rpc, "", success, result)
-}
-
-func (ap *CoreProxy) DeviceUpdate(ctx context.Context, device *voltha.Device) error {
-	logger.Debugw(ctx, "DeviceUpdate", log.Fields{"device-id": device.Id})
-	rpc := "DeviceUpdate"
-	toTopic := ap.getCoreTopic(device.Id)
-	args := make([]*kafka.KVArg, 1)
-	args[0] = &kafka.KVArg{
-		Key:   "device",
-		Value: device,
-	}
-	// Use a device specific topic as we are the only adaptercore handling requests for this device
-	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, device.Id, args...)
-	logger.Debugw(ctx, "DeviceUpdate-response", log.Fields{"device-id": device.Id, "success": success})
-	return unPackResponse(ctx, rpc, device.Id, success, result)
-}
-
-func (ap *CoreProxy) PortCreated(ctx context.Context, deviceId string, port *voltha.Port) error {
-	logger.Debugw(ctx, "PortCreated", log.Fields{"portNo": port.PortNo})
-	rpc := "PortCreated"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(deviceId)
-	args := make([]*kafka.KVArg, 2)
-	id := &voltha.ID{Id: deviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-	args[1] = &kafka.KVArg{
-		Key:   "port",
-		Value: port,
-	}
-
-	// Use a device specific topic as we are the only adaptercore handling requests for this device
-	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw(ctx, "PortCreated-response", log.Fields{"device-id": deviceId, "success": success})
-	return unPackResponse(ctx, rpc, deviceId, success, result)
-}
-
-func (ap *CoreProxy) PortsStateUpdate(ctx context.Context, deviceId string, portTypeFilter uint32, operStatus voltha.OperStatus_Types) error {
-	logger.Debugw(ctx, "PortsStateUpdate", log.Fields{"device-id": deviceId})
-	rpc := "PortsStateUpdate"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(deviceId)
-	args := []*kafka.KVArg{{
-		Key:   "device_id",
-		Value: &voltha.ID{Id: deviceId},
-	}, {
-		Key:   "port_type_filter",
-		Value: &ic.IntType{Val: int64(portTypeFilter)},
-	}, {
-		Key:   "oper_status",
-		Value: &ic.IntType{Val: int64(operStatus)},
-	}}
-
-	// Use a device specific topic as we are the only adaptercore handling requests for this device
-	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw(ctx, "PortsStateUpdate-response", log.Fields{"device-id": deviceId, "success": success})
-	return unPackResponse(ctx, rpc, deviceId, success, result)
-}
-
-func (ap *CoreProxy) DeleteAllPorts(ctx context.Context, deviceId string) error {
-	logger.Debugw(ctx, "DeleteAllPorts", log.Fields{"device-id": deviceId})
-	rpc := "DeleteAllPorts"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(deviceId)
-	args := make([]*kafka.KVArg, 2)
-	id := &voltha.ID{Id: deviceId}
-
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-
-	// Use a device specific topic as we are the only adaptercore handling requests for this device
-	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw(ctx, "DeleteAllPorts-response", log.Fields{"device-id": deviceId, "success": success})
-	return unPackResponse(ctx, rpc, deviceId, success, result)
-}
-
-func (ap *CoreProxy) GetDevicePort(ctx context.Context, deviceID string, portNo uint32) (*voltha.Port, error) {
-	logger.Debugw(ctx, "GetDevicePort", log.Fields{"device-id": deviceID})
-	rpc := "GetDevicePort"
-
-	toTopic := ap.getCoreTopic(deviceID)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := []*kafka.KVArg{{
-		Key:   "device_id",
-		Value: &voltha.ID{Id: deviceID},
-	}, {
-		Key:   "port_no",
-		Value: &ic.IntType{Val: int64(portNo)},
-	}}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceID, args...)
-	logger.Debugw(ctx, "GetDevicePort-response", log.Fields{"device-id": deviceID, "success": success})
-
-	if success {
-		port := &voltha.Port{}
-		if err := ptypes.UnmarshalAny(result, port); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-			return nil, status.Error(codes.InvalidArgument, err.Error())
-		}
-		return port, nil
-	} else {
-		unpackResult := &ic.Error{}
-		var err error
-		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-		}
-		logger.Debugw(ctx, "GetDevicePort-return", log.Fields{"device-id": deviceID, "success": success, "error": err})
-		// TODO:  Need to get the real error code
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
-	}
-}
-
-func (ap *CoreProxy) ListDevicePorts(ctx context.Context, deviceID string) ([]*voltha.Port, error) {
-	logger.Debugw(ctx, "ListDevicePorts", log.Fields{"device-id": deviceID})
-	rpc := "ListDevicePorts"
-
-	toTopic := ap.getCoreTopic(deviceID)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := []*kafka.KVArg{{
-		Key:   "device_id",
-		Value: &voltha.ID{Id: deviceID},
-	}}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceID, args...)
-	logger.Debugw(ctx, "ListDevicePorts-response", log.Fields{"device-id": deviceID, "success": success})
-
-	if success {
-		ports := &voltha.Ports{}
-		if err := ptypes.UnmarshalAny(result, ports); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-			return nil, status.Error(codes.InvalidArgument, err.Error())
-		}
-		return ports.Items, nil
-	} else {
-		unpackResult := &ic.Error{}
-		var err error
-		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-		}
-		logger.Debugw(ctx, "ListDevicePorts-return", log.Fields{"device-id": deviceID, "success": success, "error": err})
-		// TODO:  Need to get the real error code
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
-	}
-}
-
-func (ap *CoreProxy) DeviceStateUpdate(ctx context.Context, deviceId string,
-	connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error {
-	logger.Debugw(ctx, "DeviceStateUpdate", log.Fields{"device-id": deviceId})
-	rpc := "DeviceStateUpdate"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(deviceId)
-	args := make([]*kafka.KVArg, 3)
-	id := &voltha.ID{Id: deviceId}
-	oStatus := &ic.IntType{Val: int64(operStatus)}
-	cStatus := &ic.IntType{Val: int64(connStatus)}
-
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-	args[1] = &kafka.KVArg{
-		Key:   "oper_status",
-		Value: oStatus,
-	}
-	args[2] = &kafka.KVArg{
-		Key:   "connect_status",
-		Value: cStatus,
-	}
-	// Use a device specific topic as we are the only adaptercore handling requests for this device
-	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw(ctx, "DeviceStateUpdate-response", log.Fields{"device-id": deviceId, "success": success})
-	return unPackResponse(ctx, rpc, deviceId, success, result)
-}
-
-func (ap *CoreProxy) ChildDeviceDetected(ctx context.Context, parentDeviceId string, parentPortNo int,
-	childDeviceType string, channelId int, vendorId string, serialNumber string, onuId int64) (*voltha.Device, error) {
-	logger.Debugw(ctx, "ChildDeviceDetected", log.Fields{"parent-device-id": parentDeviceId, "channelId": channelId})
-	rpc := "ChildDeviceDetected"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(parentDeviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 7)
-	id := &voltha.ID{Id: parentDeviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "parent_device_id",
-		Value: id,
-	}
-	ppn := &ic.IntType{Val: int64(parentPortNo)}
-	args[1] = &kafka.KVArg{
-		Key:   "parent_port_no",
-		Value: ppn,
-	}
-	cdt := &ic.StrType{Val: childDeviceType}
-	args[2] = &kafka.KVArg{
-		Key:   "child_device_type",
-		Value: cdt,
-	}
-	channel := &ic.IntType{Val: int64(channelId)}
-	args[3] = &kafka.KVArg{
-		Key:   "channel_id",
-		Value: channel,
-	}
-	vId := &ic.StrType{Val: vendorId}
-	args[4] = &kafka.KVArg{
-		Key:   "vendor_id",
-		Value: vId,
-	}
-	sNo := &ic.StrType{Val: serialNumber}
-	args[5] = &kafka.KVArg{
-		Key:   "serial_number",
-		Value: sNo,
-	}
-	oId := &ic.IntType{Val: int64(onuId)}
-	args[6] = &kafka.KVArg{
-		Key:   "onu_id",
-		Value: oId,
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw(ctx, "ChildDeviceDetected-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
-
-	if success {
-		volthaDevice := &voltha.Device{}
-		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-			return nil, status.Error(codes.InvalidArgument, err.Error())
-		}
-		return volthaDevice, nil
-	} else {
-		unpackResult := &ic.Error{}
-		var err error
-		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-		}
-		logger.Debugw(ctx, "ChildDeviceDetected-return", log.Fields{"device-id": parentDeviceId, "success": success, "error": err})
-
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
-	}
-
-}
-
-func (ap *CoreProxy) ChildDevicesLost(ctx context.Context, parentDeviceId string) error {
-	logger.Debugw(ctx, "ChildDevicesLost", log.Fields{"parent-device-id": parentDeviceId})
-	rpc := "ChildDevicesLost"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(parentDeviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 1)
-	id := &voltha.ID{Id: parentDeviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "parent_device_id",
-		Value: id,
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw(ctx, "ChildDevicesLost-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
-	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
-}
-
-func (ap *CoreProxy) ChildDevicesDetected(ctx context.Context, parentDeviceId string) error {
-	logger.Debugw(ctx, "ChildDevicesDetected", log.Fields{"parent-device-id": parentDeviceId})
-	rpc := "ChildDevicesDetected"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(parentDeviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 1)
-	id := &voltha.ID{Id: parentDeviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "parent_device_id",
-		Value: id,
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw(ctx, "ChildDevicesDetected-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
-	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
-}
-
-func (ap *CoreProxy) GetDevice(ctx context.Context, parentDeviceId string, deviceId string) (*voltha.Device, error) {
-	logger.Debugw(ctx, "GetDevice", log.Fields{"device-id": deviceId})
-	rpc := "GetDevice"
-
-	toTopic := ap.getCoreTopic(parentDeviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 1)
-	id := &voltha.ID{Id: deviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw(ctx, "GetDevice-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
-
-	if success {
-		volthaDevice := &voltha.Device{}
-		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-			return nil, status.Error(codes.InvalidArgument, err.Error())
-		}
-		return volthaDevice, nil
-	} else {
-		unpackResult := &ic.Error{}
-		var err error
-		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-		}
-		logger.Debugw(ctx, "GetDevice-return", log.Fields{"parent-device-id": parentDeviceId, "success": success, "error": err})
-		// TODO:  Need to get the real error code
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
-	}
-}
-
-func (ap *CoreProxy) GetChildDevice(ctx context.Context, parentDeviceId string, kwargs map[string]interface{}) (*voltha.Device, error) {
-	logger.Debugw(ctx, "GetChildDevice", log.Fields{"parent-device-id": parentDeviceId, "kwargs": kwargs})
-	rpc := "GetChildDevice"
-
-	toTopic := ap.getCoreTopic(parentDeviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 4)
-	id := &voltha.ID{Id: parentDeviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-
-	var cnt uint8 = 0
-	for k, v := range kwargs {
-		cnt += 1
-		if k == "serial_number" {
-			val := &ic.StrType{Val: v.(string)}
-			args[cnt] = &kafka.KVArg{
-				Key:   k,
-				Value: val,
-			}
-		} else if k == "onu_id" {
-			val := &ic.IntType{Val: int64(v.(uint32))}
-			args[cnt] = &kafka.KVArg{
-				Key:   k,
-				Value: val,
-			}
-		} else if k == "parent_port_no" {
-			val := &ic.IntType{Val: int64(v.(uint32))}
-			args[cnt] = &kafka.KVArg{
-				Key:   k,
-				Value: val,
-			}
-		}
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw(ctx, "GetChildDevice-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
-
-	if success {
-		volthaDevice := &voltha.Device{}
-		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-			return nil, status.Error(codes.InvalidArgument, err.Error())
-		}
-		return volthaDevice, nil
-	} else {
-		unpackResult := &ic.Error{}
-		var err error
-		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-		}
-		logger.Debugw(ctx, "GetChildDevice-return", log.Fields{"parent-device-id": parentDeviceId, "success": success, "error": err})
-
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
-	}
-}
-
-func (ap *CoreProxy) GetChildDevices(ctx context.Context, parentDeviceId string) (*voltha.Devices, error) {
-	logger.Debugw(ctx, "GetChildDevices", log.Fields{"parent-device-id": parentDeviceId})
-	rpc := "GetChildDevices"
-
-	toTopic := ap.getCoreTopic(parentDeviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 1)
-	id := &voltha.ID{Id: parentDeviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw(ctx, "GetChildDevices-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
-
-	if success {
-		volthaDevices := &voltha.Devices{}
-		if err := ptypes.UnmarshalAny(result, volthaDevices); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-			return nil, status.Error(codes.InvalidArgument, err.Error())
-		}
-		return volthaDevices, nil
-	} else {
-		unpackResult := &ic.Error{}
-		var err error
-		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-		}
-		logger.Debugw(ctx, "GetChildDevices-return", log.Fields{"parent-device-id": parentDeviceId, "success": success, "error": err})
-
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
-	}
-}
-
-func (ap *CoreProxy) SendPacketIn(ctx context.Context, deviceId string, port uint32, pktPayload []byte) error {
-	logger.Debugw(ctx, "SendPacketIn", log.Fields{"device-id": deviceId, "port": port, "pktPayload": pktPayload})
-	rpc := "PacketIn"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(deviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 3)
-	id := &voltha.ID{Id: deviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-	portNo := &ic.IntType{Val: int64(port)}
-	args[1] = &kafka.KVArg{
-		Key:   "port",
-		Value: portNo,
-	}
-	pkt := &ic.Packet{Payload: pktPayload}
-	args[2] = &kafka.KVArg{
-		Key:   "packet",
-		Value: pkt,
-	}
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw(ctx, "SendPacketIn-response", log.Fields{"device-id": deviceId, "success": success})
-	return unPackResponse(ctx, rpc, deviceId, success, result)
-}
-
-func (ap *CoreProxy) DeviceReasonUpdate(ctx context.Context, deviceId string, deviceReason string) error {
-	logger.Debugw(ctx, "DeviceReasonUpdate", log.Fields{"device-id": deviceId, "deviceReason": deviceReason})
-	rpc := "DeviceReasonUpdate"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(deviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 2)
-	id := &voltha.ID{Id: deviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-	reason := &ic.StrType{Val: deviceReason}
-	args[1] = &kafka.KVArg{
-		Key:   "device_reason",
-		Value: reason,
-	}
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw(ctx, "DeviceReason-response", log.Fields{"device-id": deviceId, "success": success})
-	return unPackResponse(ctx, rpc, deviceId, success, result)
-}
-
-func (ap *CoreProxy) DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error {
-	logger.Debugw(ctx, "DevicePMConfigUpdate", log.Fields{"pmConfigs": pmConfigs})
-	rpc := "DevicePMConfigUpdate"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(pmConfigs.Id)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 1)
-	args[0] = &kafka.KVArg{
-		Key:   "device_pm_config",
-		Value: pmConfigs,
-	}
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, pmConfigs.Id, args...)
-	logger.Debugw(ctx, "DevicePMConfigUpdate-response", log.Fields{"pmconfig-device-id": pmConfigs.Id, "success": success})
-	return unPackResponse(ctx, rpc, pmConfigs.Id, success, result)
-}
-
-func (ap *CoreProxy) ReconcileChildDevices(ctx context.Context, parentDeviceId string) error {
-	logger.Debugw(ctx, "ReconcileChildDevices", log.Fields{"parent-device-id": parentDeviceId})
-	rpc := "ReconcileChildDevices"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(parentDeviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := []*kafka.KVArg{
-		{Key: "parent_device_id", Value: &voltha.ID{Id: parentDeviceId}},
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw(ctx, "ReconcileChildDevices-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
-	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
-}
-
-func (ap *CoreProxy) PortStateUpdate(ctx context.Context, deviceId string, pType voltha.Port_PortType, portNum uint32,
-	operStatus voltha.OperStatus_Types) error {
-	logger.Debugw(ctx, "PortStateUpdate", log.Fields{"device-id": deviceId, "portType": pType, "portNo": portNum, "operation_status": operStatus})
-	rpc := "PortStateUpdate"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(deviceId)
-	args := make([]*kafka.KVArg, 4)
-	deviceID := &voltha.ID{Id: deviceId}
-	portNo := &ic.IntType{Val: int64(portNum)}
-	portType := &ic.IntType{Val: int64(pType)}
-	oStatus := &ic.IntType{Val: int64(operStatus)}
-
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: deviceID,
-	}
-	args[1] = &kafka.KVArg{
-		Key:   "oper_status",
-		Value: oStatus,
-	}
-	args[2] = &kafka.KVArg{
-		Key:   "port_type",
-		Value: portType,
-	}
-	args[3] = &kafka.KVArg{
-		Key:   "port_no",
-		Value: portNo,
-	}
-
-	// Use a device specific topic as we are the only adaptercore handling requests for this device
-	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw(ctx, "PortStateUpdate-response", log.Fields{"device-id": deviceId, "success": success})
-	return unPackResponse(ctx, rpc, deviceId, success, result)
-}
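The deleted core_proxy.go funnelled every core call through Kafka's InvokeRPC with KVArg lists; after this change the adapter reaches the core over gRPC instead. For reference, a hedged sketch of how the removed API was typically driven, with the Kafka proxy, topic names and device id all placeholders; only the deleted signatures above are assumed:

```go
package adapterexample

import (
	"context"

	"github.com/opencord/voltha-lib-go/v6/pkg/adapters/common"
	"github.com/opencord/voltha-lib-go/v6/pkg/kafka"
	"github.com/opencord/voltha-protos/v4/go/voltha"
)

func reportDeviceActive(ctx context.Context, kip kafka.InterContainerProxy) error {
	// Adapter and core topic names mirror the NewCoreProxy signature above.
	cp := common.NewCoreProxy(ctx, kip, "openolt", "rwcore")

	// Record which core topic owns the device, then push a state change.
	cp.UpdateCoreReference("device-1234", "rwcore_1")
	return cp.DeviceStateUpdate(ctx, "device-1234",
		voltha.ConnectStatus_REACHABLE, voltha.OperStatus_ACTIVE)
}
```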
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/performance_metrics.go b/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/performance_metrics.go
deleted file mode 100644
index 3b6d4f9..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/performance_metrics.go
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright 2019-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package common
-
-import (
-	"github.com/opencord/voltha-protos/v4/go/voltha"
-)
-
-type PmMetrics struct {
-	deviceId          string
-	frequency         uint32
-	grouped           bool
-	frequencyOverride bool
-	metrics           map[string]*voltha.PmConfig
-}
-
-type PmMetricsOption func(*PmMetrics)
-
-func Frequency(frequency uint32) PmMetricsOption {
-	return func(args *PmMetrics) {
-		args.frequency = frequency
-	}
-}
-
-func Grouped(grouped bool) PmMetricsOption {
-	return func(args *PmMetrics) {
-		args.grouped = grouped
-	}
-}
-
-func FrequencyOverride(frequencyOverride bool) PmMetricsOption {
-	return func(args *PmMetrics) {
-		args.frequencyOverride = frequencyOverride
-	}
-}
-
-// UpdateFrequency will update the frequency.
-func (pm *PmMetrics) UpdateFrequency(frequency uint32) {
-	pm.frequency = frequency
-}
-
-func Metrics(pmNames []string) PmMetricsOption {
-	return func(args *PmMetrics) {
-		args.metrics = make(map[string]*voltha.PmConfig)
-		for _, name := range pmNames {
-			args.metrics[name] = &voltha.PmConfig{
-				Name:    name,
-				Type:    voltha.PmConfig_COUNTER,
-				Enabled: true,
-			}
-		}
-	}
-}
-
-func NewPmMetrics(deviceId string, opts ...PmMetricsOption) *PmMetrics {
-	pm := &PmMetrics{deviceId: deviceId}
-	for _, option := range opts {
-		option(pm)
-	}
-	return pm
-}
-
-func (pm *PmMetrics) ToPmConfigs() *voltha.PmConfigs {
-	pmConfigs := &voltha.PmConfigs{
-		Id:           pm.deviceId,
-		DefaultFreq:  pm.frequency,
-		Grouped:      pm.grouped,
-		FreqOverride: pm.frequencyOverride,
-	}
-	for _, v := range pm.metrics {
-		pmConfigs.Metrics = append(pmConfigs.Metrics, &voltha.PmConfig{Name: v.Name, Type: v.Type, Enabled: v.Enabled})
-	}
-	return pmConfigs
-}
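The removed performance_metrics.go assembled voltha.PmConfigs through functional options. A short sketch of that former usage; the metric names and frequency are illustrative only:

```go
package adapterexample

import (
	"github.com/opencord/voltha-lib-go/v6/pkg/adapters/common"
	"github.com/opencord/voltha-protos/v4/go/voltha"
)

func buildPmConfigs(deviceID string) *voltha.PmConfigs {
	// Each option mutates the PmMetrics being built; ToPmConfigs then
	// flattens it into the protobuf message sent to the core.
	pm := common.NewPmMetrics(
		deviceID,
		common.Frequency(150),
		common.Grouped(false),
		common.FrequencyOverride(false),
		common.Metrics([]string{"rx_bytes", "tx_bytes"}),
	)
	return pm.ToPmConfigs()
}
```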
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/request_handler.go b/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/request_handler.go
deleted file mode 100644
index 1c016ae..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/request_handler.go
+++ /dev/null
@@ -1,1011 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package common
-
-import (
-	"context"
-	"errors"
-
-	"github.com/golang/protobuf/ptypes"
-	"github.com/golang/protobuf/ptypes/empty"
-	"github.com/opencord/voltha-lib-go/v6/pkg/adapters"
-	"github.com/opencord/voltha-lib-go/v6/pkg/adapters/adapterif"
-	"github.com/opencord/voltha-lib-go/v6/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-	"github.com/opencord/voltha-protos/v4/go/extension"
-	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
-	"github.com/opencord/voltha-protos/v4/go/openflow_13"
-	"github.com/opencord/voltha-protos/v4/go/voltha"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-type RequestHandlerProxy struct {
-	TestMode       bool
-	coreInstanceId string
-	adapter        adapters.IAdapter
-	coreProxy      adapterif.CoreProxy
-}
-
-func NewRequestHandlerProxy(coreInstanceId string, iadapter adapters.IAdapter, cProxy adapterif.CoreProxy) *RequestHandlerProxy {
-	var proxy RequestHandlerProxy
-	proxy.coreInstanceId = coreInstanceId
-	proxy.adapter = iadapter
-	proxy.coreProxy = cProxy
-	return &proxy
-}
-
-func (rhp *RequestHandlerProxy) Adapter_descriptor() (*empty.Empty, error) {
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Device_types() (*voltha.DeviceTypes, error) {
-	return nil, nil
-}
-
-func (rhp *RequestHandlerProxy) Health() (*voltha.HealthStatus, error) {
-	return nil, nil
-}
-
-func (rhp *RequestHandlerProxy) Adopt_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, err
-	}
-	device := &voltha.Device{}
-	transactionID := &ic.StrType{}
-	fromTopic := &ic.StrType{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "device":
-			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.TransactionKey:
-			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.FromTopic:
-			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-
-	logger.Debugw(ctx, "Adopt_device", log.Fields{"deviceId": device.Id})
-
-	//Update the core reference for that device
-	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
-
-	//Invoke the adopt device on the adapter
-	if err := rhp.adapter.Adopt_device(ctx, device); err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Reconcile_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, err
-	}
-
-	device := &voltha.Device{}
-	transactionID := &ic.StrType{}
-	fromTopic := &ic.StrType{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "device":
-			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.TransactionKey:
-			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.FromTopic:
-			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-	//Update the core reference for that device
-	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
-
-	//Invoke the reconcile device API on the adapter
-	if err := rhp.adapter.Reconcile_device(ctx, device); err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Abandon_device(args []*ic.Argument) (*empty.Empty, error) {
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Disable_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, err
-	}
-
-	device := &voltha.Device{}
-	transactionID := &ic.StrType{}
-	fromTopic := &ic.StrType{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "device":
-			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.TransactionKey:
-			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.FromTopic:
-			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-	//Update the core reference for that device
-	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
-	//Invoke the Disable_device API on the adapter
-	if err := rhp.adapter.Disable_device(ctx, device); err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Reenable_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, err
-	}
-
-	device := &voltha.Device{}
-	transactionID := &ic.StrType{}
-	fromTopic := &ic.StrType{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "device":
-			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.TransactionKey:
-			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.FromTopic:
-			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-	//Update the core reference for that device
-	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
-	//Invoke the Reenable_device API on the adapter
-	if err := rhp.adapter.Reenable_device(ctx, device); err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Reboot_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, err
-	}
-
-	device := &voltha.Device{}
-	transactionID := &ic.StrType{}
-	fromTopic := &ic.StrType{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "device":
-			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.TransactionKey:
-			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.FromTopic:
-			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-	//Update the core reference for that device
-	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
-	//Invoke the Reboot_device API on the adapter
-	if err := rhp.adapter.Reboot_device(ctx, device); err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return new(empty.Empty), nil
-
-}
-
-func (rhp *RequestHandlerProxy) Self_test_device(args []*ic.Argument) (*empty.Empty, error) {
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Delete_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, err
-	}
-
-	device := &voltha.Device{}
-	transactionID := &ic.StrType{}
-	fromTopic := &ic.StrType{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "device":
-			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.TransactionKey:
-			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.FromTopic:
-			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-	//Update the core reference for that device
-	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
-	//Invoke the delete_device API on the adapter
-	if err := rhp.adapter.Delete_device(ctx, device); err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return new(empty.Empty), nil
-}
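The lifecycle handlers above (Adopt, Reconcile, Disable, Reenable, Reboot, Delete) all decode the same kafka argument list before dispatching to the adapter. Below is a minimal sketch of that shared decode step, written as a hypothetical helper that reuses the file's existing imports (ptypes, ic, kafka, voltha); it is illustrative only and not part of the deleted library.

```go
// unmarshalDeviceArgs is a hypothetical helper capturing the decode pattern shared by the
// handlers above: extract the device, transaction ID and reply topic from the
// inter-container argument list before invoking the adapter.
func unmarshalDeviceArgs(ctx context.Context, args []*ic.Argument) (*voltha.Device, *ic.StrType, *ic.StrType, error) {
	device := &voltha.Device{}
	transactionID := &ic.StrType{}
	fromTopic := &ic.StrType{}
	for _, arg := range args {
		switch arg.Key {
		case "device":
			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
				return nil, nil, nil, err
			}
		case kafka.TransactionKey:
			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
				return nil, nil, nil, err
			}
		case kafka.FromTopic:
			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
				return nil, nil, nil, err
			}
		}
	}
	return device, transactionID, fromTopic, nil
}
```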
-
-func (rhp *RequestHandlerProxy) Get_device_details(args []*ic.Argument) (*empty.Empty, error) {
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Update_flows_bulk(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debug(ctx, "Update_flows_bulk")
-	if len(args) < 5 {
-		logger.Warn(ctx, "Update_flows_bulk-invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, err
-	}
-	device := &voltha.Device{}
-	transactionID := &ic.StrType{}
-	flows := &voltha.Flows{}
-	flowMetadata := &voltha.FlowMetadata{}
-	groups := &voltha.FlowGroups{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "device":
-			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		case "flows":
-			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-flows", log.Fields{"error": err})
-				return nil, err
-			}
-		case "groups":
-			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-groups", log.Fields{"error": err})
-				return nil, err
-			}
-		case "flow_metadata":
-			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-metadata", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.TransactionKey:
-			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-	logger.Debugw(ctx, "Update_flows_bulk", log.Fields{"flows": flows, "groups": groups})
-	//Invoke the bulk flow update API of the adapter
-	if err := rhp.adapter.Update_flows_bulk(ctx, device, flows, groups, flowMetadata); err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Update_flows_incrementally(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debug(ctx, "Update_flows_incrementally")
-	if len(args) < 5 {
-		logger.Warn(ctx, "Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, err
-	}
-	device := &voltha.Device{}
-	transactionID := &ic.StrType{}
-	flows := &openflow_13.FlowChanges{}
-	flowMetadata := &voltha.FlowMetadata{}
-	groups := &openflow_13.FlowGroupChanges{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "device":
-			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		case "flow_changes":
-			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-flows", log.Fields{"error": err})
-				return nil, err
-			}
-		case "group_changes":
-			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-groups", log.Fields{"error": err})
-				return nil, err
-			}
-		case "flow_metadata":
-			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-metadata", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.TransactionKey:
-			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-	logger.Debugw(ctx, "Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
-	//Invoke the incremental flow update API of the adapter
-	if err := rhp.adapter.Update_flows_incrementally(ctx, device, flows, groups, flowMetadata); err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Update_pm_config(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debug(ctx, "Update_pm_config")
-	if len(args) < 2 {
-		logger.Warn(ctx, "Update_pm_config-invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, err
-	}
-	device := &voltha.Device{}
-	transactionID := &ic.StrType{}
-	pmConfigs := &voltha.PmConfigs{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "device":
-			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		case "pm_configs":
-			if err := ptypes.UnmarshalAny(arg.Value, pmConfigs); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-pm-configs", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.TransactionKey:
-			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-	logger.Debugw(ctx, "Update_pm_config", log.Fields{"device-id": device.Id, "pmConfigs": pmConfigs})
-	//Invoke the pm config update API of the adapter
-	if err := rhp.adapter.Update_pm_config(ctx, device, pmConfigs); err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Receive_packet_out(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debugw(ctx, "Receive_packet_out", log.Fields{"args": args})
-	if len(args) < 3 {
-		logger.Warn(ctx, "Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, err
-	}
-	deviceId := &ic.StrType{}
-	egressPort := &ic.IntType{}
-	packet := &openflow_13.OfpPacketOut{}
-	transactionID := &ic.StrType{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "deviceId":
-			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device-id", log.Fields{"error": err})
-				return nil, err
-			}
-		case "outPort":
-			if err := ptypes.UnmarshalAny(arg.Value, egressPort); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-egressPort", log.Fields{"error": err})
-				return nil, err
-			}
-		case "packet":
-			if err := ptypes.UnmarshalAny(arg.Value, packet); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-packet", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.TransactionKey:
-			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-	logger.Debugw(ctx, "Receive_packet_out", log.Fields{"device-id": deviceId.Val, "outPort": egressPort, "packet": packet})
-	//Invoke the Receive_packet_out API on the adapter
-	if err := rhp.adapter.Receive_packet_out(ctx, deviceId.Val, int(egressPort.Val), packet); err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Suppress_alarm(args []*ic.Argument) (*empty.Empty, error) {
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Unsuppress_alarm(args []*ic.Argument) (*empty.Empty, error) {
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Get_ofp_device_info(ctx context.Context, args []*ic.Argument) (*ic.SwitchCapability, error) {
-	if len(args) < 2 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, err
-	}
-	device := &voltha.Device{}
-	transactionID := &ic.StrType{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "device":
-			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.TransactionKey:
-			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-
-	logger.Debugw(ctx, "Get_ofp_device_info", log.Fields{"device-id": device.Id})
-
-	var cap *ic.SwitchCapability
-	var err error
-	if cap, err = rhp.adapter.Get_ofp_device_info(ctx, device); err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	logger.Debugw(ctx, "Get_ofp_device_info", log.Fields{"cap": cap})
-	return cap, nil
-}
-
-func (rhp *RequestHandlerProxy) Process_inter_adapter_message(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	if len(args) < 2 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, err
-	}
-	iaMsg := &ic.InterAdapterMessage{}
-	transactionID := &ic.StrType{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "msg":
-			if err := ptypes.UnmarshalAny(arg.Value, iaMsg); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.TransactionKey:
-			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-
-	logger.Debugw(ctx, "Process_inter_adapter_message", log.Fields{"msgId": iaMsg.Header.Id})
-
-	//Invoke the inter adapter API on the adapter
-	if err := rhp.adapter.Process_inter_adapter_message(ctx, iaMsg); err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-
-	return new(empty.Empty), nil
-}
-
-func (rhp *RequestHandlerProxy) Process_tech_profile_instance_request(ctx context.Context, args []*ic.Argument) (*ic.InterAdapterTechProfileDownloadMessage, error) {
-	if len(args) < 2 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, err
-	}
-	iaTpReqMsg := &ic.InterAdapterTechProfileInstanceRequestMessage{}
-	transactionID := &ic.StrType{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "msg":
-			if err := ptypes.UnmarshalAny(arg.Value, iaTpReqMsg); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		case kafka.TransactionKey:
-			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-
-	logger.Debugw(ctx, "Process_tech_profile_instance_request", log.Fields{"tpPath": iaTpReqMsg.TpInstancePath})
-
-	//Invoke the tech profile instance request
-	tpInst := rhp.adapter.Process_tech_profile_instance_request(ctx, iaTpReqMsg)
-
-	return tpInst, nil
-}
-
-func (rhp *RequestHandlerProxy) Download_image(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
-	device, image, err := unMarshalImageDowload(args, ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	imageDownload, err := rhp.adapter.Download_image(ctx, device, image)
-	if err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return imageDownload, nil
-}
-
-func (rhp *RequestHandlerProxy) Get_image_download_status(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
-	device, image, err := unMarshalImageDowload(args, ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	imageDownload, err := rhp.adapter.Get_image_download_status(ctx, device, image)
-	if err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return imageDownload, nil
-}
-
-func (rhp *RequestHandlerProxy) Cancel_image_download(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
-	device, image, err := unMarshalImageDowload(args, ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	imageDownload, err := rhp.adapter.Cancel_image_download(ctx, device, image)
-	if err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return imageDownload, nil
-}
-
-func (rhp *RequestHandlerProxy) Activate_image_update(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
-
-	device, image, err := unMarshalImageDowload(args, ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	imageDownload, err := rhp.adapter.Activate_image_update(ctx, device, image)
-	if err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return imageDownload, nil
-}
-
-func (rhp *RequestHandlerProxy) Revert_image_update(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
-	device, image, err := unMarshalImageDowload(args, ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	imageDownload, err := rhp.adapter.Revert_image_update(ctx, device, image)
-	if err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return imageDownload, nil
-}
-
-func unMarshalImageDowload(args []*ic.Argument, ctx context.Context) (*voltha.Device, *voltha.ImageDownload, error) {
-	if len(args) < 4 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, nil, err
-	}
-	device := &voltha.Device{}
-	image := &voltha.ImageDownload{}
-	transactionID := &ic.StrType{}
-	fromTopic := &ic.StrType{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "device":
-			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, nil, err
-			}
-		case "request":
-			if err := ptypes.UnmarshalAny(arg.Value, image); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-image", log.Fields{"error": err})
-				return nil, nil, err
-			}
-		case kafka.TransactionKey:
-			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
-				return nil, nil, err
-			}
-		case kafka.FromTopic:
-			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
-				return nil, nil, err
-			}
-		}
-	}
-	return device, image, nil
-}
-
-func (rhp *RequestHandlerProxy) Enable_port(ctx context.Context, args []*ic.Argument) error {
-	logger.Debugw(ctx, "enable_port", log.Fields{"args": args})
-	deviceId, port, err := rhp.getEnableDisableParams(ctx, args)
-	if err != nil {
-		logger.Warnw(ctx, "enable_port", log.Fields{"args": args, "device-id": deviceId, "port": port})
-		return err
-	}
-	return rhp.adapter.Enable_port(ctx, deviceId, port)
-}
-
-func (rhp *RequestHandlerProxy) Disable_port(ctx context.Context, args []*ic.Argument) error {
-	logger.Debugw(ctx, "disable_port", log.Fields{"args": args})
-	deviceId, port, err := rhp.getEnableDisableParams(ctx, args)
-	if err != nil {
-		logger.Warnw(ctx, "disable_port", log.Fields{"args": args, "device-id": deviceId, "port": port})
-		return err
-	}
-	return rhp.adapter.Disable_port(ctx, deviceId, port)
-}
-
-func (rhp *RequestHandlerProxy) getEnableDisableParams(ctx context.Context, args []*ic.Argument) (string, *voltha.Port, error) {
-	logger.Debugw(ctx, "getEnableDisableParams", log.Fields{"args": args})
-	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		return "", nil, errors.New("invalid-number-of-args")
-	}
-	deviceId := &ic.StrType{}
-	port := &voltha.Port{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "deviceId":
-			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return "", nil, err
-			}
-		case "port":
-			if err := ptypes.UnmarshalAny(arg.Value, port); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-port", log.Fields{"error": err})
-				return "", nil, err
-			}
-		}
-	}
-	return deviceId.Val, port, nil
-}
-
-func (rhp *RequestHandlerProxy) Child_device_lost(ctx context.Context, args []*ic.Argument) error {
-	if len(args) < 2 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		return errors.New("invalid-number-of-args")
-	}
-	childDevice := &voltha.Device{}
-	fromTopic := &ic.StrType{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "childDevice":
-			if err := ptypes.UnmarshalAny(arg.Value, childDevice); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-child-device", log.Fields{"error": err})
-				return err
-			}
-		case kafka.FromTopic:
-			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
-				return err
-			}
-		}
-	}
-	//Update the core reference for that device
-	rhp.coreProxy.UpdateCoreReference(childDevice.ParentId, fromTopic.Val)
-	//Invoke the Child_device_lost API on the adapter
-	if err := rhp.adapter.Child_device_lost(ctx, childDevice); err != nil {
-		return status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return nil
-}
-
-func (rhp *RequestHandlerProxy) Start_omci_test(ctx context.Context, args []*ic.Argument) (*ic.TestResponse, error) {
-	if len(args) < 2 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return nil, err
-	}
-
-	// TODO: See related comment in voltha-go:adapter_proxy_go:startOmciTest()
-	//   Second argument should perhaps be uuid instead of omcitestrequest
-
-	device := &voltha.Device{}
-	request := &voltha.OmciTestRequest{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "device":
-			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		case "omcitestrequest":
-			if err := ptypes.UnmarshalAny(arg.Value, request); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-omcitestrequest", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-	logger.Debugw(ctx, "Start_omci_test", log.Fields{"device-id": device.Id, "req": request})
-	result, err := rhp.adapter.Start_omci_test(ctx, device, request)
-	if err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return result, nil
-}
-func (rhp *RequestHandlerProxy) Get_ext_value(ctx context.Context, args []*ic.Argument) (*voltha.ReturnValues, error) {
-	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		return nil, errors.New("invalid-number-of-args")
-	}
-
-	pDeviceId := &ic.StrType{}
-	device := &voltha.Device{}
-	valuetype := &ic.IntType{}
-	for _, arg := range args {
-		switch arg.Key {
-		case "device":
-			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		case "pDeviceId":
-			if err := ptypes.UnmarshalAny(arg.Value, pDeviceId); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-parent-device-id", log.Fields{"error": err})
-				return nil, err
-			}
-		case "valuetype":
-			if err := ptypes.UnmarshalAny(arg.Value, valuetype); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-valuetype", log.Fields{"error": err})
-				return nil, err
-			}
-		default:
-			logger.Warnw(ctx, "key-not-found", log.Fields{"arg.Key": arg.Key})
-		}
-	}
-
-	//Invoke the Get_ext_value API on the adapter
-	return rhp.adapter.Get_ext_value(ctx, pDeviceId.Val, device, voltha.ValueType_Type(valuetype.Val))
-}
-
-func (rhp *RequestHandlerProxy) Single_get_value_request(ctx context.Context, args []*ic.Argument) (*extension.SingleGetValueResponse, error) {
-	logger.Debugw(ctx, "req handler Single_get_value_request", log.Fields{"no of args": len(args), "args": args})
-
-	if len(args) < 1 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		return nil, errors.New("invalid-number-of-args")
-	}
-	singleGetvalueReq := extension.SingleGetValueRequest{}
-	errResp := func(status extension.GetValueResponse_Status, reason extension.GetValueResponse_ErrorReason) *extension.SingleGetValueResponse {
-		return &extension.SingleGetValueResponse{
-			Response: &extension.GetValueResponse{
-				Status:    status,
-				ErrReason: reason,
-			},
-		}
-	}
-	for _, arg := range args {
-		switch arg.Key {
-		case "request":
-			if err := ptypes.UnmarshalAny(arg.Value, &singleGetvalueReq); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-singleGetvalueReq", log.Fields{"error": err})
-				return errResp(extension.GetValueResponse_ERROR, extension.GetValueResponse_REASON_UNDEFINED), nil
-			}
-		default:
-			logger.Warnw(ctx, "key-not-found", log.Fields{"arg.Key": arg.Key})
-		}
-	}
-	logger.Debugw(ctx, "invoke rhp.adapter.Single_get_value_request ", log.Fields{"singleGetvalueReq": singleGetvalueReq})
-	return rhp.adapter.Single_get_value_request(ctx, singleGetvalueReq)
-}
-
-func (rhp *RequestHandlerProxy) getDeviceDownloadImageRequest(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageDownloadRequest, error) {
-	logger.Debugw(ctx, "getDeviceDownloadImageRequest", log.Fields{"args": args})
-	if len(args) < 1 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		return nil, errors.New("invalid-number-of-args")
-	}
-
-	deviceDownloadImageReq := ic.DeviceImageDownloadRequest{}
-
-	for _, arg := range args {
-		switch arg.Key {
-		case "deviceImageDownloadReq":
-			if err := ptypes.UnmarshalAny(arg.Value, &deviceDownloadImageReq); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-	return &deviceDownloadImageReq, nil
-}
-
-func (rhp *RequestHandlerProxy) getDeviceImageRequest(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageRequest, error) {
-	logger.Debugw(ctx, "getDeviceImageRequest", log.Fields{"args": args})
-	if len(args) < 1 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
-		return nil, errors.New("invalid-number-of-args")
-	}
-
-	deviceImageReq := ic.DeviceImageRequest{}
-
-	for _, arg := range args {
-		switch arg.Key {
-		case "deviceImageReq":
-			if err := ptypes.UnmarshalAny(arg.Value, &deviceImageReq); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return nil, err
-			}
-		}
-	}
-	return &deviceImageReq, nil
-}
-
-func (rhp *RequestHandlerProxy) getDeviceID(ctx context.Context, args []*ic.Argument) (string, error) {
-	logger.Debugw(ctx, "getDeviceID", log.Fields{"args": args})
-
-	deviceId := &ic.StrType{}
-
-	for _, arg := range args {
-		switch arg.Key {
-		case "deviceId":
-			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
-				return "", err
-			}
-		}
-	}
-
-	if deviceId.Val == "" {
-		return "", errors.New("invalid argument")
-	}
-
-	return deviceId.Val, nil
-}
-
-func (rhp *RequestHandlerProxy) Download_onu_image(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
-	imageDownloadReq, err := rhp.getDeviceDownloadImageRequest(ctx, args)
-	if err != nil {
-		return nil, err
-	}
-
-	imageDownloadRsp, err := rhp.adapter.Download_onu_image(ctx, imageDownloadReq)
-	if err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return imageDownloadRsp, nil
-}
-
-func (rhp *RequestHandlerProxy) Get_onu_image_status(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
-	imageStatusReq, err := rhp.getDeviceImageRequest(ctx, args)
-	if err != nil {
-		return nil, err
-	}
-
-	imageStatus, err := rhp.adapter.Get_onu_image_status(ctx, imageStatusReq)
-	if err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return imageStatus, nil
-}
-
-func (rhp *RequestHandlerProxy) Abort_onu_image_upgrade(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
-	imageAbortReq, err := rhp.getDeviceImageRequest(ctx, args)
-	if err != nil {
-		return nil, err
-	}
-
-	imageAbortRsp, err := rhp.adapter.Abort_onu_image_upgrade(ctx, imageAbortReq)
-	if err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return imageAbortRsp, nil
-}
-
-func (rhp *RequestHandlerProxy) Activate_onu_image(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
-	imageActivateReq, err := rhp.getDeviceImageRequest(ctx, args)
-	if err != nil {
-		return nil, err
-	}
-
-	imageActivateRsp, err := rhp.adapter.Activate_onu_image(ctx, imageActivateReq)
-	if err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return imageActivateRsp, nil
-}
-
-func (rhp *RequestHandlerProxy) Commit_onu_image(ctx context.Context, args []*ic.Argument) (*voltha.DeviceImageResponse, error) {
-	imageCommitReq, err := rhp.getDeviceImageRequest(ctx, args)
-	if err != nil {
-		return nil, err
-	}
-
-	imageCommitRsp, err := rhp.adapter.Commit_onu_image(ctx, imageCommitReq)
-	if err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-
-	return imageCommitRsp, nil
-}
-
-func (rhp *RequestHandlerProxy) Get_onu_images(ctx context.Context, args []*ic.Argument) (*voltha.OnuImages, error) {
-	deviceID, err := rhp.getDeviceID(ctx, args)
-	if err != nil {
-		return nil, err
-	}
-
-	onuImages, err := rhp.adapter.Get_onu_images(ctx, deviceID)
-	if err != nil {
-		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
-	}
-	return onuImages, nil
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/utils.go b/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/utils.go
deleted file mode 100644
index 6516850..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/common/utils.go
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package common
-
-import (
-	"context"
-	"fmt"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
-	"google.golang.org/grpc/codes"
-	"math/rand"
-	"time"
-)
-
-//GetRandomSerialNumber returns a random serial number formatted as "IP:PORT"
-func GetRandomSerialNumber() string {
-	rand.Seed(time.Now().UnixNano())
-	return fmt.Sprintf("%d.%d.%d.%d:%d",
-		rand.Intn(255),
-		rand.Intn(255),
-		rand.Intn(255),
-		rand.Intn(255),
-		rand.Intn(9000)+1000,
-	)
-}
-
-//GetRandomMacAddress returns a random mac address
-func GetRandomMacAddress() string {
-	rand.Seed(time.Now().UnixNano())
-	return fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x",
-		rand.Intn(128),
-		rand.Intn(128),
-		rand.Intn(128),
-		rand.Intn(128),
-		rand.Intn(128),
-		rand.Intn(128),
-	)
-}
-
-const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
-const (
-	letterIdxBits = 6                    // 6 bits to represent a letter index
-	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
-	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
-)
-
-var src = rand.NewSource(time.Now().UnixNano())
-
-func GetRandomString(n int) string {
-	b := make([]byte, n)
-	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
-	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
-		if remain == 0 {
-			cache, remain = src.Int63(), letterIdxMax
-		}
-		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
-			b[i] = letterBytes[idx]
-			i--
-		}
-		cache >>= letterIdxBits
-		remain--
-	}
-	return string(b)
-}
-
-func ICProxyErrorCodeToGrpcErrorCode(ctx context.Context, icErr ic.ErrorCodeCodes) codes.Code {
-	switch icErr {
-	case ic.ErrorCode_INVALID_PARAMETERS:
-		return codes.InvalidArgument
-	case ic.ErrorCode_UNSUPPORTED_REQUEST:
-		return codes.Unavailable
-	case ic.ErrorCode_DEADLINE_EXCEEDED:
-		return codes.DeadlineExceeded
-	default:
-		logger.Warnw(ctx, "cannnot-map-ic-error-code-to-grpc-error-code", log.Fields{"err": icErr})
-		return codes.Internal
-	}
-}
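For context, a brief usage sketch of the mapper above, as an adapter-side caller might apply it when turning an inter-container error code into a gRPC error. The wrapper function and the grpc `status` import are assumptions, not part of the deleted file.

```go
// toGrpcError is a hypothetical wrapper: it maps an inter-container error code to a
// gRPC code and attaches a message.
func toGrpcError(ctx context.Context, icErr ic.ErrorCodeCodes, msg string) error {
	return status.Error(common.ICProxyErrorCodeToGrpcErrorCode(ctx, icErr), msg)
}
```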
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/iAdapter.go b/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/iAdapter.go
deleted file mode 100644
index aca4271..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/adapters/iAdapter.go
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package adapters
-
-import (
-	"context"
-
-	"github.com/opencord/voltha-protos/v4/go/extension"
-	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
-	"github.com/opencord/voltha-protos/v4/go/openflow_13"
-	"github.com/opencord/voltha-protos/v4/go/voltha"
-)
-
-//IAdapter represents the set of APIs a voltha adapter has to support.
-type IAdapter interface {
-	Adapter_descriptor(ctx context.Context) error
-	Device_types(ctx context.Context) (*voltha.DeviceTypes, error)
-	Health(ctx context.Context) (*voltha.HealthStatus, error)
-	Adopt_device(ctx context.Context, device *voltha.Device) error
-	Reconcile_device(ctx context.Context, device *voltha.Device) error
-	Abandon_device(ctx context.Context, device *voltha.Device) error
-	Disable_device(ctx context.Context, device *voltha.Device) error
-	Reenable_device(ctx context.Context, device *voltha.Device) error
-	Reboot_device(ctx context.Context, device *voltha.Device) error
-	Self_test_device(ctx context.Context, device *voltha.Device) error
-	Delete_device(ctx context.Context, device *voltha.Device) error
-	Get_device_details(ctx context.Context, device *voltha.Device) error
-	Update_flows_bulk(ctx context.Context, device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error
-	Update_flows_incrementally(ctx context.Context, device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error
-	Update_pm_config(ctx context.Context, device *voltha.Device, pm_configs *voltha.PmConfigs) error
-	Receive_packet_out(ctx context.Context, deviceId string, egress_port_no int, msg *openflow_13.OfpPacketOut) error
-	Suppress_event(ctx context.Context, filter *voltha.EventFilter) error
-	Unsuppress_event(ctx context.Context, filter *voltha.EventFilter) error
-	Get_ofp_device_info(ctx context.Context, device *voltha.Device) (*ic.SwitchCapability, error)
-	Process_inter_adapter_message(ctx context.Context, msg *ic.InterAdapterMessage) error
-	Process_tech_profile_instance_request(ctx context.Context, msg *ic.InterAdapterTechProfileInstanceRequestMessage) *ic.InterAdapterTechProfileDownloadMessage
-	Download_image(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Get_image_download_status(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Cancel_image_download(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Activate_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Revert_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Enable_port(ctx context.Context, deviceId string, port *voltha.Port) error
-	Disable_port(ctx context.Context, deviceId string, port *voltha.Port) error
-	Child_device_lost(ctx context.Context, childDevice *voltha.Device) error
-	Start_omci_test(ctx context.Context, device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error)
-	Get_ext_value(ctx context.Context, deviceId string, device *voltha.Device, valueflag voltha.ValueType_Type) (*voltha.ReturnValues, error)
-	Single_get_value_request(ctx context.Context, request extension.SingleGetValueRequest) (*extension.SingleGetValueResponse, error)
-	Download_onu_image(ctx context.Context, request *voltha.DeviceImageDownloadRequest) (*voltha.DeviceImageResponse, error)
-	Get_onu_image_status(ctx context.Context, in *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
-	Abort_onu_image_upgrade(ctx context.Context, in *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
-	Get_onu_images(ctx context.Context, deviceID string) (*voltha.OnuImages, error)
-	Activate_onu_image(ctx context.Context, in *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
-	Commit_onu_image(ctx context.Context, in *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/endpoint_manager.go b/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/endpoint_manager.go
deleted file mode 100644
index b10d7bf..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/endpoint_manager.go
+++ /dev/null
@@ -1,352 +0,0 @@
-/*
- * Copyright 2020-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package kafka
-
-import (
-	"context"
-	"fmt"
-	"github.com/buraksezer/consistent"
-	"github.com/cespare/xxhash"
-	"github.com/golang/protobuf/proto"
-	"github.com/opencord/voltha-lib-go/v6/pkg/db"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-	"github.com/opencord/voltha-protos/v4/go/voltha"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-	"sync"
-)
-
-const (
-	// All the values below can be tuned to get optimal data distribution.  The numbers below seem to work well when
-	// supporting 1000-10000 devices and 1-20 replicas of a service
-
-	// Keys are distributed among partitions. Prime numbers are good to distribute keys uniformly.
-	DefaultPartitionCount = 1117
-
-	// Represents how many times a node is replicated on the consistent ring.
-	DefaultReplicationFactor = 117
-
-	// Load is used to calculate average load.
-	DefaultLoad = 1.1
-)
-
-type Endpoint string // Endpoint of a service instance.  When using kafka, this is the topic name of a service
-type ReplicaID int32 // The replication ID of a service instance
-
-type EndpointManager interface {
-
-	// GetEndpoint is called to get the endpoint to communicate with for a specific device and service type.  For
-	// now this will return the topic name
-	GetEndpoint(ctx context.Context, deviceID string, serviceType string) (Endpoint, error)
-
-	// IsDeviceOwnedByService is invoked when a specific service (service type + replicaNumber) is restarted and
-	// devices owned by that service need to be reconciled
-	IsDeviceOwnedByService(ctx context.Context, deviceID string, serviceType string, replicaNumber int32) (bool, error)
-
-	// GetReplicaAssignment returns the replica number of the service that owns the deviceID.  This is used by the
-	// test only
-	GetReplicaAssignment(ctx context.Context, deviceID string, serviceType string) (ReplicaID, error)
-}
-
-type service struct {
-	id             string // Id of the service.  The same id is used for all replicas
-	totalReplicas  int32
-	replicas       map[ReplicaID]Endpoint
-	consistentRing *consistent.Consistent
-}
-
-type endpointManager struct {
-	partitionCount           int
-	replicationFactor        int
-	load                     float64
-	backend                  *db.Backend
-	services                 map[string]*service
-	servicesLock             sync.RWMutex
-	deviceTypeServiceMap     map[string]string
-	deviceTypeServiceMapLock sync.RWMutex
-}
-
-type EndpointManagerOption func(*endpointManager)
-
-func PartitionCount(count int) EndpointManagerOption {
-	return func(args *endpointManager) {
-		args.partitionCount = count
-	}
-}
-
-func ReplicationFactor(replicas int) EndpointManagerOption {
-	return func(args *endpointManager) {
-		args.replicationFactor = replicas
-	}
-}
-
-func Load(load float64) EndpointManagerOption {
-	return func(args *endpointManager) {
-		args.load = load
-	}
-}
-
-func newEndpointManager(backend *db.Backend, opts ...EndpointManagerOption) EndpointManager {
-	tm := &endpointManager{
-		partitionCount:       DefaultPartitionCount,
-		replicationFactor:    DefaultReplicationFactor,
-		load:                 DefaultLoad,
-		backend:              backend,
-		services:             make(map[string]*service),
-		deviceTypeServiceMap: make(map[string]string),
-	}
-
-	for _, option := range opts {
-		option(tm)
-	}
-	return tm
-}
-
-func NewEndpointManager(backend *db.Backend, opts ...EndpointManagerOption) EndpointManager {
-	return newEndpointManager(backend, opts...)
-}
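For illustration only (not part of the deleted file), a consumer such as rw-core might construct the manager over its KV-store backend and ask which adapter topic owns a device. The option values below simply restate the defaults, and the "openolt" service type is an assumed example.

```go
// ownerTopic is a hypothetical caller-side sketch that resolves the kafka topic
// owning a given device via the consistent-hash ring.
func ownerTopic(ctx context.Context, backend *db.Backend, deviceID string) (string, error) {
	em := kafka.NewEndpointManager(backend,
		kafka.PartitionCount(1117),
		kafka.ReplicationFactor(117),
		kafka.Load(1.1),
	)
	endpoint, err := em.GetEndpoint(ctx, deviceID, "openolt")
	if err != nil {
		return "", err
	}
	return string(endpoint), nil
}
```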
-
-func (ep *endpointManager) GetEndpoint(ctx context.Context, deviceID string, serviceType string) (Endpoint, error) {
-	logger.Debugw(ctx, "getting-endpoint", log.Fields{"device-id": deviceID, "service": serviceType})
-	owner, err := ep.getOwner(ctx, deviceID, serviceType)
-	if err != nil {
-		return "", err
-	}
-	m, ok := owner.(Member)
-	if !ok {
-		return "", status.Errorf(codes.Aborted, "invalid-member-%v", owner)
-	}
-	endpoint := m.getEndPoint()
-	if endpoint == "" {
-		return "", status.Errorf(codes.Unavailable, "endpoint-not-set-%s", serviceType)
-	}
-	logger.Debugw(ctx, "returning-endpoint", log.Fields{"device-id": deviceID, "service": serviceType, "endpoint": endpoint})
-	return endpoint, nil
-}
-
-func (ep *endpointManager) IsDeviceOwnedByService(ctx context.Context, deviceID string, serviceType string, replicaNumber int32) (bool, error) {
-	logger.Debugw(ctx, "device-ownership", log.Fields{"device-id": deviceID, "service": serviceType, "replica-number": replicaNumber})
-	owner, err := ep.getOwner(ctx, deviceID, serviceType)
-	if err != nil {
-		return false, nil
-	}
-	m, ok := owner.(Member)
-	if !ok {
-		return false, status.Errorf(codes.Aborted, "invalid-member-%v", owner)
-	}
-	return m.getReplica() == ReplicaID(replicaNumber), nil
-}
-
-func (ep *endpointManager) GetReplicaAssignment(ctx context.Context, deviceID string, serviceType string) (ReplicaID, error) {
-	owner, err := ep.getOwner(ctx, deviceID, serviceType)
-	if err != nil {
-		return 0, nil
-	}
-	m, ok := owner.(Member)
-	if !ok {
-		return 0, status.Errorf(codes.Aborted, "invalid-member-%v", owner)
-	}
-	return m.getReplica(), nil
-}
-
-func (ep *endpointManager) getOwner(ctx context.Context, deviceID string, serviceType string) (consistent.Member, error) {
-	serv, dType, err := ep.getServiceAndDeviceType(ctx, serviceType)
-	if err != nil {
-		return nil, err
-	}
-	key := ep.makeKey(deviceID, dType, serviceType)
-	return serv.consistentRing.LocateKey(key), nil
-}
-
-func (ep *endpointManager) getServiceAndDeviceType(ctx context.Context, serviceType string) (*service, string, error) {
-	// Check whether service exist
-	ep.servicesLock.RLock()
-	serv, serviceExist := ep.services[serviceType]
-	ep.servicesLock.RUnlock()
-
-	// Load the service and device types if needed
-	if !serviceExist || serv == nil || int(serv.totalReplicas) != len(serv.consistentRing.GetMembers()) {
-		if err := ep.loadServices(ctx); err != nil {
-			return nil, "", err
-		}
-
-		// Check whether the service exists now
-		ep.servicesLock.RLock()
-		serv, serviceExist = ep.services[serviceType]
-		ep.servicesLock.RUnlock()
-		if !serviceExist || serv == nil || int(serv.totalReplicas) != len(serv.consistentRing.GetMembers()) {
-			return nil, "", status.Errorf(codes.NotFound, "service-%s", serviceType)
-		}
-	}
-
-	ep.deviceTypeServiceMapLock.RLock()
-	defer ep.deviceTypeServiceMapLock.RUnlock()
-	for dType, sType := range ep.deviceTypeServiceMap {
-		if sType == serviceType {
-			return serv, dType, nil
-		}
-	}
-	return nil, "", status.Errorf(codes.NotFound, "service-%s", serviceType)
-}
-
-func (ep *endpointManager) getConsistentConfig() consistent.Config {
-	return consistent.Config{
-		PartitionCount:    ep.partitionCount,
-		ReplicationFactor: ep.replicationFactor,
-		Load:              ep.load,
-		Hasher:            hasher{},
-	}
-}
-
-// loadServices loads the services (adapters) and device types into memory. Because the data set is small and is
-// stored in the dB as binary protobuf, it is simpler to reload everything whenever an inconsistency is detected,
-// instead of watching the dB for updates and acting on them.
-func (ep *endpointManager) loadServices(ctx context.Context) error {
-	ep.servicesLock.Lock()
-	defer ep.servicesLock.Unlock()
-	ep.deviceTypeServiceMapLock.Lock()
-	defer ep.deviceTypeServiceMapLock.Unlock()
-
-	if ep.backend == nil {
-		return status.Error(codes.Aborted, "backend-not-set")
-	}
-	ep.services = make(map[string]*service)
-	ep.deviceTypeServiceMap = make(map[string]string)
-
-	// Load the adapters
-	blobs, err := ep.backend.List(log.WithSpanFromContext(context.Background(), ctx), "adapters")
-	if err != nil {
-		return err
-	}
-
-	// Data is marshalled as proto bytes in the data store
-	for _, blob := range blobs {
-		data := blob.Value.([]byte)
-		adapter := &voltha.Adapter{}
-		if err := proto.Unmarshal(data, adapter); err != nil {
-			return err
-		}
-		// A valid adapter should have the vendorID set
-		if adapter.Vendor != "" {
-			if _, ok := ep.services[adapter.Type]; !ok {
-				ep.services[adapter.Type] = &service{
-					id:             adapter.Type,
-					totalReplicas:  adapter.TotalReplicas,
-					replicas:       make(map[ReplicaID]Endpoint),
-					consistentRing: consistent.New(nil, ep.getConsistentConfig()),
-				}
-
-			}
-			currentReplica := ReplicaID(adapter.CurrentReplica)
-			endpoint := Endpoint(adapter.Endpoint)
-			ep.services[adapter.Type].replicas[currentReplica] = endpoint
-			ep.services[adapter.Type].consistentRing.Add(newMember(adapter.Id, adapter.Type, adapter.Vendor, endpoint, adapter.Version, currentReplica))
-		}
-	}
-	// Load the device types
-	blobs, err = ep.backend.List(log.WithSpanFromContext(context.Background(), ctx), "device_types")
-	if err != nil {
-		return err
-	}
-	for _, blob := range blobs {
-		data := blob.Value.([]byte)
-		deviceType := &voltha.DeviceType{}
-		if err := proto.Unmarshal(data, deviceType); err != nil {
-			return err
-		}
-		if _, ok := ep.deviceTypeServiceMap[deviceType.Id]; !ok {
-			ep.deviceTypeServiceMap[deviceType.Id] = deviceType.Adapter
-		}
-	}
-
-	// Log the loaded data in debug mode to facilitate troubleshooting
-	if logger.V(log.DebugLevel) {
-		for key, val := range ep.services {
-			members := val.consistentRing.GetMembers()
-			logger.Debugw(ctx, "service", log.Fields{"service": key, "expected-replica": val.totalReplicas, "replicas": len(val.consistentRing.GetMembers())})
-			for _, m := range members {
-				n := m.(Member)
-				logger.Debugw(ctx, "service-loaded", log.Fields{"serviceId": n.getID(), "serviceType": n.getServiceType(), "replica": n.getReplica(), "endpoint": n.getEndPoint()})
-			}
-		}
-		logger.Debugw(ctx, "device-types-loaded", log.Fields{"device-types": ep.deviceTypeServiceMap})
-	}
-	return nil
-}
-
-// makeKey creates the string that the hash function uses to create the hash
-func (ep *endpointManager) makeKey(deviceID string, deviceType string, serviceType string) []byte {
-	return []byte(fmt.Sprintf("%s_%s_%s", serviceType, deviceType, deviceID))
-}
-
-// The consistent package requires a hasher function
-type hasher struct{}
-
-// Sum64 provides the hasher function.  Based upon numerous testing scenarios, the xxhash package seems to provide the
-// best distribution compared to other hash packages
-func (h hasher) Sum64(data []byte) uint64 {
-	return xxhash.Sum64(data)
-}
-
-// Member represents a member on the consistent ring
-type Member interface {
-	String() string
-	getReplica() ReplicaID
-	getEndPoint() Endpoint
-	getID() string
-	getServiceType() string
-}
-
-// member implements the Member interface
-type member struct {
-	id          string
-	serviceType string
-	vendor      string
-	version     string
-	replica     ReplicaID
-	endpoint    Endpoint
-}
-
-func newMember(ID string, serviceType string, vendor string, endPoint Endpoint, version string, replica ReplicaID) Member {
-	return &member{
-		id:          ID,
-		serviceType: serviceType,
-		vendor:      vendor,
-		version:     version,
-		replica:     replica,
-		endpoint:    endPoint,
-	}
-}
-
-func (m *member) String() string {
-	return string(m.endpoint)
-}
-
-func (m *member) getReplica() ReplicaID {
-	return m.replica
-}
-
-func (m *member) getEndPoint() Endpoint {
-	return m.endpoint
-}
-
-func (m *member) getID() string {
-	return m.id
-}
-
-func (m *member) getServiceType() string {
-	return m.serviceType
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/kafka_inter_container_library.go b/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/kafka_inter_container_library.go
deleted file mode 100644
index 0f006dd..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/kafka_inter_container_library.go
+++ /dev/null
@@ -1,1097 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package kafka
-
-import (
-	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"reflect"
-	"strings"
-	"sync"
-	"time"
-
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
-	"github.com/golang/protobuf/ptypes/any"
-	"github.com/google/uuid"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
-	"github.com/opentracing/opentracing-go"
-)
-
-const (
-	DefaultMaxRetries     = 3
-	DefaultRequestTimeout = 60000 // 60000 milliseconds - to handle a wider latency range
-)
-
-const (
-	TransactionKey = "transactionID"
-	FromTopic      = "fromTopic"
-)
-
-var ErrorTransactionNotAcquired = errors.New("transaction-not-acquired")
-var ErrorTransactionInvalidId = errors.New("transaction-invalid-id")
-
-// requestHandlerChannel represents an interface associated with a channel.  Whenever an event is
-// obtained from that channel, this interface is invoked.  This is used to handle
-// async requests into the Core via the kafka messaging bus
-type requestHandlerChannel struct {
-	requesthandlerInterface interface{}
-	ch                      <-chan *ic.InterContainerMessage
-}
-
-// transactionChannel represents a combination of a topic and a channel onto which a response received
-// on the kafka bus will be sent
-type transactionChannel struct {
-	topic *Topic
-	ch    chan *ic.InterContainerMessage
-}
-
-type InterContainerProxy interface {
-	Start(ctx context.Context) error
-	Stop(ctx context.Context)
-	GetDefaultTopic() *Topic
-	InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any)
-	InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse
-	SubscribeWithRequestHandlerInterface(ctx context.Context, topic Topic, handler interface{}) error
-	SubscribeWithDefaultRequestHandler(ctx context.Context, topic Topic, initialOffset int64) error
-	UnSubscribeFromRequestHandler(ctx context.Context, topic Topic) error
-	DeleteTopic(ctx context.Context, topic Topic) error
-	EnableLivenessChannel(ctx context.Context, enable bool) chan bool
-	SendLiveness(ctx context.Context) error
-}
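To show how the interface above is typically consumed, here is a hedged caller-side sketch (not part of the deleted file; the RPC name and topic names are assumed examples). The returned channel first delivers a transport acknowledgement and then the final reply.

```go
// sendAdoptDevice is a hypothetical caller of InvokeAsyncRPC: it keys the request by
// device ID so all messages for that device land on the same kafka partition.
func sendAdoptDevice(ctx context.Context, proxy kafka.InterContainerProxy, deviceID string) {
	toTopic := kafka.Topic{Name: "openolt"}   // assumed adapter topic
	replyTopic := kafka.Topic{Name: "rwcore"} // assumed reply topic
	for resp := range proxy.InvokeAsyncRPC(ctx, "adopt_device", &toTopic, &replyTopic, true, deviceID) {
		// First message acknowledges the send; the second carries the reply or an error.
		fmt.Printf("rpc response: %+v\n", resp)
	}
}
```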
-
-// interContainerProxy represents the messaging proxy
-type interContainerProxy struct {
-	kafkaAddress                   string
-	defaultTopic                   *Topic
-	defaultRequestHandlerInterface interface{}
-	kafkaClient                    Client
-	doneCh                         chan struct{}
-	doneOnce                       sync.Once
-
-	// This map is used to map a topic to an interface and channel.   When a request is received
-	// on that channel (registered to the topic) then that interface is invoked.
-	topicToRequestHandlerChannelMap   map[string]*requestHandlerChannel
-	lockTopicRequestHandlerChannelMap sync.RWMutex
-
-	// This map is used to map a response topic to a channel.  That channel handles all responses received on
-	// the topic and forwards them to the appropriate consumer's channel, using the
-	// transactionIdToChannelMap.
-	topicToResponseChannelMap   map[string]<-chan *ic.InterContainerMessage
-	lockTopicResponseChannelMap sync.RWMutex
-
-	// This map is used to map a transaction to a consumer's channel.  This is used whenever a request has been
-	// sent out and we are waiting for a response.
-	transactionIdToChannelMap     map[string]*transactionChannel
-	lockTransactionIdToChannelMap sync.RWMutex
-}
-
-type InterContainerProxyOption func(*interContainerProxy)
-
-func InterContainerAddress(address string) InterContainerProxyOption {
-	return func(args *interContainerProxy) {
-		args.kafkaAddress = address
-	}
-}
-
-func DefaultTopic(topic *Topic) InterContainerProxyOption {
-	return func(args *interContainerProxy) {
-		args.defaultTopic = topic
-	}
-}
-
-func RequestHandlerInterface(handler interface{}) InterContainerProxyOption {
-	return func(args *interContainerProxy) {
-		args.defaultRequestHandlerInterface = handler
-	}
-}
-
-func MsgClient(client Client) InterContainerProxyOption {
-	return func(args *interContainerProxy) {
-		args.kafkaClient = client
-	}
-}
-
-func newInterContainerProxy(opts ...InterContainerProxyOption) *interContainerProxy {
-	proxy := &interContainerProxy{
-		kafkaAddress: DefaultKafkaAddress,
-		doneCh:       make(chan struct{}),
-	}
-
-	for _, option := range opts {
-		option(proxy)
-	}
-
-	return proxy
-}
-
-func NewInterContainerProxy(opts ...InterContainerProxyOption) InterContainerProxy {
-	return newInterContainerProxy(opts...)
-}
-
-func (kp *interContainerProxy) Start(ctx context.Context) error {
-	logger.Info(ctx, "Starting-Proxy")
-
-	// Kafka MsgClient should already have been created.  If not, output fatal error
-	if kp.kafkaClient == nil {
-		logger.Fatal(ctx, "kafka-client-not-set")
-	}
-
-	// Start the kafka client
-	if err := kp.kafkaClient.Start(ctx); err != nil {
-		logger.Errorw(ctx, "Cannot-create-kafka-proxy", log.Fields{"error": err})
-		return err
-	}
-
-	// Create the topic to response channel map
-	kp.topicToResponseChannelMap = make(map[string]<-chan *ic.InterContainerMessage)
-	//
-	// Create the transactionId to Channel Map
-	kp.transactionIdToChannelMap = make(map[string]*transactionChannel)
-
-	// Create the topic to request channel map
-	kp.topicToRequestHandlerChannelMap = make(map[string]*requestHandlerChannel)
-
-	return nil
-}
-
-func (kp *interContainerProxy) Stop(ctx context.Context) {
-	logger.Info(ctx, "stopping-intercontainer-proxy")
-	kp.doneOnce.Do(func() { close(kp.doneCh) })
-	// TODO : Perform cleanup
-	kp.kafkaClient.Stop(ctx)
-	err := kp.deleteAllTopicRequestHandlerChannelMap(ctx)
-	if err != nil {
-		logger.Errorw(ctx, "failed-delete-all-topic-request-handler-channel-map", log.Fields{"error": err})
-	}
-	err = kp.deleteAllTopicResponseChannelMap(ctx)
-	if err != nil {
-		logger.Errorw(ctx, "failed-delete-all-topic-response-channel-map", log.Fields{"error": err})
-	}
-	kp.deleteAllTransactionIdToChannelMap(ctx)
-}
-
-func (kp *interContainerProxy) GetDefaultTopic() *Topic {
-	return kp.defaultTopic
-}
-
-// InvokeAsyncRPC is used to make an RPC request asynchronously
-func (kp *interContainerProxy) InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
-	waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse {
-
-	spanArg, span, ctx := kp.embedSpanAsArg(ctx, rpc, !waitForResponse)
-	if spanArg != nil {
-		kvArgs = append(kvArgs, &spanArg[0])
-	}
-
-	defer span.Finish()
-
-	logger.Debugw(ctx, "InvokeAsyncRPC", log.Fields{"rpc": rpc, "key": key, "kvArgs": kvArgs})
-
-	// If a replyToTopic is provided then use it, otherwise fall back to the default topic.  The replyToTopic is
-	// typically the device ID.
-	responseTopic := replyToTopic
-	if responseTopic == nil {
-		responseTopic = kp.GetDefaultTopic()
-	}
-
-	chnl := make(chan *RpcResponse)
-
-	go func() {
-
-		// once we're done, close the response channel
-		defer close(chnl)
-
-		var err error
-		var protoRequest *ic.InterContainerMessage
-
-		// Encode the request
-		protoRequest, err = encodeRequest(ctx, rpc, toTopic, responseTopic, key, kvArgs...)
-		if err != nil {
-			logger.Warnw(ctx, "cannot-format-request", log.Fields{"rpc": rpc, "error": err})
-			log.MarkSpanError(ctx, errors.New("cannot-format-request"))
-			chnl <- NewResponse(RpcFormattingError, err, nil)
-			return
-		}
-
-		// Subscribe for response, if needed, before sending request
-		var ch <-chan *ic.InterContainerMessage
-		if ch, err = kp.subscribeForResponse(ctx, *responseTopic, protoRequest.Header.Id); err != nil {
-			logger.Errorw(ctx, "failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
-			log.MarkSpanError(ctx, errors.New("failed-to-subscribe-for-response"))
-			chnl <- NewResponse(RpcTransportError, err, nil)
-			return
-		}
-
-		// Send request - if the topic is formatted with a device Id then we will send the request using a
-		// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
-		// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
-		logger.Debugw(ctx, "sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
-
-		// if the message cannot be sent on kafka, publish an error response and close the channel
-		if err = kp.kafkaClient.Send(ctx, protoRequest, toTopic, key); err != nil {
-			chnl <- NewResponse(RpcTransportError, err, nil)
-			return
-		}
-
-		// if the client is not waiting for a response, send the ack and return (the deferred close will close the channel)
-		chnl <- NewResponse(RpcSent, nil, nil)
-		if !waitForResponse {
-			return
-		}
-
-		defer func() {
-			// Remove the subscription for a response on return
-			if err := kp.unSubscribeForResponse(ctx, protoRequest.Header.Id); err != nil {
-				logger.Warnw(ctx, "invoke-async-rpc-unsubscriber-for-response-failed", log.Fields{"err": err})
-			}
-		}()
-
-		// Wait for response as well as timeout or cancellation
-		select {
-		case msg, ok := <-ch:
-			if !ok {
-				logger.Warnw(ctx, "channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
-				log.MarkSpanError(ctx, errors.New("channel-closed"))
-				chnl <- NewResponse(RpcTransportError, status.Error(codes.Aborted, "channel closed"), nil)
-			}
-			logger.Debugw(ctx, "received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
-			if responseBody, err := decodeResponse(ctx, msg); err != nil {
-				chnl <- NewResponse(RpcReply, err, nil)
-			} else {
-				if responseBody.Success {
-					chnl <- NewResponse(RpcReply, nil, responseBody.Result)
-				} else {
-					// response body contains an error
-					unpackErr := &ic.Error{}
-					if err := ptypes.UnmarshalAny(responseBody.Result, unpackErr); err != nil {
-						chnl <- NewResponse(RpcReply, err, nil)
-					} else {
-						chnl <- NewResponse(RpcReply, status.Error(codes.Internal, unpackErr.Reason), nil)
-					}
-				}
-			}
-		case <-ctx.Done():
-			logger.Errorw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
-			log.MarkSpanError(ctx, errors.New("context-cancelled"))
-			err := status.Error(codes.DeadlineExceeded, ctx.Err().Error())
-			chnl <- NewResponse(RpcTimeout, err, nil)
-		case <-kp.doneCh:
-			chnl <- NewResponse(RpcSystemClosing, nil, nil)
-			logger.Warnw(ctx, "received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
-		}
-	}()
-	return chnl
-}
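A hedged sketch of how a caller might drain the channel returned by InvokeAsyncRPC above; the rpc name, topic names and key are illustrative placeholders, not values taken from this change.

```go
package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v6/pkg/kafka"
)

// callAsync issues an asynchronous RPC and reacts to each RpcResponse message type.
func callAsync(ctx context.Context, icProxy kafka.InterContainerProxy) {
	toTopic := &kafka.Topic{Name: "openolt"}                          // placeholder
	replyTo := &kafka.Topic{Name: "openolt_0123456789abcdef01234567"} // placeholder per-device topic
	for resp := range icProxy.InvokeAsyncRPC(ctx, "get_device_info", toTopic, replyTo, true, "0123456789abcdef01234567") {
		switch resp.MType {
		case kafka.RpcSent:
			// the request was handed to the kafka client; a reply, timeout or error follows
		case kafka.RpcReply:
			// resp.Err is nil on success; resp.Reply carries the *any.Any result
		default:
			// RpcTimeout, RpcTransportError or RpcSystemClosing: stop waiting
		}
	}
}
```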
-
-// Method to extract the OpenTracing Span from the Context and serialize it for transport over Kafka, embedded as an additional argument.
-// The additional argument is injected with the key "span" and the Span marshalled into a byte slice as its value.
-//
-// The span name is automatically constructed from the RPC name with the following convention (<rpc-name> is the name of the invoked method):
-// - RPC invoked in Sync manner (WaitForResponse=true) : kafka-rpc-<rpc-name>
-// - RPC invoked in Async manner (WaitForResponse=false) : kafka-async-rpc-<rpc-name>
-// - Inter Adapter RPC invoked in Sync manner (WaitForResponse=true) : kafka-inter-adapter-rpc-<rpc-name>
-// - Inter Adapter RPC invoked in Async manner (WaitForResponse=false) : kafka-inter-adapter-async-rpc-<rpc-name>
-func (kp *interContainerProxy) embedSpanAsArg(ctx context.Context, rpc string, isAsync bool) ([]KVArg, opentracing.Span, context.Context) {
-	var err error
-	var newCtx context.Context
-	var spanToInject opentracing.Span
-
-	if !log.GetGlobalLFM().GetLogCorrelationStatus() && !log.GetGlobalLFM().GetTracePublishingStatus() {
-		// if both log correlation and trace publishing are disabled, do not generate the span
-		logger.Debugw(ctx, "not-embedding-span-in-KVArg-", log.Fields{"rpc": rpc,
-			"log-correlation-status": log.GetGlobalLFM().GetLogCorrelationStatus(), "trace-publishing-status": log.GetGlobalLFM().GetTracePublishingStatus()})
-		return nil, opentracing.GlobalTracer().StartSpan(rpc), ctx
-	}
-
-	var spanName strings.Builder
-	spanName.WriteString("kafka-")
-
-	// In case of inter adapter message, use Msg Type for constructing RPC name
-	if rpc == "process_inter_adapter_message" {
-		if msgType, ok := ctx.Value("inter-adapter-msg-type").(ic.InterAdapterMessageType_Types); ok {
-			spanName.WriteString("inter-adapter-")
-			rpc = msgType.String()
-		}
-	}
-
-	if isAsync {
-		spanName.WriteString("async-rpc-")
-	} else {
-		spanName.WriteString("rpc-")
-	}
-	spanName.WriteString(rpc)
-
-	if isAsync {
-		spanToInject, newCtx = log.CreateAsyncSpan(ctx, spanName.String())
-	} else {
-		spanToInject, newCtx = log.CreateChildSpan(ctx, spanName.String())
-	}
-
-	spanToInject.SetBaggageItem("rpc-span-name", spanName.String())
-
-	textMapCarrier := opentracing.TextMapCarrier(make(map[string]string))
-	if err = opentracing.GlobalTracer().Inject(spanToInject.Context(), opentracing.TextMap, textMapCarrier); err != nil {
-		logger.Warnw(ctx, "unable-to-serialize-span-to-textmap", log.Fields{"span": spanToInject, "error": err})
-		return nil, spanToInject, newCtx
-	}
-
-	var textMapJson []byte
-	if textMapJson, err = json.Marshal(textMapCarrier); err != nil {
-		logger.Warnw(ctx, "unable-to-marshal-textmap-to-json-string", log.Fields{"textMap": textMapCarrier, "error": err})
-		return nil, spanToInject, newCtx
-	}
-
-	spanArg := make([]KVArg, 1)
-	spanArg[0] = KVArg{Key: "span", Value: &ic.StrType{Val: string(textMapJson)}}
-	return spanArg, spanToInject, newCtx
-}
-
-// InvokeRPC is used to send a request to a given topic
-func (kp *interContainerProxy) InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
-	waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any) {
-
-	spanArg, span, ctx := kp.embedSpanAsArg(ctx, rpc, false)
-	if spanArg != nil {
-		kvArgs = append(kvArgs, &spanArg[0])
-	}
-
-	defer span.Finish()
-
-	logger.Debugw(ctx, "InvokeRPC", log.Fields{"rpc": rpc, "key": key, "kvArgs": kvArgs})
-
-	// If a replyToTopic is provided then use it, otherwise fall back to the default topic.  The replyToTopic is
-	// typically the device ID.
-	responseTopic := replyToTopic
-	if responseTopic == nil {
-		responseTopic = kp.defaultTopic
-	}
-
-	// Encode the request
-	protoRequest, err := encodeRequest(ctx, rpc, toTopic, responseTopic, key, kvArgs...)
-	if err != nil {
-		logger.Warnw(ctx, "cannot-format-request", log.Fields{"rpc": rpc, "error": err})
-		log.MarkSpanError(ctx, errors.New("cannot-format-request"))
-		return false, nil
-	}
-
-	// Subscribe for response, if needed, before sending request
-	var ch <-chan *ic.InterContainerMessage
-	if waitForResponse {
-		var err error
-		if ch, err = kp.subscribeForResponse(ctx, *responseTopic, protoRequest.Header.Id); err != nil {
-			logger.Errorw(ctx, "failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
-		}
-	}
-
-	// Send request - if the topic is formatted with a device Id then we will send the request using a
-	// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
-	// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
-	//key := GetDeviceIdFromTopic(*toTopic)
-	logger.Debugw(ctx, "sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
-	go func() {
-		if err := kp.kafkaClient.Send(ctx, protoRequest, toTopic, key); err != nil {
-			log.MarkSpanError(ctx, errors.New("send-failed"))
-			logger.Errorw(ctx, "send-failed", log.Fields{
-				"topic": toTopic,
-				"key":   key,
-				"error": err})
-		}
-	}()
-
-	if waitForResponse {
-		// Create a child context based on the parent context, if any
-		var cancel context.CancelFunc
-		childCtx := context.Background()
-		if ctx == nil {
-			ctx, cancel = context.WithTimeout(context.Background(), DefaultRequestTimeout*time.Millisecond)
-		} else {
-			childCtx, cancel = context.WithTimeout(ctx, DefaultRequestTimeout*time.Millisecond)
-		}
-		defer cancel()
-
-		// Wait for response as well as timeout or cancellation
-		// Remove the subscription for a response on return
-		defer func() {
-			if err := kp.unSubscribeForResponse(ctx, protoRequest.Header.Id); err != nil {
-				logger.Errorw(ctx, "response-unsubscribe-failed", log.Fields{
-					"id":    protoRequest.Header.Id,
-					"error": err})
-			}
-		}()
-		select {
-		case msg, ok := <-ch:
-			if !ok {
-				logger.Warnw(ctx, "channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
-				log.MarkSpanError(ctx, errors.New("channel-closed"))
-				protoError := &ic.Error{Reason: "channel-closed"}
-				var marshalledArg *any.Any
-				if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
-					return false, nil // Should never happen
-				}
-				return false, marshalledArg
-			}
-			logger.Debugw(ctx, "received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
-			var responseBody *ic.InterContainerResponseBody
-			var err error
-			if responseBody, err = decodeResponse(ctx, msg); err != nil {
-				logger.Errorw(ctx, "decode-response-error", log.Fields{"error": err})
-				// FIXME we should return something
-			}
-			return responseBody.Success, responseBody.Result
-		case <-ctx.Done():
-			logger.Debugw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
-			log.MarkSpanError(ctx, errors.New("context-cancelled"))
-			//	 pack the error as proto any type
-			protoError := &ic.Error{Reason: ctx.Err().Error(), Code: ic.ErrorCode_DEADLINE_EXCEEDED}
-
-			var marshalledArg *any.Any
-			if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
-				return false, nil // Should never happen
-			}
-			return false, marshalledArg
-		case <-childCtx.Done():
-			logger.Debugw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
-			log.MarkSpanError(ctx, errors.New("context-cancelled"))
-			//	 pack the error as proto any type
-			protoError := &ic.Error{Reason: childCtx.Err().Error(), Code: ic.ErrorCode_DEADLINE_EXCEEDED}
-
-			var marshalledArg *any.Any
-			if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
-				return false, nil // Should never happen
-			}
-			return false, marshalledArg
-		case <-kp.doneCh:
-			logger.Infow(ctx, "received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
-			return true, nil
-		}
-	}
-	return true, nil
-}
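For the synchronous path, a caller would typically unpack a failed result into an ic.Error, mirroring what InvokeAsyncRPC does internally; the rpc name, topic, key and argument below are assumptions for illustration only.

```go
package example

import (
	"context"

	"github.com/golang/protobuf/ptypes"
	"github.com/opencord/voltha-lib-go/v6/pkg/kafka"
	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
)

// callSync invokes an RPC, waits for the reply and decodes the error payload on failure.
// "reboot_device", "device_id" and the device id string are placeholders.
func callSync(ctx context.Context, icProxy kafka.InterContainerProxy) (bool, *ic.Error) {
	toTopic := &kafka.Topic{Name: "openolt"} // placeholder
	arg := &kafka.KVArg{Key: "device_id", Value: &ic.StrType{Val: "0123456789abcdef01234567"}}
	success, result := icProxy.InvokeRPC(ctx, "reboot_device", toTopic, nil, true, "0123456789abcdef01234567", arg)
	if success || result == nil {
		return success, nil
	}
	// on failure the result, when present, wraps an ic.Error
	unpacked := &ic.Error{}
	if err := ptypes.UnmarshalAny(result, unpacked); err != nil {
		return false, nil
	}
	return false, unpacked
}
```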
-
-// SubscribeWithRequestHandlerInterface allows a caller to assign a target object to be invoked automatically
-// when a message is received on a given topic
-func (kp *interContainerProxy) SubscribeWithRequestHandlerInterface(ctx context.Context, topic Topic, handler interface{}) error {
-
-	// Subscribe to receive messages for that topic
-	var ch <-chan *ic.InterContainerMessage
-	var err error
-	if ch, err = kp.kafkaClient.Subscribe(ctx, &topic); err != nil {
-		//if ch, err = kp.Subscribe(topic); err != nil {
-		logger.Errorw(ctx, "failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
-		return err
-	}
-
-	kp.defaultRequestHandlerInterface = handler
-	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: handler, ch: ch})
-	// Launch a go routine to receive and process kafka messages
-	go kp.waitForMessages(ctx, ch, topic, handler)
-
-	return nil
-}
-
-// SubscribeWithDefaultRequestHandler allows a caller to add a topic to an existing target object to be invoked automatically
-// when a message is received on a given topic.  So far there is only 1 target registered per microservice
-func (kp *interContainerProxy) SubscribeWithDefaultRequestHandler(ctx context.Context, topic Topic, initialOffset int64) error {
-	// Subscribe to receive messages for that topic
-	var ch <-chan *ic.InterContainerMessage
-	var err error
-	if ch, err = kp.kafkaClient.Subscribe(ctx, &topic, &KVArg{Key: Offset, Value: initialOffset}); err != nil {
-		logger.Errorw(ctx, "failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
-		return err
-	}
-	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: kp.defaultRequestHandlerInterface, ch: ch})
-
-	// Launch a go routine to receive and process kafka messages
-	go kp.waitForMessages(ctx, ch, topic, kp.defaultRequestHandlerInterface)
-
-	return nil
-}
-
-func (kp *interContainerProxy) UnSubscribeFromRequestHandler(ctx context.Context, topic Topic) error {
-	return kp.deleteFromTopicRequestHandlerChannelMap(ctx, topic.Name)
-}
-
-func (kp *interContainerProxy) deleteFromTopicResponseChannelMap(ctx context.Context, topic string) error {
-	kp.lockTopicResponseChannelMap.Lock()
-	defer kp.lockTopicResponseChannelMap.Unlock()
-	if _, exist := kp.topicToResponseChannelMap[topic]; exist {
-		// Unsubscribe from this topic first - this will close the subscribed channel
-		var err error
-		if err = kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
-			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic})
-		}
-		delete(kp.topicToResponseChannelMap, topic)
-		return err
-	} else {
-		return fmt.Errorf("%s-Topic-not-found", topic)
-	}
-}
-
-// nolint: unused
-func (kp *interContainerProxy) deleteAllTopicResponseChannelMap(ctx context.Context) error {
-	logger.Debug(ctx, "delete-all-topic-response-channel")
-	kp.lockTopicResponseChannelMap.Lock()
-	defer kp.lockTopicResponseChannelMap.Unlock()
-	var unsubscribeFailTopics []string
-	for topic := range kp.topicToResponseChannelMap {
-		// Unsubscribe from this topic first - this will close the subscribed channel
-		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
-			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
-			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic, "error": err})
-			// Do not return. Continue to try to unsubscribe from the other topics.
-		} else {
-			// Only delete from channel map if successfully unsubscribed.
-			delete(kp.topicToResponseChannelMap, topic)
-		}
-	}
-	if len(unsubscribeFailTopics) > 0 {
-		return fmt.Errorf("unsubscribe-errors: %v", unsubscribeFailTopics)
-	}
-	return nil
-}
-
-func (kp *interContainerProxy) addToTopicRequestHandlerChannelMap(topic string, arg *requestHandlerChannel) {
-	kp.lockTopicRequestHandlerChannelMap.Lock()
-	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
-	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; !exist {
-		kp.topicToRequestHandlerChannelMap[topic] = arg
-	}
-}
-
-func (kp *interContainerProxy) deleteFromTopicRequestHandlerChannelMap(ctx context.Context, topic string) error {
-	kp.lockTopicRequestHandlerChannelMap.Lock()
-	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
-	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; exist {
-		// Close the kafka client first by unsubscribing from this topic
-		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
-			return err
-		}
-		delete(kp.topicToRequestHandlerChannelMap, topic)
-		return nil
-	} else {
-		return fmt.Errorf("%s-Topic-not-found", topic)
-	}
-}
-
-// nolint: unused
-func (kp *interContainerProxy) deleteAllTopicRequestHandlerChannelMap(ctx context.Context) error {
-	logger.Debug(ctx, "delete-all-topic-request-channel")
-	kp.lockTopicRequestHandlerChannelMap.Lock()
-	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
-	var unsubscribeFailTopics []string
-	for topic := range kp.topicToRequestHandlerChannelMap {
-		// Close the kafka client first by unsubscribing from this topic
-		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
-			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
-			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic, "error": err})
-			// Do not return. Continue to try to unsubscribe from the other topics.
-		} else {
-			// Only delete from channel map if successfully unsubscribed.
-			delete(kp.topicToRequestHandlerChannelMap, topic)
-		}
-	}
-	if len(unsubscribeFailTopics) > 0 {
-		return fmt.Errorf("unsubscribe-errors: %v", unsubscribeFailTopics)
-	}
-	return nil
-}
-
-func (kp *interContainerProxy) addToTransactionIdToChannelMap(id string, topic *Topic, arg chan *ic.InterContainerMessage) {
-	kp.lockTransactionIdToChannelMap.Lock()
-	defer kp.lockTransactionIdToChannelMap.Unlock()
-	if _, exist := kp.transactionIdToChannelMap[id]; !exist {
-		kp.transactionIdToChannelMap[id] = &transactionChannel{topic: topic, ch: arg}
-	}
-}
-
-func (kp *interContainerProxy) deleteFromTransactionIdToChannelMap(id string) {
-	kp.lockTransactionIdToChannelMap.Lock()
-	defer kp.lockTransactionIdToChannelMap.Unlock()
-	if transChannel, exist := kp.transactionIdToChannelMap[id]; exist {
-		// Close the channel first
-		close(transChannel.ch)
-		delete(kp.transactionIdToChannelMap, id)
-	}
-}
-
-func (kp *interContainerProxy) deleteTopicTransactionIdToChannelMap(id string) {
-	kp.lockTransactionIdToChannelMap.Lock()
-	defer kp.lockTransactionIdToChannelMap.Unlock()
-	for key, value := range kp.transactionIdToChannelMap {
-		if value.topic.Name == id {
-			close(value.ch)
-			delete(kp.transactionIdToChannelMap, key)
-		}
-	}
-}
-
-// nolint: unused
-func (kp *interContainerProxy) deleteAllTransactionIdToChannelMap(ctx context.Context) {
-	logger.Debug(ctx, "delete-all-transaction-id-channel-map")
-	kp.lockTransactionIdToChannelMap.Lock()
-	defer kp.lockTransactionIdToChannelMap.Unlock()
-	for key, value := range kp.transactionIdToChannelMap {
-		close(value.ch)
-		delete(kp.transactionIdToChannelMap, key)
-	}
-}
-
-func (kp *interContainerProxy) DeleteTopic(ctx context.Context, topic Topic) error {
-	// If we have any consumers on that topic we need to close them
-	if err := kp.deleteFromTopicResponseChannelMap(ctx, topic.Name); err != nil {
-		logger.Errorw(ctx, "delete-from-topic-responsechannelmap-failed", log.Fields{"error": err})
-	}
-	if err := kp.deleteFromTopicRequestHandlerChannelMap(ctx, topic.Name); err != nil {
-		logger.Errorw(ctx, "delete-from-topic-requesthandlerchannelmap-failed", log.Fields{"error": err})
-	}
-	kp.deleteTopicTransactionIdToChannelMap(topic.Name)
-
-	return kp.kafkaClient.DeleteTopic(ctx, &topic)
-}
-
-func encodeReturnedValue(ctx context.Context, returnedVal interface{}) (*any.Any, error) {
-	// Encode the response argument - needs to be a proto message
-	if returnedVal == nil {
-		return nil, nil
-	}
-	protoValue, ok := returnedVal.(proto.Message)
-	if !ok {
-		logger.Warnw(ctx, "response-value-not-proto-message", log.Fields{"error": ok, "returnVal": returnedVal})
-		err := errors.New("response-value-not-proto-message")
-		return nil, err
-	}
-
-	// Marshal the returned value, if any
-	var marshalledReturnedVal *any.Any
-	var err error
-	if marshalledReturnedVal, err = ptypes.MarshalAny(protoValue); err != nil {
-		logger.Warnw(ctx, "cannot-marshal-returned-val", log.Fields{"error": err})
-		return nil, err
-	}
-	return marshalledReturnedVal, nil
-}
-
-func encodeDefaultFailedResponse(ctx context.Context, request *ic.InterContainerMessage) *ic.InterContainerMessage {
-	responseHeader := &ic.Header{
-		Id:        request.Header.Id,
-		Type:      ic.MessageType_RESPONSE,
-		FromTopic: request.Header.ToTopic,
-		ToTopic:   request.Header.FromTopic,
-		Timestamp: ptypes.TimestampNow(),
-	}
-	responseBody := &ic.InterContainerResponseBody{
-		Success: false,
-		Result:  nil,
-	}
-	var marshalledResponseBody *any.Any
-	var err error
-	// Error should never happen here
-	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
-		logger.Warnw(ctx, "cannot-marshal-failed-response-body", log.Fields{"error": err})
-	}
-
-	return &ic.InterContainerMessage{
-		Header: responseHeader,
-		Body:   marshalledResponseBody,
-	}
-
-}
-
-//encodeResponse builds a response message to send over kafka and returns an InterContainerMessage on success
-//or an error on failure
-func encodeResponse(ctx context.Context, request *ic.InterContainerMessage, success bool, returnedValues ...interface{}) (*ic.InterContainerMessage, error) {
-	//logger.Debugw(ctx, "encodeResponse", log.Fields{"success": success, "returnedValues": returnedValues})
-	responseHeader := &ic.Header{
-		Id:        request.Header.Id,
-		Type:      ic.MessageType_RESPONSE,
-		FromTopic: request.Header.ToTopic,
-		ToTopic:   request.Header.FromTopic,
-		KeyTopic:  request.Header.KeyTopic,
-		Timestamp: ptypes.TimestampNow(),
-	}
-
-	// Go over all returned values
-	var marshalledReturnedVal *any.Any
-	var err error
-
-	// for now we support only 1 returned value (excluding the error)
-	if len(returnedValues) > 0 {
-		if marshalledReturnedVal, err = encodeReturnedValue(ctx, returnedValues[0]); err != nil {
-			logger.Warnw(ctx, "cannot-marshal-response-body", log.Fields{"error": err})
-		}
-	}
-
-	responseBody := &ic.InterContainerResponseBody{
-		Success: success,
-		Result:  marshalledReturnedVal,
-	}
-
-	// Marshal the response body
-	var marshalledResponseBody *any.Any
-	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
-		logger.Warnw(ctx, "cannot-marshal-response-body", log.Fields{"error": err})
-		return nil, err
-	}
-
-	return &ic.InterContainerMessage{
-		Header: responseHeader,
-		Body:   marshalledResponseBody,
-	}, nil
-}
-
-func CallFuncByName(ctx context.Context, myClass interface{}, funcName string, params ...interface{}) (out []reflect.Value, err error) {
-	myClassValue := reflect.ValueOf(myClass)
-	// Capitalize the first letter of funcName, since only exported (capitalized) methods
-	// are visible to reflection when invoked from a different package
-	funcName = strings.Title(funcName)
-	m := myClassValue.MethodByName(funcName)
-	if !m.IsValid() {
-		return make([]reflect.Value, 0), fmt.Errorf("method-not-found \"%s\"", funcName)
-	}
-	in := make([]reflect.Value, len(params)+1)
-	in[0] = reflect.ValueOf(ctx)
-	for i, param := range params {
-		in[i+1] = reflect.ValueOf(param)
-	}
-	out = m.Call(in)
-	return
-}
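CallFuncByName, together with handleMessage further down, implies the shape of a request handler: an exported method named after the rpc (strings.Title does not treat underscores as word separators, so "adopt_device" resolves to "Adopt_device"), taking ctx plus the raw kafka arguments and returning a proto message and an error. A hypothetical handler, with the rpc name, argument key and proto types assumed for illustration:

```go
package example

import (
	"context"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/empty"
	"github.com/opencord/voltha-lib-go/v6/pkg/kafka"
	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
	"github.com/opencord/voltha-protos/v4/go/voltha"
)

// requestHandler is a hypothetical target object for SubscribeWithRequestHandlerInterface.
type requestHandler struct{}

// Adopt_device is resolved by CallFuncByName from the assumed rpc name "adopt_device".
func (h *requestHandler) Adopt_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
	device := &voltha.Device{}
	for _, arg := range args {
		if arg.Key == "device" { // "device" is an assumed argument key
			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
				return nil, err
			}
		}
	}
	// ... act on the device ...
	return &empty.Empty{}, nil
}

func register(ctx context.Context, icProxy kafka.InterContainerProxy) error {
	return icProxy.SubscribeWithRequestHandlerInterface(ctx, kafka.Topic{Name: "openolt"}, &requestHandler{})
}
```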
-
-func (kp *interContainerProxy) addTransactionId(ctx context.Context, transactionId string, currentArgs []*ic.Argument) []*ic.Argument {
-	arg := &KVArg{
-		Key:   TransactionKey,
-		Value: &ic.StrType{Val: transactionId},
-	}
-
-	var marshalledArg *any.Any
-	var err error
-	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: transactionId}); err != nil {
-		logger.Warnw(ctx, "cannot-add-transactionId", log.Fields{"error": err})
-		return currentArgs
-	}
-	protoArg := &ic.Argument{
-		Key:   arg.Key,
-		Value: marshalledArg,
-	}
-	return append(currentArgs, protoArg)
-}
-
-func (kp *interContainerProxy) addFromTopic(ctx context.Context, fromTopic string, currentArgs []*ic.Argument) []*ic.Argument {
-	var marshalledArg *any.Any
-	var err error
-	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: fromTopic}); err != nil {
-		logger.Warnw(ctx, "cannot-add-fromTopic", log.Fields{"error": err})
-		return currentArgs
-	}
-	protoArg := &ic.Argument{
-		Key:   FromTopic,
-		Value: marshalledArg,
-	}
-	return append(currentArgs, protoArg)
-}
-
-// Method to extract the Span embedded in a Kafka RPC request on the receiver side. If a span is found embedded in the KV args (under the key "span"),
-// it is de-serialized and injected into the Context so it can be carried forward by the RPC request processing thread.
-// If no span is found embedded, a span named "kafka-rpc-<rpc-name>" is still created to enrich the Context for RPC calls coming
-// from components that do not yet send the span (e.g. the openonu adapter)
-func (kp *interContainerProxy) enrichContextWithSpan(ctx context.Context, rpcName string, args []*ic.Argument) (opentracing.Span, context.Context) {
-
-	for _, arg := range args {
-		if arg.Key == "span" {
-			var err error
-			var textMapString ic.StrType
-			if err = ptypes.UnmarshalAny(arg.Value, &textMapString); err != nil {
-				logger.Debug(ctx, "unable-to-unmarshal-kvarg-to-textmap-string", log.Fields{"value": arg.Value})
-				break
-			}
-
-			spanTextMap := make(map[string]string)
-			if err = json.Unmarshal([]byte(textMapString.Val), &spanTextMap); err != nil {
-				logger.Debug(ctx, "unable-to-unmarshal-textmap-from-json-string", log.Fields{"textMapString": textMapString, "error": err})
-				break
-			}
-
-			var spanContext opentracing.SpanContext
-			if spanContext, err = opentracing.GlobalTracer().Extract(opentracing.TextMap, opentracing.TextMapCarrier(spanTextMap)); err != nil {
-				logger.Debug(ctx, "unable-to-deserialize-textmap-to-span", log.Fields{"textMap": spanTextMap, "error": err})
-				break
-			}
-
-			var receivedRpcName string
-			extractBaggage := func(k, v string) bool {
-				if k == "rpc-span-name" {
-					receivedRpcName = v
-					return false
-				}
-				return true
-			}
-
-			spanContext.ForeachBaggageItem(extractBaggage)
-
-			return opentracing.StartSpanFromContext(ctx, receivedRpcName, opentracing.FollowsFrom(spanContext))
-		}
-	}
-
-	// Create new Child Span with rpc as name if no span details were received in kafka arguments
-	var spanName strings.Builder
-	spanName.WriteString("kafka-")
-
-	// In case of inter adapter message, use Msg Type for constructing RPC name
-	if rpcName == "process_inter_adapter_message" {
-		for _, arg := range args {
-			if arg.Key == "msg" {
-				iamsg := ic.InterAdapterMessage{}
-				if err := ptypes.UnmarshalAny(arg.Value, &iamsg); err == nil {
-					spanName.WriteString("inter-adapter-")
-					rpcName = iamsg.Header.Type.String()
-				}
-			}
-		}
-	}
-
-	spanName.WriteString("rpc-")
-	spanName.WriteString(rpcName)
-
-	return opentracing.StartSpanFromContext(ctx, spanName.String())
-}
-
-func (kp *interContainerProxy) handleMessage(ctx context.Context, msg *ic.InterContainerMessage, targetInterface interface{}) {
-
-	// First extract the header to know whether this is a request - responses are handled by a different handler
-	if msg.Header.Type == ic.MessageType_REQUEST {
-		var out []reflect.Value
-		var err error
-
-		// Get the request body
-		requestBody := &ic.InterContainerRequestBody{}
-		if err = ptypes.UnmarshalAny(msg.Body, requestBody); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-request", log.Fields{"error": err})
-		} else {
-			logger.Debugw(ctx, "received-request", log.Fields{"rpc": requestBody.Rpc, "header": msg.Header, "args": requestBody.Args})
-			span, ctx := kp.enrichContextWithSpan(ctx, requestBody.Rpc, requestBody.Args)
-			defer span.Finish()
-
-			// let the callee unpack the arguments as it's the only one that knows the real proto type
-			// Augment the requestBody with the message Id as it will be used in scenarios where cores
-			// are deployed in pairs and compete for the same transaction
-			requestBody.Args = kp.addTransactionId(ctx, msg.Header.Id, requestBody.Args)
-
-			// Augment the requestBody with the From topic name as it will be used in scenarios where a container
-			// needs to send an unsolicited message back to the requesting container
-			requestBody.Args = kp.addFromTopic(ctx, msg.Header.FromTopic, requestBody.Args)
-
-			out, err = CallFuncByName(ctx, targetInterface, requestBody.Rpc, requestBody.Args)
-			if err != nil {
-				logger.Warn(ctx, err)
-			}
-		}
-		// Response required?
-		if requestBody.ResponseRequired {
-			// If we already have an error before then just return that
-			var returnError *ic.Error
-			var returnedValues []interface{}
-			var success bool
-			if err != nil {
-				returnError = &ic.Error{Reason: err.Error()}
-				returnedValues = make([]interface{}, 1)
-				returnedValues[0] = returnError
-			} else {
-				returnedValues = make([]interface{}, 0)
-				// Check for errors first
-				lastIndex := len(out) - 1
-				if out[lastIndex].Interface() != nil { // Error
-					if retError, ok := out[lastIndex].Interface().(error); ok {
-						if retError.Error() == ErrorTransactionNotAcquired.Error() {
-							logger.Debugw(ctx, "Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
-							return // Ignore - process is in competing mode and ignored transaction
-						}
-						returnError = &ic.Error{Reason: retError.Error()}
-						returnedValues = append(returnedValues, returnError)
-					} else { // Should never happen
-						returnError = &ic.Error{Reason: "incorrect-error-returns"}
-						returnedValues = append(returnedValues, returnError)
-					}
-				} else if len(out) == 2 && reflect.ValueOf(out[0].Interface()).IsValid() && reflect.ValueOf(out[0].Interface()).IsNil() {
-					logger.Warnw(ctx, "Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
-					return // Ignore - should not happen
-				} else { // Non-error case
-					success = true
-					for idx, val := range out {
-						//logger.Debugw(ctx, "returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
-						if idx != lastIndex {
-							returnedValues = append(returnedValues, val.Interface())
-						}
-					}
-				}
-			}
-
-			var icm *ic.InterContainerMessage
-			if icm, err = encodeResponse(ctx, msg, success, returnedValues...); err != nil {
-				logger.Warnw(ctx, "error-encoding-response-returning-failure-result", log.Fields{"error": err})
-				icm = encodeDefaultFailedResponse(ctx, msg)
-			}
-			// To preserve message ordering, all messages that share a key are sent to the same partition
-			// of the topic, by providing a message key.  The key is encoded in the topic name.  If the deviceId is not
-			// present then the key will be empty, and messages for the topic may be spread across
-			// partitions.
-			replyTopic := &Topic{Name: msg.Header.FromTopic}
-			key := msg.Header.KeyTopic
-			logger.Debugw(ctx, "sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
-			// TODO: handle error response.
-			go func() {
-				if err := kp.kafkaClient.Send(ctx, icm, replyTopic, key); err != nil {
-					logger.Errorw(ctx, "send-reply-failed", log.Fields{
-						"topic": replyTopic,
-						"key":   key,
-						"error": err})
-				}
-			}()
-		}
-	} else if msg.Header.Type == ic.MessageType_RESPONSE {
-		logger.Debugw(ctx, "response-received", log.Fields{"msg-header": msg.Header})
-		go kp.dispatchResponse(ctx, msg)
-	} else {
-		logger.Warnw(ctx, "unsupported-message-received", log.Fields{"msg-header": msg.Header})
-	}
-}
-
-func (kp *interContainerProxy) waitForMessages(ctx context.Context, ch <-chan *ic.InterContainerMessage, topic Topic, targetInterface interface{}) {
-	//	Wait for messages
-	for msg := range ch {
-		//logger.Debugw(ctx, "request-received", log.Fields{"msg": msg, "topic": topic.Name, "target": targetInterface})
-		go kp.handleMessage(context.Background(), msg, targetInterface)
-	}
-}
-
-func (kp *interContainerProxy) dispatchResponse(ctx context.Context, msg *ic.InterContainerMessage) {
-	kp.lockTransactionIdToChannelMap.RLock()
-	defer kp.lockTransactionIdToChannelMap.RUnlock()
-	if _, exist := kp.transactionIdToChannelMap[msg.Header.Id]; !exist {
-		logger.Debugw(ctx, "no-waiting-channel", log.Fields{"transaction": msg.Header.Id})
-		return
-	}
-	kp.transactionIdToChannelMap[msg.Header.Id].ch <- msg
-}
-
-// subscribeForResponse allows a caller to subscribe to a given topic when waiting for a response.
-// This method is built to prevent all subscribers from receiving all messages, as is the case with the Subscribe
-// API. A single response channel waits for kafka messages and dispatches each message to the
-// corresponding waiting channel
-func (kp *interContainerProxy) subscribeForResponse(ctx context.Context, topic Topic, trnsId string) (chan *ic.InterContainerMessage, error) {
-	logger.Debugw(ctx, "subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
-
-	// Create a specific channel for this consumer.  We cannot use the channel from the kafka client as it will
-	// broadcast any message for this topic to all channels waiting on it.
-	// Set channel size to 1 to prevent deadlock, see VOL-2708
-	ch := make(chan *ic.InterContainerMessage, 1)
-	kp.addToTransactionIdToChannelMap(trnsId, &topic, ch)
-
-	return ch, nil
-}
-
-func (kp *interContainerProxy) unSubscribeForResponse(ctx context.Context, trnsId string) error {
-	logger.Debugw(ctx, "unsubscribe-for-response", log.Fields{"trnsId": trnsId})
-	kp.deleteFromTransactionIdToChannelMap(trnsId)
-	return nil
-}
-
-func (kp *interContainerProxy) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
-	return kp.kafkaClient.EnableLivenessChannel(ctx, enable)
-}
-
-func (kp *interContainerProxy) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
-	return kp.kafkaClient.EnableHealthinessChannel(ctx, enable)
-}
-
-func (kp *interContainerProxy) SendLiveness(ctx context.Context) error {
-	return kp.kafkaClient.SendLiveness(ctx)
-}
-
-//encodeRequest formats a request to send over kafka and returns an InterContainerMessage on success
-//or an error on failure
-func encodeRequest(ctx context.Context, rpc string, toTopic *Topic, replyTopic *Topic, key string, kvArgs ...*KVArg) (*ic.InterContainerMessage, error) {
-	requestHeader := &ic.Header{
-		Id:        uuid.New().String(),
-		Type:      ic.MessageType_REQUEST,
-		FromTopic: replyTopic.Name,
-		ToTopic:   toTopic.Name,
-		KeyTopic:  key,
-		Timestamp: ptypes.TimestampNow(),
-	}
-	requestBody := &ic.InterContainerRequestBody{
-		Rpc:              rpc,
-		ResponseRequired: true,
-		ReplyToTopic:     replyTopic.Name,
-	}
-
-	for _, arg := range kvArgs {
-		if arg == nil {
-			// In case the caller sends an array with empty args
-			continue
-		}
-		var marshalledArg *any.Any
-		var err error
-		// ascertain the value interface type is a proto.Message
-		protoValue, ok := arg.Value.(proto.Message)
-		if !ok {
-			logger.Warnw(ctx, "argument-value-not-proto-message", log.Fields{"error": ok, "Value": arg.Value})
-			err := errors.New("argument-value-not-proto-message")
-			return nil, err
-		}
-		if marshalledArg, err = ptypes.MarshalAny(protoValue); err != nil {
-			logger.Warnw(ctx, "cannot-marshal-request", log.Fields{"error": err})
-			return nil, err
-		}
-		protoArg := &ic.Argument{
-			Key:   arg.Key,
-			Value: marshalledArg,
-		}
-		requestBody.Args = append(requestBody.Args, protoArg)
-	}
-
-	var marshalledData *any.Any
-	var err error
-	if marshalledData, err = ptypes.MarshalAny(requestBody); err != nil {
-		logger.Warnw(ctx, "cannot-marshal-request", log.Fields{"error": err})
-		return nil, err
-	}
-	request := &ic.InterContainerMessage{
-		Header: requestHeader,
-		Body:   marshalledData,
-	}
-	return request, nil
-}
-
-func decodeResponse(ctx context.Context, response *ic.InterContainerMessage) (*ic.InterContainerResponseBody, error) {
-	//	Extract the message body
-	responseBody := ic.InterContainerResponseBody{}
-	if err := ptypes.UnmarshalAny(response.Body, &responseBody); err != nil {
-		logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
-		return nil, err
-	}
-	//logger.Debugw(ctx, "response-decoded-successfully", log.Fields{"response-status": &responseBody.Success})
-
-	return &responseBody, nil
-
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/utils.go b/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/utils.go
deleted file mode 100644
index bdc615f..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/utils.go
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package kafka
-
-import (
-	"github.com/golang/protobuf/ptypes/any"
-	"strings"
-)
-
-const (
-	TopicSeparator = "_"
-	DeviceIdLength = 24
-)
-
-// A Topic definition - may be augmented with additional attributes eventually
-type Topic struct {
-	// The name of the topic. It must start with a letter,
-	// and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
-	// underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent
-	// signs (`%`).
-	Name string
-}
-
-type KVArg struct {
-	Key   string
-	Value interface{}
-}
-
-type RpcMType int
-
-const (
-	RpcFormattingError RpcMType = iota
-	RpcSent
-	RpcReply
-	RpcTimeout
-	RpcTransportError
-	RpcSystemClosing
-)
-
-type RpcResponse struct {
-	MType RpcMType
-	Err   error
-	Reply *any.Any
-}
-
-func NewResponse(messageType RpcMType, err error, body *any.Any) *RpcResponse {
-	return &RpcResponse{
-		MType: messageType,
-		Err:   err,
-		Reply: body,
-	}
-}
-
-// TODO:  Remove and provide a better way to get the device id
-// GetDeviceIdFromTopic extracts the deviceId from the topic name.  The topic name is formatted either as:
-//			<any string> or <any string>_<deviceId>.  The device Id is 24 characters long.
-func GetDeviceIdFromTopic(topic Topic) string {
-	pos := strings.LastIndex(topic.Name, TopicSeparator)
-	if pos == -1 {
-		return ""
-	}
-	adjustedPos := pos + len(TopicSeparator)
-	if adjustedPos >= len(topic.Name) {
-		return ""
-	}
-	deviceId := topic.Name[adjustedPos:len(topic.Name)]
-	if len(deviceId) != DeviceIdLength {
-		return ""
-	}
-	return deviceId
-}
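A quick usage sketch of the helper above, for readers of the (v6) code being removed here; the "openolt" topic prefix is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/opencord/voltha-lib-go/v6/pkg/kafka"
)

func main() {
	// The 24-character suffix after the last "_" is treated as the device id.
	fmt.Println(kafka.GetDeviceIdFromTopic(kafka.Topic{Name: "openolt_0123456789abcdef01234567"}))
	// prints: 0123456789abcdef01234567
	fmt.Println(kafka.GetDeviceIdFromTopic(kafka.Topic{Name: "openolt"}) == "")
	// prints: true (no separator, so no device id)
}
```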
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/common.go
index 66a866e..4813ba1 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/common.go
@@ -16,7 +16,7 @@
 package config
 
 import (
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/configmanager.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/configmanager.go
similarity index 97%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/configmanager.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/configmanager.go
index 01e4375..c489407 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/configmanager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/configmanager.go
@@ -22,14 +22,14 @@
 	"strings"
 	"time"
 
-	"github.com/opencord/voltha-lib-go/v6/pkg/db"
-	"github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/db"
+	"github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 const (
 	defaultkvStoreConfigPath     = "config"
-	defaultkvStoreDataPathPrefix = "service/voltha_voltha"
+	defaultkvStoreDataPathPrefix = "service/voltha/voltha1_voltha1"
 	kvStorePathSeparator         = "/"
 )
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/logcontroller.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/logcontroller.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go
index 914ea25..b58f999 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/logcontroller.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go
@@ -26,10 +26,11 @@
 	"crypto/md5"
 	"encoding/json"
 	"errors"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
 	"os"
 	"sort"
 	"strings"
+
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 const (
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/logfeaturescontroller.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/logfeaturescontroller.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go
index 8e3b9fa..579c1de 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/logfeaturescontroller.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go
@@ -19,9 +19,10 @@
 import (
 	"context"
 	"errors"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
 	"os"
 	"strings"
+
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 const (
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/backend.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
index 431970d..2e57a27 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/backend.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
@@ -23,8 +23,8 @@
 	"sync"
 	"time"
 
-	"github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/common.go
index 422740b..d8a0571 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/common.go
@@ -16,7 +16,7 @@
 package db
 
 import (
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore/client.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/common.go
index a0edae1..8ac2a4a 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/common.go
@@ -16,7 +16,7 @@
 package kvstore
 
 import (
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore/etcdclient.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go
index 88b5b45..6ca5329 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore/etcdclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go
@@ -19,13 +19,14 @@
 	"context"
 	"errors"
 	"fmt"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-	v3Client "go.etcd.io/etcd/clientv3"
-	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
 	"os"
 	"strconv"
 	"sync"
 	"time"
+
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	v3Client "go.etcd.io/etcd/clientv3"
+	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
 )
 
 const (
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore/etcdpool.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore/etcdpool.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go
index 7ac2511..4d33c27 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore/etcdpool.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go
@@ -19,10 +19,11 @@
 	"container/list"
 	"context"
 	"errors"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-	"go.etcd.io/etcd/clientv3"
 	"sync"
 	"time"
+
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	"go.etcd.io/etcd/clientv3"
 )
 
 // EtcdClientAllocator represents a generic interface to allocate an Etcd Client
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore/kvutils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/kvutils.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore/kvutils.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/kvutils.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/events/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/events/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/common.go
index 9ef5bfe..0f0468e 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/events/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/common.go
@@ -16,7 +16,7 @@
 package events
 
 import (
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/events/eventif/events_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/eventif/events_proxy_if.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/events/eventif/events_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/eventif/events_proxy_if.go
index 35f821f..e4ebc36 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/events/eventif/events_proxy_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/eventif/events_proxy_if.go
@@ -18,7 +18,8 @@
 
 import (
 	"context"
-	"github.com/opencord/voltha-protos/v4/go/voltha"
+
+	"github.com/opencord/voltha-protos/v5/go/voltha"
 )
 
 // EventProxy interface for eventproxy
@@ -31,6 +32,8 @@
 		subCategory *EventSubCategory, raisedTs int64) error
 	EnableLivenessChannel(ctx context.Context, enable bool) chan bool
 	SendLiveness(ctx context.Context) error
+	Start() error
+	Stop()
 }
 
 const (
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/events/events_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/events/events_proxy.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go
index 89d58a0..e4493f9 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/events/events_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go
@@ -26,11 +26,11 @@
 	"sync"
 	"time"
 
-	"github.com/golang/protobuf/ptypes"
-	"github.com/opencord/voltha-lib-go/v6/pkg/events/eventif"
-	"github.com/opencord/voltha-lib-go/v6/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-	"github.com/opencord/voltha-protos/v4/go/voltha"
+	"github.com/opencord/voltha-lib-go/v7/pkg/events/eventif"
+	"github.com/opencord/voltha-lib-go/v7/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	"github.com/opencord/voltha-protos/v5/go/voltha"
+	"google.golang.org/protobuf/types/known/timestamppb"
 )
 
 // TODO: Make configurable through helm chart
@@ -97,17 +97,8 @@
 	header.TypeVersion = eventif.EventTypeVersion
 
 	// raisedTs is in seconds
-	timestamp, err := ptypes.TimestampProto(time.Unix(raisedTs, 0))
-	if err != nil {
-		return nil, err
-	}
-	header.RaisedTs = timestamp
-
-	timestamp, err = ptypes.TimestampProto(time.Now())
-	if err != nil {
-		return nil, err
-	}
-	header.ReportedTs = timestamp
+	header.RaisedTs = timestamppb.New(time.Unix(raisedTs, 0))
+	header.ReportedTs = timestamppb.New(time.Now())
 
 	return &header, nil
 }
@@ -201,7 +192,7 @@
 }
 
 // Start the event proxy
-func (ep *EventProxy) Start() {
+func (ep *EventProxy) Start() error {
 	eq := ep.eventQueue
 	go eq.start(ep.queueCtx)
 	logger.Debugw(context.Background(), "event-proxy-starting...", log.Fields{"events-threashold": EVENT_THRESHOLD})
@@ -235,10 +226,13 @@
 				"reported-ts": event.Header.ReportedTs, "event-type": event.EventType})
 		}
 	}
+	return nil
 }
 
 func (ep *EventProxy) Stop() {
-	ep.eventQueue.stop()
+	if ep.eventQueue != nil {
+		ep.eventQueue.stop()
+	}
 }
 
 type EventQueue struct {
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/events/utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/utils.go
similarity index 91%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/events/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/utils.go
index fe3a017..4598161 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/events/utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/utils.go
@@ -19,7 +19,8 @@
 	"fmt"
 	"strconv"
 
-	"github.com/opencord/voltha-protos/v4/go/voltha"
+	"github.com/opencord/voltha-protos/v5/go/common"
+	"github.com/opencord/voltha-protos/v5/go/voltha"
 )
 
 type ContextType string
@@ -64,8 +65,8 @@
 
 //CreateDeviceStateChangeEvent forms and returns a new DeviceStateChange Event
 func CreateDeviceStateChangeEvent(serialNumber string, deviceID string, parentID string,
-	prevOperStatus voltha.OperStatus_Types, prevConnStatus voltha.ConnectStatus_Types, prevAdminStatus voltha.AdminState_Types,
-	operStatus voltha.OperStatus_Types, connStatus voltha.ConnectStatus_Types, adminStatus voltha.AdminState_Types,
+	prevOperStatus common.OperStatus_Types, prevConnStatus common.ConnectStatus_Types, prevAdminStatus common.AdminState_Types,
+	operStatus common.OperStatus_Types, connStatus common.ConnectStatus_Types, adminStatus common.AdminState_Types,
 	parentPort uint32, isRoot bool) *voltha.DeviceEvent {
 
 	context := make(map[string]string)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/flows/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/flows/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/common.go
index b1279bf..2f5ff92 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/flows/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/common.go
@@ -16,7 +16,7 @@
 package flows
 
 import (
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/flows/flow_utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
index 9a9448f..41b615a 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/flows/flow_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
@@ -23,11 +23,12 @@
 	"fmt"
 	"hash"
 	"sort"
+	"sync"
 
 	"github.com/cevaris/ordered_map"
-	"github.com/gogo/protobuf/proto"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-	ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	ofp "github.com/opencord/voltha-protos/v5/go/openflow_13"
 )
 
 var (
@@ -1323,7 +1324,8 @@
 }
 
 type DeviceRules struct {
-	Rules map[string]*FlowsAndGroups
+	Rules     map[string]*FlowsAndGroups
+	rulesLock sync.RWMutex
 }
 
 func NewDeviceRules() *DeviceRules {
@@ -1335,6 +1337,8 @@
 func (dr *DeviceRules) Copy() *DeviceRules {
 	copyDR := NewDeviceRules()
 	if dr != nil {
+		dr.rulesLock.RLock()
+		defer dr.rulesLock.RUnlock()
 		for key, val := range dr.Rules {
 			if val != nil {
 				copyDR.Rules[key] = val.Copy()
@@ -1344,7 +1348,19 @@
 	return copyDR
 }
 
+func (dr *DeviceRules) Keys() []string {
+	dr.rulesLock.RLock()
+	defer dr.rulesLock.RUnlock()
+	keys := make([]string, 0, len(dr.Rules))
+	for k := range dr.Rules {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
 func (dr *DeviceRules) ClearFlows(deviceId string) {
+	dr.rulesLock.Lock()
+	defer dr.rulesLock.Unlock()
 	if _, exist := dr.Rules[deviceId]; exist {
 		dr.Rules[deviceId].Flows = ordered_map.NewOrderedMap()
 	}
@@ -1352,6 +1368,8 @@
 
 func (dr *DeviceRules) FilterRules(deviceIds map[string]string) *DeviceRules {
 	filteredDR := NewDeviceRules()
+	dr.rulesLock.RLock()
+	defer dr.rulesLock.RUnlock()
 	for key, val := range dr.Rules {
 		if _, exist := deviceIds[key]; exist {
 			filteredDR.Rules[key] = val.Copy()
@@ -1361,6 +1379,8 @@
 }
 
 func (dr *DeviceRules) AddFlow(deviceId string, flow *ofp.OfpFlowStats) {
+	dr.rulesLock.Lock()
+	defer dr.rulesLock.Unlock()
 	if _, exist := dr.Rules[deviceId]; !exist {
 		dr.Rules[deviceId] = NewFlowsAndGroups()
 	}
@@ -1368,11 +1388,15 @@
 }
 
 func (dr *DeviceRules) GetRules() map[string]*FlowsAndGroups {
+	dr.rulesLock.RLock()
+	defer dr.rulesLock.RUnlock()
 	return dr.Rules
 }
 
 func (dr *DeviceRules) String() string {
 	var buffer bytes.Buffer
+	dr.rulesLock.RLock()
+	defer dr.rulesLock.RUnlock()
 	for key, value := range dr.Rules {
 		buffer.WriteString("DeviceId:")
 		buffer.WriteString(key)
@@ -1383,6 +1407,8 @@
 }
 
 func (dr *DeviceRules) AddFlowsAndGroup(deviceId string, fg *FlowsAndGroups) {
+	dr.rulesLock.Lock()
+	defer dr.rulesLock.Unlock()
 	if _, ok := dr.Rules[deviceId]; !ok {
 		dr.Rules[deviceId] = NewFlowsAndGroups()
 	}
@@ -1392,6 +1418,8 @@
 // CreateEntryIfNotExist creates a new deviceId in the Map if it does not exist and assigns an
 // empty FlowsAndGroups to it.  Otherwise, it does nothing.
 func (dr *DeviceRules) CreateEntryIfNotExist(deviceId string) {
+	dr.rulesLock.Lock()
+	defer dr.rulesLock.Unlock()
 	if _, ok := dr.Rules[deviceId]; !ok {
 		dr.Rules[deviceId] = NewFlowsAndGroups()
 	}
@@ -1568,7 +1596,7 @@
 	//convert each octet to decimal
 	for i := 0; i < 6; i++ {
 		if i != 0 {
-			catalyze = catalyze >> 8
+			catalyze >>= 8
 		}
 		octet := mac & catalyze
 		octetDecimal := octet >> uint8(40-i*8)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
new file mode 100644
index 0000000..de649d6
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
@@ -0,0 +1,541 @@
+/*
+ * Copyright 2021-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package grpc
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
+	grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/probe"
+	"github.com/opencord/voltha-protos/v5/go/adapter_services"
+	"github.com/opencord/voltha-protos/v5/go/core"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/keepalive"
+)
+
+type event byte
+type state byte
+type SetAndTestServiceHandler func(context.Context, *grpc.ClientConn) interface{}
+type RestartedHandler func(ctx context.Context, endPoint string) error
+
+type contextKey string
+
+func (c contextKey) String() string {
+	return string(c)
+}
+
+var (
+	grpcMonitorContextKey = contextKey("grpc-monitor")
+)
+
+const (
+	grpcBackoffInitialInterval = "GRPC_BACKOFF_INITIAL_INTERVAL"
+	grpcBackoffMaxInterval     = "GRPC_BACKOFF_MAX_INTERVAL"
+	grpcBackoffMaxElapsedTime  = "GRPC_BACKOFF_MAX_ELAPSED_TIME"
+	grpcMonitorInterval        = "GRPC_MONITOR_INTERVAL"
+)
+
+const (
+	DefaultBackoffInitialInterval = 100 * time.Millisecond
+	DefaultBackoffMaxInterval     = 5 * time.Second
+	DefaultBackoffMaxElapsedTime  = 0 * time.Second // No time limit
+	DefaultGRPCMonitorInterval    = 5 * time.Second
+)
+
+const (
+	connectionErrorSubString  = "SubConns are in TransientFailure"
+	connectionClosedSubstring = "client connection is closing"
+	connectionError           = "connection error"
+	connectionSystemNotReady  = "system is not ready"
+)
+
+const (
+	eventConnecting = event(iota)
+	eventConnected
+	eventDisconnected
+	eventStopped
+	eventError
+
+	stateConnected = state(iota)
+	stateConnecting
+	stateDisconnected
+)
+
+type Client struct {
+	apiEndPoint            string
+	connection             *grpc.ClientConn
+	connectionLock         sync.RWMutex
+	stateLock              sync.RWMutex
+	state                  state
+	service                interface{}
+	events                 chan event
+	onRestart              RestartedHandler
+	backoffInitialInterval time.Duration
+	backoffMaxInterval     time.Duration
+	backoffMaxElapsedTime  time.Duration
+	activityCheck          bool
+	monitorInterval        time.Duration
+	activeCh               chan struct{}
+	activeChMutex          sync.RWMutex
+	done                   bool
+	livenessCallback       func(timestamp time.Time)
+}
+
+type ClientOption func(*Client)
+
+func ActivityCheck(enable bool) ClientOption {
+	return func(args *Client) {
+		args.activityCheck = enable
+	}
+}
+
+func NewClient(endpoint string, onRestart RestartedHandler, opts ...ClientOption) (*Client, error) {
+	c := &Client{
+		apiEndPoint:            endpoint,
+		onRestart:              onRestart,
+		events:                 make(chan event, 1),
+		state:                  stateDisconnected,
+		backoffInitialInterval: DefaultBackoffInitialInterval,
+		backoffMaxInterval:     DefaultBackoffMaxInterval,
+		backoffMaxElapsedTime:  DefaultBackoffMaxElapsedTime,
+		monitorInterval:        DefaultGRPCMonitorInterval,
+	}
+	for _, option := range opts {
+		option(c)
+	}
+
+	// Check for environment variables
+	if err := SetFromEnvVariable(grpcBackoffInitialInterval, &c.backoffInitialInterval); err != nil {
+		logger.Warnw(context.Background(), "failure-reading-env-variable", log.Fields{"error": err, "variable": grpcBackoffInitialInterval})
+	}
+
+	if err := SetFromEnvVariable(grpcBackoffMaxInterval, &c.backoffMaxInterval); err != nil {
+		logger.Warnw(context.Background(), "failure-reading-env-variable", log.Fields{"error": err, "variable": grpcBackoffMaxInterval})
+	}
+
+	if err := SetFromEnvVariable(grpcBackoffMaxElapsedTime, &c.backoffMaxElapsedTime); err != nil {
+		logger.Warnw(context.Background(), "failure-reading-env-variable", log.Fields{"error": err, "variable": grpcBackoffMaxElapsedTime})
+	}
+
+	if err := SetFromEnvVariable(grpcMonitorInterval, &c.monitorInterval); err != nil {
+		logger.Warnw(context.Background(), "failure-reading-env-variable", log.Fields{"error": err, "variable": grpcMonitorInterval})
+	}
+
+	logger.Infow(context.Background(), "initialized-client", log.Fields{"client": c})
+
+	// Sanity check
+	if c.backoffInitialInterval > c.backoffMaxInterval {
+		return nil, fmt.Errorf("initial retry delay %v is greater than maximum retry delay %v", c.backoffInitialInterval, c.backoffMaxInterval)
+	}
+
+	return c, nil
+}
+
+func (c *Client) GetClient() (interface{}, error) {
+	c.connectionLock.RLock()
+	defer c.connectionLock.RUnlock()
+	if c.service == nil {
+		return nil, fmt.Errorf("no connection to %s", c.apiEndPoint)
+	}
+	return c.service, nil
+}
+
+// GetCoreServiceClient is a helper function that returns a concrete CoreServiceClient instead of the
+// generic interface returned by the GetClient() API.
+func (c *Client) GetCoreServiceClient() (core.CoreServiceClient, error) {
+	c.connectionLock.RLock()
+	defer c.connectionLock.RUnlock()
+	if c.service == nil {
+		return nil, fmt.Errorf("no core connection to %s", c.apiEndPoint)
+	}
+	client, ok := c.service.(core.CoreServiceClient)
+	if ok {
+		return client, nil
+	}
+	return nil, fmt.Errorf("invalid-service-%s", reflect.TypeOf(c.service))
+}
+
+// GetOnuInterAdapterServiceClient is a helper function that returns a concrete OnuInterAdapterServiceClient
+// instead of the generic interface returned by the GetClient() API.
+func (c *Client) GetOnuInterAdapterServiceClient() (adapter_services.OnuInterAdapterServiceClient, error) {
+	c.connectionLock.RLock()
+	defer c.connectionLock.RUnlock()
+	if c.service == nil {
+		return nil, fmt.Errorf("no child adapter connection to %s", c.apiEndPoint)
+	}
+	client, ok := c.service.(adapter_services.OnuInterAdapterServiceClient)
+	if ok {
+		return client, nil
+	}
+	return nil, fmt.Errorf("invalid-service-%s", reflect.TypeOf(c.service))
+}
+
+// GetOltInterAdapterServiceClient is a helper function that returns a concrete OltInterAdapterServiceClient
+// instead of the generic interface returned by the GetClient() API.
+func (c *Client) GetOltInterAdapterServiceClient() (adapter_services.OltInterAdapterServiceClient, error) {
+	c.connectionLock.RLock()
+	defer c.connectionLock.RUnlock()
+	if c.service == nil {
+		return nil, fmt.Errorf("no parent adapter connection to %s", c.apiEndPoint)
+	}
+	client, ok := c.service.(adapter_services.OltInterAdapterServiceClient)
+	if ok {
+		return client, nil
+	}
+	return nil, fmt.Errorf("invalid-service-%s", reflect.TypeOf(c.service))
+}
+
+func (c *Client) Reset(ctx context.Context) {
+	logger.Debugw(ctx, "resetting-client-connection", log.Fields{"endpoint": c.apiEndPoint})
+	c.stateLock.Lock()
+	defer c.stateLock.Unlock()
+	if c.state == stateConnected {
+		c.state = stateDisconnected
+		c.events <- eventDisconnected
+	}
+}
+
+func (c *Client) clientInterceptor(ctx context.Context, method string, req interface{}, reply interface{},
+	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+	// Nothing to do before intercepting the call
+	err := invoker(ctx, method, req, reply, cc, opts...)
+	// On connection failure, start the reconnect process depending on the error response
+	if err != nil {
+		logger.Errorw(ctx, "received-error", log.Fields{"error": err, "context": ctx, "endpoint": c.apiEndPoint})
+		if strings.Contains(err.Error(), connectionErrorSubString) ||
+			strings.Contains(err.Error(), connectionError) ||
+			strings.Contains(err.Error(), connectionSystemNotReady) ||
+			isGrpcMonitorKeyPresentInContext(ctx) {
+			c.stateLock.Lock()
+			if c.state == stateConnected {
+				logger.Warnw(context.Background(), "sending-disconnect-event", log.Fields{"endpoint": c.apiEndPoint, "error": err})
+				c.state = stateDisconnected
+				c.events <- eventDisconnected
+			}
+			c.stateLock.Unlock()
+		} else if strings.Contains(err.Error(), connectionClosedSubstring) {
+			logger.Errorw(context.Background(), "invalid-client-connection-closed", log.Fields{"endpoint": c.apiEndPoint, "error": err})
+		}
+		return err
+	}
+	// Update activity on success only
+	c.updateActivity(ctx)
+	return nil
+}
+
+// updateActivity pushes an activity indication on the channel so that the monitoring routine does not probe
+// the gRPC connection while it is actively being used. The interceptor invokes this only on successful requests;
+// on a connection error a disconnect event is raised instead, and a separate routine takes care of the re-connect.
+func (c *Client) updateActivity(ctx context.Context) {
+	c.activeChMutex.RLock()
+	defer c.activeChMutex.RUnlock()
+	if c.activeCh != nil {
+		logger.Debugw(ctx, "update-activity", log.Fields{"api-endpoint": c.apiEndPoint})
+		c.activeCh <- struct{}{}
+
+		// Update liveness only in connected state
+		if c.livenessCallback != nil {
+			c.stateLock.RLock()
+			if c.state == stateConnected {
+				c.livenessCallback(time.Now())
+			}
+			c.stateLock.RUnlock()
+		}
+	}
+}
+
+func WithGrpcMonitorContext(ctx context.Context, name string) context.Context {
+	ctx = context.WithValue(ctx, grpcMonitorContextKey, name)
+	return ctx
+}
+
+func isGrpcMonitorKeyPresentInContext(ctx context.Context) bool {
+	if ctx != nil {
+		_, present := ctx.Value(grpcMonitorContextKey).(string)
+		return present
+	}
+	return false
+}
+
+// monitorActivity monitors the activity on the gRPC connection.  If there is no activity within the specified
+// timeout, it sends a default API request on that connection.  If the connection is good then nothing
+// happens.  If it is bad, this triggers the reconnection attempts.
+func (c *Client) monitorActivity(ctx context.Context, handler SetAndTestServiceHandler) {
+	logger.Infow(ctx, "start-activity-monitor", log.Fields{"endpoint": c.apiEndPoint})
+
+	// Create an activity monitor channel.  An unbuffered channel works well.  However, we use a buffered
+	// channel here as a safeguard against the grpc interceptor publishing more events than this monitoring
+	// thread can consume.
+	c.activeChMutex.Lock()
+	c.activeCh = make(chan struct{}, 10)
+	c.activeChMutex.Unlock()
+
+	// Interval to wait for no activity before probing the connection
+	timeout := c.monitorInterval
+loop:
+	for {
+		timeoutTimer := time.NewTimer(timeout)
+		select {
+
+		case <-c.activeCh:
+			logger.Debugw(ctx, "received-active-notification", log.Fields{"endpoint": c.apiEndPoint})
+
+			// Reset timer
+			if !timeoutTimer.Stop() {
+				<-timeoutTimer.C
+			}
+
+		case <-ctx.Done():
+			break loop
+
+		case <-timeoutTimer.C:
+			// Trigger an activity check if the state is connected.  If the state is not connected then there is already
+			// a backoff retry mechanism in place to re-establish the connection.
+			c.stateLock.RLock()
+			runCheck := c.state == stateConnected
+			c.stateLock.RUnlock()
+			if runCheck {
+				go func() {
+					logger.Debugw(ctx, "connection-check-start", log.Fields{"api-endpoint": c.apiEndPoint})
+					subCtx, cancel := context.WithTimeout(ctx, c.backoffMaxInterval)
+					defer cancel()
+					subCtx = WithGrpcMonitorContext(subCtx, "grpc-monitor")
+					c.connectionLock.RLock()
+					defer c.connectionLock.RUnlock()
+					if c.connection != nil {
+						response := handler(subCtx, c.connection)
+						logger.Debugw(ctx, "connection-check-response", log.Fields{"api-endpoint": c.apiEndPoint, "up": response != nil})
+					}
+				}()
+			}
+		}
+	}
+	logger.Infow(ctx, "activity-monitor-stopping", log.Fields{"endpoint": c.apiEndPoint})
+}
+
+// Start kicks off the client by attempting to connect to the remote endpoint
+func (c *Client) Start(ctx context.Context, handler SetAndTestServiceHandler) {
+	logger.Debugw(ctx, "Starting GRPC - Client", log.Fields{"api-endpoint": c.apiEndPoint})
+
+	// If the context contains a k8s probe then register services
+	p := probe.GetProbeFromContext(ctx)
+	if p != nil {
+		p.RegisterService(ctx, c.apiEndPoint)
+	}
+
+	// Enable activity check, if required
+	if c.activityCheck {
+		go c.monitorActivity(ctx, handler)
+	}
+
+	initialConnection := true
+	c.events <- eventConnecting
+	backoff := NewBackoff(c.backoffInitialInterval, c.backoffMaxInterval, c.backoffMaxElapsedTime)
+	attempt := 1
+loop:
+	for {
+		select {
+		case <-ctx.Done():
+			logger.Debugw(ctx, "context-closing", log.Fields{"endpoint": c.apiEndPoint})
+			return
+		case event := <-c.events:
+			logger.Debugw(ctx, "received-event", log.Fields{"event": event, "endpoint": c.apiEndPoint})
+			switch event {
+			case eventConnecting:
+				logger.Debugw(ctx, "connection-start", log.Fields{"endpoint": c.apiEndPoint, "attempts": attempt})
+
+				c.stateLock.Lock()
+				if c.state == stateConnected {
+					c.state = stateDisconnected
+				}
+				if c.state != stateConnecting {
+					c.state = stateConnecting
+					go func() {
+						if err := c.connectToEndpoint(ctx, handler, p); err != nil {
+							c.stateLock.Lock()
+							c.state = stateDisconnected
+							c.stateLock.Unlock()
+							logger.Errorw(ctx, "connection-failed", log.Fields{"endpoint": c.apiEndPoint, "attempt": attempt, "error": err})
+
+							// Retry connection after a delay
+							if err = backoff.Backoff(ctx); err != nil {
+								// Context has closed or reached maximum elapsed time, if set
+								logger.Errorw(ctx, "retry-aborted", log.Fields{"endpoint": c.apiEndPoint, "error": err})
+								return
+							}
+							attempt += 1
+							c.events <- eventConnecting
+						} else {
+							backoff.Reset()
+						}
+					}()
+				}
+				c.stateLock.Unlock()
+
+			case eventConnected:
+				logger.Debugw(ctx, "endpoint-connected", log.Fields{"endpoint": c.apiEndPoint})
+				attempt = 1
+				c.stateLock.Lock()
+				if c.state != stateConnected {
+					c.state = stateConnected
+					if initialConnection {
+						logger.Debugw(ctx, "initial-endpoint-connection", log.Fields{"endpoint": c.apiEndPoint})
+						initialConnection = false
+					} else {
+						logger.Debugw(ctx, "endpoint-reconnection", log.Fields{"endpoint": c.apiEndPoint})
+						// Trigger any callback on a restart
+						go func() {
+							err := c.onRestart(log.WithSpanFromContext(context.Background(), ctx), c.apiEndPoint)
+							if err != nil {
+								logger.Errorw(ctx, "unable-to-restart-endpoint", log.Fields{"error": err, "endpoint": c.apiEndPoint})
+							}
+						}()
+					}
+				}
+				c.stateLock.Unlock()
+
+			case eventDisconnected:
+				if p != nil {
+					p.UpdateStatus(ctx, c.apiEndPoint, probe.ServiceStatusNotReady)
+				}
+				logger.Debugw(ctx, "endpoint-disconnected", log.Fields{"endpoint": c.apiEndPoint, "status": c.state})
+
+				// Try to connect again
+				c.events <- eventConnecting
+
+			case eventStopped:
+				logger.Debugw(ctx, "endPoint-stopped", log.Fields{"adapter": c.apiEndPoint})
+				go func() {
+					if err := c.closeConnection(ctx, p); err != nil {
+						logger.Errorw(ctx, "endpoint-closing-connection-failed", log.Fields{"endpoint": c.apiEndPoint, "error": err})
+					}
+				}()
+				break loop
+			case eventError:
+				logger.Errorw(ctx, "endpoint-error-event", log.Fields{"endpoint": c.apiEndPoint})
+			default:
+				logger.Errorw(ctx, "endpoint-unknown-event", log.Fields{"endpoint": c.apiEndPoint, "error": event})
+			}
+		}
+	}
+	logger.Infow(ctx, "endpoint-stopped", log.Fields{"endpoint": c.apiEndPoint})
+}
+
+func (c *Client) connectToEndpoint(ctx context.Context, handler SetAndTestServiceHandler, p *probe.Probe) error {
+	if p != nil {
+		p.UpdateStatus(ctx, c.apiEndPoint, probe.ServiceStatusPreparing)
+	}
+
+	c.connectionLock.Lock()
+	defer c.connectionLock.Unlock()
+
+	if c.connection != nil {
+		_ = c.connection.Close()
+		c.connection = nil
+	}
+
+	c.service = nil
+
+	// Use Interceptors to:
+	// 1. automatically inject and publish Open Tracing Spans for calls made by this GRPC Client
+	// 2. detect connection failure on client calls such that the reconnection process can begin
+	conn, err := grpc.Dial(c.apiEndPoint,
+		grpc.WithInsecure(),
+		grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(
+			grpc_opentracing.StreamClientInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
+		)),
+		grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(
+			grpc_opentracing.UnaryClientInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
+		)),
+		grpc.WithUnaryInterceptor(c.clientInterceptor),
+		// Set keepalive parameters - derived from the client's monitor and backoff intervals
+		grpc.WithKeepaliveParams(keepalive.ClientParameters{
+			Time:                c.monitorInterval,
+			Timeout:             c.backoffMaxInterval,
+			PermitWithoutStream: true,
+		}),
+	)
+
+	if err == nil {
+		subCtx, cancel := context.WithTimeout(ctx, c.backoffMaxInterval)
+		defer cancel()
+		svc := handler(subCtx, conn)
+		if svc != nil {
+			c.connection = conn
+			c.service = svc
+			if p != nil {
+				p.UpdateStatus(ctx, c.apiEndPoint, probe.ServiceStatusRunning)
+			}
+			logger.Infow(ctx, "connected-to-endpoint", log.Fields{"endpoint": c.apiEndPoint})
+			c.events <- eventConnected
+			return nil
+		}
+	}
+	logger.Warnw(ctx, "Failed to connect to endpoint",
+		log.Fields{
+			"endpoint": c.apiEndPoint,
+			"error":    err,
+		})
+
+	if p != nil {
+		p.UpdateStatus(ctx, c.apiEndPoint, probe.ServiceStatusFailed)
+	}
+	return fmt.Errorf("no connection to endpoint %s", c.apiEndPoint)
+}
+
+func (c *Client) closeConnection(ctx context.Context, p *probe.Probe) error {
+	if p != nil {
+		p.UpdateStatus(ctx, c.apiEndPoint, probe.ServiceStatusStopped)
+	}
+
+	c.connectionLock.Lock()
+	defer c.connectionLock.Unlock()
+
+	if c.connection != nil {
+		err := c.connection.Close()
+		c.connection = nil
+		return err
+	}
+
+	return nil
+}
+
+func (c *Client) Stop(ctx context.Context) {
+	if !c.done {
+		c.events <- eventStopped
+		close(c.events)
+		c.done = true
+	}
+}
+
+// SetService is used for testing only
+func (c *Client) SetService(srv interface{}) {
+	c.connectionLock.Lock()
+	defer c.connectionLock.Unlock()
+	c.service = srv
+}
+
+func (c *Client) SubscribeForLiveness(callback func(timestamp time.Time)) {
+	c.livenessCallback = callback
+}
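
A minimal usage sketch of the new Client added above, not part of this patch: the endpoint address and function names are illustrative, and the CoreService bindings (NewCoreServiceClient, GetHealthStatus) are the generated voltha-protos v5 APIs this client is designed to wrap.

```go
package example

import (
	"context"

	"github.com/golang/protobuf/ptypes/empty"
	vgrpc "github.com/opencord/voltha-lib-go/v7/pkg/grpc"
	"github.com/opencord/voltha-protos/v5/go/core"
	"google.golang.org/grpc"
)

// setAndTestCoreService binds the concrete CoreServiceClient to the connection and
// probes it; returning nil tells the Client the endpoint is not usable yet.
func setAndTestCoreService(ctx context.Context, conn *grpc.ClientConn) interface{} {
	svc := core.NewCoreServiceClient(conn)
	if _, err := svc.GetHealthStatus(ctx, &empty.Empty{}); err != nil {
		return nil
	}
	return svc
}

// onCoreRestart is invoked after a re-connection; an adapter would typically
// re-register and reconcile state here.
func onCoreRestart(ctx context.Context, endpoint string) error {
	return nil
}

func runClient(ctx context.Context) error {
	client, err := vgrpc.NewClient("voltha-core:55555", onCoreRestart, vgrpc.ActivityCheck(true))
	if err != nil {
		return err
	}
	go client.Start(ctx, setAndTestCoreService)
	defer client.Stop(ctx)

	// Once connected, the concrete service can be retrieved and used.
	if coreClient, err := client.GetCoreServiceClient(); err == nil {
		_, _ = coreClient.GetHealthStatus(ctx, &empty.Empty{})
	}
	return nil
}
```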
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/common.go
similarity index 93%
copy from vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/common.go
copy to vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/common.go
index aa37746..77aad4f 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/common.go
@@ -13,10 +13,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package kafka
+package grpc
 
 import (
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/mock_core_service.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/mock_core_service.go
new file mode 100644
index 0000000..745753c
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/mock_core_service.go
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2021-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package grpc
+
+import (
+	"context"
+	"strconv"
+	"time"
+
+	"github.com/golang/protobuf/ptypes/empty"
+	"github.com/opencord/voltha-protos/v5/go/common"
+	ic "github.com/opencord/voltha-protos/v5/go/inter_container"
+	"github.com/opencord/voltha-protos/v5/go/voltha"
+)
+
+// MockCoreServiceHandler implements the methods in the core service
+type MockCoreServiceHandler struct{}
+
+func (handler *MockCoreServiceHandler) RegisterAdapter(ctx context.Context, reg *ic.AdapterRegistration) (*empty.Empty, error) {
+	//logger.Debugw(ctx, "registration-received", log.Fields{"input": reg})
+	return &empty.Empty{}, nil
+}
+
+func (handler *MockCoreServiceHandler) DeviceUpdate(context.Context, *voltha.Device) (*empty.Empty, error) {
+	return &empty.Empty{}, nil
+}
+
+func (handler *MockCoreServiceHandler) PortCreated(context.Context, *voltha.Port) (*empty.Empty, error) {
+	return &empty.Empty{}, nil
+}
+
+func (handler *MockCoreServiceHandler) PortsStateUpdate(context.Context, *ic.PortStateFilter) (*empty.Empty, error) {
+	return &empty.Empty{}, nil
+}
+
+func (handler *MockCoreServiceHandler) DeleteAllPorts(context.Context, *common.ID) (*empty.Empty, error) {
+	return &empty.Empty{}, nil
+}
+
+func (handler *MockCoreServiceHandler) GetDevicePort(context.Context, *ic.PortFilter) (*voltha.Port, error) {
+	return &voltha.Port{}, nil
+}
+
+func (handler *MockCoreServiceHandler) ListDevicePorts(context.Context, *common.ID) (*voltha.Ports, error) {
+	return &voltha.Ports{}, nil
+}
+
+func (handler *MockCoreServiceHandler) DeviceStateUpdate(context.Context, *ic.DeviceStateFilter) (*empty.Empty, error) {
+	return &empty.Empty{}, nil
+}
+
+func (handler *MockCoreServiceHandler) DevicePMConfigUpdate(context.Context, *voltha.PmConfigs) (*empty.Empty, error) {
+	return &empty.Empty{}, nil
+}
+
+func (handler *MockCoreServiceHandler) ChildDeviceDetected(context.Context, *ic.DeviceDiscovery) (*voltha.Device, error) {
+	return &voltha.Device{}, nil
+}
+
+func (handler *MockCoreServiceHandler) ChildDevicesLost(context.Context, *common.ID) (*empty.Empty, error) {
+	return &empty.Empty{}, nil
+}
+
+func (handler *MockCoreServiceHandler) ChildDevicesDetected(context.Context, *common.ID) (*empty.Empty, error) {
+	time.Sleep(50 * time.Millisecond)
+	return &empty.Empty{}, nil
+}
+
+func (handler *MockCoreServiceHandler) GetDevice(ctx context.Context, id *common.ID) (*voltha.Device, error) {
+	time.Sleep(50 * time.Millisecond)
+	vlan, _ := strconv.Atoi(id.Id)
+	return &voltha.Device{
+		Id:   id.Id,
+		Type: "test-1234",
+		Vlan: uint32(vlan),
+	}, nil
+}
+
+func (handler *MockCoreServiceHandler) GetChildDevice(context.Context, *ic.ChildDeviceFilter) (*voltha.Device, error) {
+	return nil, nil
+}
+
+func (handler *MockCoreServiceHandler) GetChildDevices(context.Context, *common.ID) (*voltha.Devices, error) {
+	return &voltha.Devices{}, nil
+}
+
+func (handler *MockCoreServiceHandler) SendPacketIn(context.Context, *ic.PacketIn) (*empty.Empty, error) {
+	return &empty.Empty{}, nil
+}
+
+func (handler *MockCoreServiceHandler) DeviceReasonUpdate(context.Context, *ic.DeviceReason) (*empty.Empty, error) {
+	return &empty.Empty{}, nil
+}
+
+func (handler *MockCoreServiceHandler) PortStateUpdate(context.Context, *ic.PortState) (*empty.Empty, error) {
+	return &empty.Empty{}, nil
+}
+
+// Additional API found in the Core - unused?
+func (handler *MockCoreServiceHandler) ReconcileChildDevices(context.Context, *common.ID) (*empty.Empty, error) {
+	return &empty.Empty{}, nil
+}
+
+func (handler *MockCoreServiceHandler) GetChildDeviceWithProxyAddress(context.Context, *voltha.Device_ProxyAddress) (*voltha.Device, error) {
+	return &voltha.Device{}, nil
+}
+
+func (handler *MockCoreServiceHandler) GetPorts(context.Context, *ic.PortFilter) (*voltha.Ports, error) {
+	return &voltha.Ports{}, nil
+}
+
+func (handler *MockCoreServiceHandler) ChildrenStateUpdate(context.Context, *ic.DeviceStateFilter) (*empty.Empty, error) {
+	return &empty.Empty{}, nil
+}
+
+func (handler *MockCoreServiceHandler) UpdateImageDownload(context.Context, *voltha.ImageDownload) (*empty.Empty, error) {
+	return &empty.Empty{}, nil
+}
+
+func (handler *MockCoreServiceHandler) GetHealthStatus(ctx context.Context, empty *empty.Empty) (*voltha.HealthStatus, error) {
+	return &voltha.HealthStatus{State: voltha.HealthStatus_HEALTHY}, nil
+}
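
A sketch of how a unit test might expose the mock core service above, not part of this patch: it assumes the standard generated core.RegisterCoreServiceServer helper from voltha-protos v5 and an arbitrary local listener supplied by the test.

```go
package example

import (
	"net"

	vgrpc "github.com/opencord/voltha-lib-go/v7/pkg/grpc"
	"github.com/opencord/voltha-protos/v5/go/core"
	"google.golang.org/grpc"
)

// serveMockCore runs MockCoreServiceHandler on a plain gRPC server so that a
// test can point an adapter's Client at lis.Addr().String().
func serveMockCore(lis net.Listener) *grpc.Server {
	gs := grpc.NewServer()
	core.RegisterCoreServiceServer(gs, &vgrpc.MockCoreServiceHandler{})
	go func() { _ = gs.Serve(lis) }()
	return gs
}
```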
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/security.go
similarity index 60%
copy from vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/common.go
copy to vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/security.go
index 422740b..930d2c8 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/db/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/security.go
@@ -1,31 +1,22 @@
 /*
- * Copyright 2020-present Open Networking Foundation
- *
+ * Copyright 2018-present Open Networking Foundation
+
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
- *
+
  * http://www.apache.org/licenses/LICENSE-2.0
- *
+
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package db
+package grpc
 
-import (
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-)
-
-var logger log.CLogger
-
-func init() {
-	// Setup this package so that it's log level can be modified at run time
-	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
-	if err != nil {
-		panic(err)
-	}
+type GrpcSecurity struct {
+	KeyFile  string
+	CertFile string
+	CaFile   string
 }
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go
new file mode 100644
index 0000000..bee418d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package grpc
+
+import (
+	"context"
+	"net"
+
+	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
+	grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/reflection"
+	"google.golang.org/grpc/status"
+)
+
+/*
+To add a GRPC server to your existing component simply follow these steps:
+
+1. Create a server instance by passing the host and port where it should run and optionally add certificate information
+
+	e.g.
+	s.server = server.NewGrpcServer(s.config.GrpcAddress, nil, false, nil)
+
+2. Create a function that will register your service with the GRPC server
+
+    e.g.
+	f := func(gs *grpc.Server) {
+		voltha.RegisterVolthaReadOnlyServiceServer(
+			gs,
+			core.NewReadOnlyServiceHandler(s.root),
+		)
+	}
+
+3. Add the service to the server
+
+    e.g.
+	s.server.AddService(f)
+
+4. Start the server
+
+	s.server.Start(ctx)
+*/
+
+// ReadyProbe is the interface that allows a probe to be attached to the server.
+// A probe must support the IsReady() method.
+type ReadyProbe interface {
+	IsReady() bool
+}
+
+type GrpcServer struct {
+	gs       *grpc.Server
+	address  string
+	secure   bool
+	services []func(*grpc.Server)
+	probe    ReadyProbe // optional
+
+	*GrpcSecurity
+}
+
+/*
+Instantiate a GRPC server data structure
+*/
+func NewGrpcServer(
+	address string,
+	certs *GrpcSecurity,
+	secure bool,
+	probe ReadyProbe,
+) *GrpcServer {
+	server := &GrpcServer{
+		address:      address,
+		secure:       secure,
+		GrpcSecurity: certs,
+		probe:        probe,
+	}
+	return server
+}
+
+/*
+Start prepares the GRPC server and starts servicing requests
+*/
+func (s *GrpcServer) Start(ctx context.Context) {
+
+	lis, err := net.Listen("tcp", s.address)
+	if err != nil {
+		logger.Fatalf(ctx, "failed to listen: %v", err)
+	}
+
+	// Use Interceptors to automatically inject and publish Open Tracing Spans for this GRPC server
+	serverOptions := []grpc.ServerOption{
+		grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
+			grpc_opentracing.StreamServerInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
+		)),
+		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
+			grpc_opentracing.UnaryServerInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
+			mkServerInterceptor(s),
+		))}
+
+	if s.secure && s.GrpcSecurity != nil {
+		creds, err := credentials.NewServerTLSFromFile(s.CertFile, s.KeyFile)
+		if err != nil {
+			logger.Fatalf(ctx, "could not load TLS keys: %s", err)
+		}
+
+		serverOptions = append(serverOptions, grpc.Creds(creds))
+		s.gs = grpc.NewServer(serverOptions...)
+	} else {
+		logger.Info(ctx, "starting-insecure-grpc-server")
+		s.gs = grpc.NewServer(serverOptions...)
+	}
+
+	// Register all required services
+	for _, service := range s.services {
+		service(s.gs)
+	}
+	reflection.Register(s.gs)
+
+	if err := s.gs.Serve(lis); err != nil {
+		logger.Fatalf(ctx, "failed to serve: %v\n", err)
+	}
+}
+
+// Make a serverInterceptor for the given GrpcServer
+// This interceptor will check whether there is an attached probe,
+// and if that probe indicates NotReady, then an UNAVAILABLE
+// response will be returned.
+func mkServerInterceptor(s *GrpcServer) func(ctx context.Context,
+	req interface{},
+	info *grpc.UnaryServerInfo,
+	handler grpc.UnaryHandler) (interface{}, error) {
+
+	return func(ctx context.Context,
+		req interface{},
+		info *grpc.UnaryServerInfo,
+		handler grpc.UnaryHandler) (interface{}, error) {
+
+		if (s.probe != nil) && (!s.probe.IsReady()) {
+			logger.Warnf(ctx, "Grpc request received while not ready %v", req)
+			return nil, status.Error(codes.Unavailable, "system is not ready")
+		}
+
+		// Calls the handler
+		h, err := handler(ctx, req)
+
+		return h, err
+	}
+}
+
+/*
+Stop servicing GRPC requests
+*/
+func (s *GrpcServer) Stop() {
+	if s.gs != nil {
+		s.gs.Stop()
+	}
+}
+
+/*
+AddService appends a service registration function to be invoked when the server starts
+*/
+func (s *GrpcServer) AddService(
+	registerFunction func(*grpc.Server),
+) {
+	s.services = append(s.services, registerFunction)
+}
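
A compact sketch of the four steps documented at the top of this file, not part of this patch: the listen address is illustrative, and the generated core.RegisterCoreServiceServer helper together with the MockCoreServiceHandler from earlier in this change stand in for a real service implementation.

```go
package example

import (
	"context"

	vgrpc "github.com/opencord/voltha-lib-go/v7/pkg/grpc"
	"github.com/opencord/voltha-protos/v5/go/core"
	"google.golang.org/grpc"
)

func startCoreServer(ctx context.Context) {
	// 1. Create an insecure server instance (no TLS material, no probe).
	s := vgrpc.NewGrpcServer("0.0.0.0:55555", nil, false, nil)

	// 2 + 3. Build a registration function and add it to the server.
	s.AddService(func(gs *grpc.Server) {
		core.RegisterCoreServiceServer(gs, &vgrpc.MockCoreServiceHandler{})
	})

	// 4. Start serving; this blocks until the listener fails or Stop() is called.
	s.Start(ctx)
}
```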
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/utils.go
new file mode 100644
index 0000000..85686de
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/utils.go
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2021-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package grpc
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+	"os"
+	"reflect"
+	"sync"
+	"time"
+)
+
+const (
+	incrementalFactor = 1.5
+	minBackOff        = 10 * time.Millisecond
+)
+
+type Backoff struct {
+	attempt          int
+	initialInterval  time.Duration
+	maxElapsedTime   time.Duration
+	maxInterval      time.Duration
+	totalElapsedTime time.Duration
+	mutex            sync.RWMutex
+}
+
+func NewBackoff(initialInterval, maxInterval, maxElapsedTime time.Duration) *Backoff {
+	bo := &Backoff{}
+	bo.initialInterval = initialInterval
+	bo.maxInterval = maxInterval
+	bo.maxElapsedTime = maxElapsedTime
+	return bo
+}
+
+func (bo *Backoff) Backoff(ctx context.Context) error {
+	duration, err := bo.getBackOffDuration()
+	if err != nil {
+		return err
+	}
+
+	ticker := time.NewTicker(duration)
+	defer ticker.Stop()
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-ticker.C:
+	}
+	return nil
+}
+
+func (bo *Backoff) getBackOffDuration() (duration time.Duration, err error) {
+	err = nil
+	defer func() {
+		bo.mutex.Lock()
+		defer bo.mutex.Unlock()
+		bo.attempt += 1
+		bo.totalElapsedTime += duration
+		if bo.maxElapsedTime > 0 && bo.totalElapsedTime > bo.maxElapsedTime {
+			err = errors.New("max elapsed backoff time reached")
+		}
+	}()
+
+	if bo.initialInterval <= minBackOff {
+		bo.initialInterval = minBackOff
+	}
+	if bo.initialInterval > bo.maxInterval {
+		duration = bo.initialInterval
+		return
+	}
+
+	// Calculate incremental duration
+	minf := float64(bo.initialInterval)
+	durf := minf * math.Pow(incrementalFactor, float64(bo.attempt))
+
+	if durf > math.MaxInt64 {
+		duration = bo.maxInterval
+		return
+	}
+	duration = time.Duration(durf)
+
+	// Keep within bounds
+	if duration < bo.initialInterval {
+		duration = bo.initialInterval
+	}
+	if duration > bo.maxInterval {
+		duration = bo.maxInterval
+	}
+	return
+}
+
+func (bo *Backoff) Reset() {
+	bo.mutex.Lock()
+	defer bo.mutex.Unlock()
+	bo.attempt = 0
+	bo.totalElapsedTime = 0
+}
+
+func SetFromEnvVariable(key string, variableToSet interface{}) error {
+	if _, ok := variableToSet.(*time.Duration); !ok {
+		return fmt.Errorf("unsupported type %T", variableToSet)
+	}
+	if valStr, present := os.LookupEnv(key); present {
+		val, err := time.ParseDuration(valStr)
+		if err != nil {
+			return err
+		}
+		reflect.ValueOf(variableToSet).Elem().Set(reflect.ValueOf(val))
+	}
+	return nil
+}
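
A small sketch of how the helpers above combine, not part of this patch: it reads an optional override from the same GRPC_BACKOFF_INITIAL_INTERVAL variable the client consults, then backs off between connection attempts; the connect callback is illustrative.

```go
package example

import (
	"context"
	"time"

	vgrpc "github.com/opencord/voltha-lib-go/v7/pkg/grpc"
)

// dialWithRetry retries the supplied connect function with an exponential backoff.
func dialWithRetry(ctx context.Context, connect func() error) error {
	initial := 100 * time.Millisecond
	if err := vgrpc.SetFromEnvVariable("GRPC_BACKOFF_INITIAL_INTERVAL", &initial); err != nil {
		return err
	}
	// Maximum interval of 5s, no overall elapsed-time limit (0).
	bo := vgrpc.NewBackoff(initial, 5*time.Second, 0)
	for {
		if err := connect(); err == nil {
			bo.Reset()
			return nil
		}
		// Backoff returns an error once the context is cancelled or the
		// maximum elapsed time (when set) has been exceeded.
		if err := bo.Backoff(ctx); err != nil {
			return err
		}
	}
}
```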
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go
similarity index 86%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/client.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go
index eee9631..fdc05bc 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go
@@ -12,6 +12,11 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
+ *
+ *
+ * NOTE:  As of voltha release 2.9, the kafka client is used only to publish
+ * events on Kafka.  It is no longer used for inter-container communication
+ * within voltha.
  */
 package kafka
 
@@ -19,7 +24,7 @@
 	"context"
 	"time"
 
-	ca "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/golang/protobuf/proto"
 )
 
 const (
@@ -55,6 +60,7 @@
 	DefaultNumberReplicas           = 1
 	DefaultAutoCreateTopic          = false
 	DefaultMetadataMaxRetry         = 3
+	DefaultMaxRetries               = 3
 	DefaultLivenessChannelInterval  = time.Second * 30
 )
 
@@ -64,8 +70,8 @@
 	Stop(ctx context.Context)
 	CreateTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error
 	DeleteTopic(ctx context.Context, topic *Topic) error
-	Subscribe(ctx context.Context, topic *Topic, kvArgs ...*KVArg) (<-chan *ca.InterContainerMessage, error)
-	UnSubscribe(ctx context.Context, topic *Topic, ch <-chan *ca.InterContainerMessage) error
+	Subscribe(ctx context.Context, topic *Topic, kvArgs ...*KVArg) (<-chan proto.Message, error)
+	UnSubscribe(ctx context.Context, topic *Topic, ch <-chan proto.Message) error
 	SubscribeForMetadata(context.Context, func(fromTopic string, timestamp time.Time))
 	Send(ctx context.Context, msg interface{}, topic *Topic, keys ...string) error
 	SendLiveness(ctx context.Context) error
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/common.go
index aa37746..c0d169a 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/common.go
@@ -16,7 +16,7 @@
 package kafka
 
 import (
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go
similarity index 95%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/sarama_client.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go
index 79827aa..185f6ec 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/kafka/sarama_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go
@@ -27,10 +27,8 @@
 	scc "github.com/bsm/sarama-cluster"
 	"github.com/eapache/go-resiliency/breaker"
 	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
 	"github.com/google/uuid"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 // consumerChannels represents one or more consumers listening on a kafka topic.  Once a message is received on that
@@ -38,7 +36,7 @@
 //consumer or a group consumer
 type consumerChannels struct {
 	consumers []interface{}
-	channels  []chan *ic.InterContainerMessage
+	channels  []chan proto.Message
 }
 
 // static check to ensure SaramaClient implements Client
@@ -378,7 +376,7 @@
 
 // Subscribe registers a caller to a topic. It returns a channel that the caller can use to receive
 // messages from that topic
-func (sc *SaramaClient) Subscribe(ctx context.Context, topic *Topic, kvArgs ...*KVArg) (<-chan *ic.InterContainerMessage, error) {
+func (sc *SaramaClient) Subscribe(ctx context.Context, topic *Topic, kvArgs ...*KVArg) (<-chan proto.Message, error) {
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
@@ -388,13 +386,13 @@
 	if consumerCh := sc.getConsumerChannel(topic); consumerCh != nil {
 		logger.Debugw(ctx, "topic-already-subscribed", log.Fields{"topic": topic.Name})
 		// Create a channel specific for that consumers and add it to the consumers channel map
-		ch := make(chan *ic.InterContainerMessage)
+		ch := make(chan proto.Message)
 		sc.addChannelToConsumerChannelMap(ctx, topic, ch)
 		return ch, nil
 	}
 
 	// Register for the topic and set it up
-	var consumerListeningChannel chan *ic.InterContainerMessage
+	var consumerListeningChannel chan proto.Message
 	var err error
 
 	// Use the consumerType option to figure out the type of consumer to launch
@@ -441,7 +439,7 @@
 }
 
 //UnSubscribe unsubscribe a consumer from a given topic
-func (sc *SaramaClient) UnSubscribe(ctx context.Context, topic *Topic, ch <-chan *ic.InterContainerMessage) error {
+func (sc *SaramaClient) UnSubscribe(ctx context.Context, topic *Topic, ch <-chan proto.Message) error {
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
@@ -609,7 +607,7 @@
 			// without blocking others. The monitor shouldn't really fall
 			// behind...
 			sc.liveness = make(chan bool, 10)
-			// post intial state to the channel
+			// post initial state to the channel
 			sc.liveness <- sc.alive
 		}
 	} else {
@@ -635,7 +633,7 @@
 			// without blocking others. The monitor shouldn't really fall
 			// behind...
 			sc.healthiness = make(chan bool, 10)
-			// post intial state to the channel
+			// post initial state to the channel
 			sc.healthiness <- sc.healthy
 		}
 	} else {
@@ -749,7 +747,7 @@
 	return nil
 }
 
-func (sc *SaramaClient) addChannelToConsumerChannelMap(ctx context.Context, topic *Topic, ch chan *ic.InterContainerMessage) {
+func (sc *SaramaClient) addChannelToConsumerChannelMap(ctx context.Context, topic *Topic, ch chan proto.Message) {
 	sc.lockTopicToConsumerChannelMap.Lock()
 	defer sc.lockTopicToConsumerChannelMap.Unlock()
 	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
@@ -788,7 +786,7 @@
 	return err
 }
 
-func (sc *SaramaClient) removeChannelFromConsumerChannelMap(ctx context.Context, topic Topic, ch <-chan *ic.InterContainerMessage) error {
+func (sc *SaramaClient) removeChannelFromConsumerChannelMap(ctx context.Context, topic Topic, ch <-chan proto.Message) error {
 	sc.lockTopicToConsumerChannelMap.Lock()
 	defer sc.lockTopicToConsumerChannelMap.Unlock()
 	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
@@ -908,19 +906,18 @@
 
 // dispatchToConsumers sends the intercontainermessage received on a given topic to all subscribers for that
 // topic via the unique channel each subscriber received during subscription
-func (sc *SaramaClient) dispatchToConsumers(consumerCh *consumerChannels, protoMessage *ic.InterContainerMessage) {
+func (sc *SaramaClient) dispatchToConsumers(consumerCh *consumerChannels, protoMessage proto.Message, fromTopic string, ts time.Time) {
 	// Need to go over all channels and publish messages to them - do we need to copy msg?
 	sc.lockTopicToConsumerChannelMap.RLock()
 	for _, ch := range consumerCh.channels {
-		go func(c chan *ic.InterContainerMessage) {
+		go func(c chan proto.Message) {
 			c <- protoMessage
 		}(ch)
 	}
 	sc.lockTopicToConsumerChannelMap.RUnlock()
 
 	if callback := sc.metadataCallback; callback != nil {
-		ts, _ := ptypes.Timestamp(protoMessage.Header.Timestamp)
-		callback(protoMessage.Header.FromTopic, ts)
+		callback(fromTopic, ts)
 	}
 }
 
@@ -948,12 +945,12 @@
 			msgBody := msg.Value
 			sc.updateLiveness(ctx, true)
 			logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
-			icm := &ic.InterContainerMessage{}
-			if err := proto.Unmarshal(msgBody, icm); err != nil {
+			var protoMsg proto.Message
+			if err := proto.Unmarshal(msgBody, protoMsg); err != nil {
 				logger.Warnw(ctx, "partition-invalid-message", log.Fields{"error": err})
 				continue
 			}
-			go sc.dispatchToConsumers(consumerChnls, icm)
+			go sc.dispatchToConsumers(consumerChnls, protoMsg, msg.Topic, msg.Timestamp)
 		case <-sc.doneCh:
 			logger.Infow(ctx, "partition-received-exit-signal", log.Fields{"topic": topic.Name})
 			break startloop
@@ -989,12 +986,12 @@
 			sc.updateLiveness(ctx, true)
 			logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
 			msgBody := msg.Value
-			icm := &ic.InterContainerMessage{}
-			if err := proto.Unmarshal(msgBody, icm); err != nil {
+			var protoMsg proto.Message
+			if err := proto.Unmarshal(msgBody, protoMsg); err != nil {
 				logger.Warnw(ctx, "invalid-message", log.Fields{"error": err})
 				continue
 			}
-			go sc.dispatchToConsumers(consumerChnls, icm)
+			go sc.dispatchToConsumers(consumerChnls, protoMsg, msg.Topic, msg.Timestamp)
 			consumer.MarkOffset(msg, "")
 		case ntf := <-consumer.Notifications():
 			logger.Debugw(ctx, "group-received-notification", log.Fields{"notification": ntf})
@@ -1030,7 +1027,7 @@
 
 //// setupConsumerChannel creates a consumerChannels object for that topic and add it to the consumerChannels map
 //// for that topic.  It also starts the routine that listens for messages on that topic.
-func (sc *SaramaClient) setupPartitionConsumerChannel(ctx context.Context, topic *Topic, initialOffset int64) (chan *ic.InterContainerMessage, error) {
+func (sc *SaramaClient) setupPartitionConsumerChannel(ctx context.Context, topic *Topic, initialOffset int64) (chan proto.Message, error) {
 	var pConsumers []sarama.PartitionConsumer
 	var err error
 
@@ -1046,10 +1043,10 @@
 
 	// Create the consumers/channel structure and set the consumers and create a channel on that topic - for now
 	// unbuffered to verify race conditions.
-	consumerListeningChannel := make(chan *ic.InterContainerMessage)
+	consumerListeningChannel := make(chan proto.Message)
 	cc := &consumerChannels{
 		consumers: consumersIf,
-		channels:  []chan *ic.InterContainerMessage{consumerListeningChannel},
+		channels:  []chan proto.Message{consumerListeningChannel},
 	}
 
 	// Add the consumers channel to the map
@@ -1069,7 +1066,7 @@
 
 // setupConsumerChannel creates a consumerChannels object for that topic and add it to the consumerChannels map
 // for that topic.  It also starts the routine that listens for messages on that topic.
-func (sc *SaramaClient) setupGroupConsumerChannel(ctx context.Context, topic *Topic, groupId string, initialOffset int64) (chan *ic.InterContainerMessage, error) {
+func (sc *SaramaClient) setupGroupConsumerChannel(ctx context.Context, topic *Topic, groupId string, initialOffset int64) (chan proto.Message, error) {
 	// TODO:  Replace this development partition consumers with a group consumers
 	var pConsumer *scc.Consumer
 	var err error
@@ -1079,10 +1076,10 @@
 	}
 	// Create the consumers/channel structure and set the consumers and create a channel on that topic - for now
 	// unbuffered to verify race conditions.
-	consumerListeningChannel := make(chan *ic.InterContainerMessage)
+	consumerListeningChannel := make(chan proto.Message)
 	cc := &consumerChannels{
 		consumers: []interface{}{pConsumer},
-		channels:  []chan *ic.InterContainerMessage{consumerListeningChannel},
+		channels:  []chan proto.Message{consumerListeningChannel},
 	}
 
 	// Add the consumers channel to the map
@@ -1120,9 +1117,9 @@
 	return pConsumers, nil
 }
 
-func removeChannel(ctx context.Context, channels []chan *ic.InterContainerMessage, ch <-chan *ic.InterContainerMessage) []chan *ic.InterContainerMessage {
+func removeChannel(ctx context.Context, channels []chan proto.Message, ch <-chan proto.Message) []chan proto.Message {
 	var i int
-	var channel chan *ic.InterContainerMessage
+	var channel chan proto.Message
 	for i, channel = range channels {
 		if channel == ch {
 			channels[len(channels)-1], channels[i] = channels[i], channels[len(channels)-1]
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/utils.go
new file mode 100644
index 0000000..608361b
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/utils.go
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"context"
+	"errors"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/probe"
+)
+
+const (
+	TopicSeparator = "_"
+	DeviceIdLength = 24
+)
+
+// A Topic definition - may be augmented with additional attributes eventually
+type Topic struct {
+	// The name of the topic. It must start with a letter,
+	// and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
+	// underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent
+	// signs (`%`).
+	Name string
+}
+
+type KVArg struct {
+	Key   string
+	Value interface{}
+}
+
+type RpcMType int
+
+const (
+	RpcFormattingError RpcMType = iota
+	RpcSent
+	RpcReply
+	RpcTimeout
+	RpcTransportError
+	RpcSystemClosing
+)
+
+type RpcResponse struct {
+	MType RpcMType
+	Err   error
+	Reply *any.Any
+}
+
+func NewResponse(messageType RpcMType, err error, body *any.Any) *RpcResponse {
+	return &RpcResponse{
+		MType: messageType,
+		Err:   err,
+		Reply: body,
+	}
+}
+
+// TODO:  Remove and provide a better way to get the device id
+// GetDeviceIdFromTopic extracts the deviceId from the topic name.  The topic name is formatted either as:
+//			<any string> or <any string>_<deviceId>.  The device Id is 24 characters long.
+func GetDeviceIdFromTopic(topic Topic) string {
+	pos := strings.LastIndex(topic.Name, TopicSeparator)
+	if pos == -1 {
+		return ""
+	}
+	adjustedPos := pos + len(TopicSeparator)
+	if adjustedPos >= len(topic.Name) {
+		return ""
+	}
+	deviceId := topic.Name[adjustedPos:len(topic.Name)]
+	if len(deviceId) != DeviceIdLength {
+		return ""
+	}
+	return deviceId
+}
+
+// StartAndWaitUntilKafkaConnectionIsUp starts the kafka client and waits until it can establish a connection
+// to the kafka broker, or until the context times out.
+func StartAndWaitUntilKafkaConnectionIsUp(ctx context.Context, kClient Client, connectionRetryInterval time.Duration, serviceName string) error {
+	if kClient == nil {
+		return errors.New("kafka-client-is-nil")
+	}
+	for {
+		if err := kClient.Start(ctx); err != nil {
+			probe.UpdateStatusFromContext(ctx, serviceName, probe.ServiceStatusNotReady)
+			logger.Warnw(ctx, "kafka-connection-down", log.Fields{"error": err})
+			select {
+			case <-time.After(connectionRetryInterval):
+				continue
+			case <-ctx.Done():
+				return ctx.Err()
+			}
+		}
+		probe.UpdateStatusFromContext(ctx, serviceName, probe.ServiceStatusRunning)
+		logger.Info(ctx, "kafka-connection-up")
+		break
+	}
+	return nil
+}
+
+/**
+MonitorKafkaReadiness checks the liveliness and readiness of the kafka service
+and updates the status in the probe.
+*/
+func MonitorKafkaReadiness(ctx context.Context,
+	kClient Client,
+	liveProbeInterval, notLiveProbeInterval time.Duration,
+	serviceName string) {
+
+	if kClient == nil {
+		logger.Fatal(ctx, "kafka-client-is-nil")
+	}
+
+	logger.Infow(ctx, "monitor-kafka-readiness", log.Fields{"service": serviceName})
+
+	livelinessChannel := kClient.EnableLivenessChannel(ctx, true)
+	healthinessChannel := kClient.EnableHealthinessChannel(ctx, true)
+	timeout := liveProbeInterval
+	failed := false
+	for {
+		timeoutTimer := time.NewTimer(timeout)
+
+		select {
+		case healthiness := <-healthinessChannel:
+			if !healthiness {
+				// This will eventually cause K8s to restart the container, and will do
+				// so in a way that allows cleanup to continue, rather than an immediate
+				// panic and exit here.
+				probe.UpdateStatusFromContext(ctx, serviceName, probe.ServiceStatusFailed)
+				logger.Infow(ctx, "kafka-not-healthy", log.Fields{"service": serviceName})
+				failed = true
+			}
+			// Check if the timer has expired or not
+			if !timeoutTimer.Stop() {
+				<-timeoutTimer.C
+			}
+		case liveliness := <-livelinessChannel:
+			if failed {
+				// Failures of the message bus are permanent and can't ever be recovered from,
+				// so make sure we never inadvertently reset a failed state back to unready.
+			} else if !liveliness {
+				// kafka not reachable or down, updating the status to not ready state
+				probe.UpdateStatusFromContext(ctx, serviceName, probe.ServiceStatusNotReady)
+				logger.Infow(ctx, "kafka-not-live", log.Fields{"service": serviceName})
+				timeout = notLiveProbeInterval
+			} else {
+				// kafka is reachable , updating the status to running state
+				probe.UpdateStatusFromContext(ctx, serviceName, probe.ServiceStatusRunning)
+				timeout = liveProbeInterval
+			}
+			// Check if the timer has expired or not
+			if !timeoutTimer.Stop() {
+				<-timeoutTimer.C
+			}
+		case <-timeoutTimer.C:
+			logger.Infow(ctx, "kafka-proxy-liveness-recheck", log.Fields{"service": serviceName})
+			// send the liveness probe in a goroutine; we don't want to deadlock ourselves as
+			// the liveness probe may wait (and block) writing to our channel.
+			go func() {
+				err := kClient.SendLiveness(ctx)
+				if err != nil {
+					// Catch possible error case if sending liveness after Sarama has been stopped.
+					logger.Warnw(ctx, "error-kafka-send-liveness", log.Fields{"error": err, "service": serviceName})
+				}
+			}()
+		case <-ctx.Done():
+			return // just exit
+		}
+	}
+}
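
A usage sketch for the two helpers above, not part of this patch: the probe service name and intervals are illustrative, and kClient is any implementation of the kafka.Client interface from this package.

```go
package example

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v7/pkg/kafka"
)

// startEventBus blocks until the kafka broker is reachable and then keeps the
// readiness probe up to date in the background.
func startEventBus(ctx context.Context, kClient kafka.Client) error {
	const serviceName = "message-bus"
	if err := kafka.StartAndWaitUntilKafkaConnectionIsUp(ctx, kClient, 2*time.Second, serviceName); err != nil {
		return err
	}
	// Runs until ctx is cancelled, moving the probe between Running, NotReady and Failed.
	go kafka.MonitorKafkaReadiness(ctx, kClient, 10*time.Second, 2*time.Second, serviceName)
	return nil
}
```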
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/log/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/common.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/log/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/common.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/log/log.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/log/utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/log/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/meters/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/meters/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/meters/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/meters/common.go
index f086d96..3aef492 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/meters/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/meters/common.go
@@ -16,7 +16,7 @@
 package meters
 
 import (
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/meters/meter_utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/meters/meter_utils.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/meters/meter_utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/meters/meter_utils.go
index c3ccba1..56e8ecc 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/meters/meter_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/meters/meter_utils.go
@@ -18,9 +18,10 @@
 import (
 	"context"
 	"fmt"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-	ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
-	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
+
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	ofp "github.com/opencord/voltha-protos/v5/go/openflow_13"
+	tp_pb "github.com/opencord/voltha-protos/v5/go/tech_profile"
 )
 
 // GetTrafficShapingInfo returns CIR,PIR and GIR values
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/grpc/common.go
similarity index 87%
copy from vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/common.go
copy to vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/grpc/common.go
index 66a866e..1d0c2bc 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/config/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/grpc/common.go
@@ -1,5 +1,5 @@
 /*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2021-present Open Networking Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,10 +13,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package config
+package grpc
 
 import (
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/grpc/mock_adapter_services.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/grpc/mock_adapter_services.go
new file mode 100644
index 0000000..cbab060
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/grpc/mock_adapter_services.go
@@ -0,0 +1,1710 @@
+/*
+ * Copyright 2021-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Code generated by MockGen. DO NOT EDIT.
+// Source: ./adapter_services.pb.go
+
+package grpc
+
+import (
+	context "context"
+	reflect "reflect"
+
+	gomock "github.com/golang/mock/gomock"
+	empty "github.com/golang/protobuf/ptypes/empty"
+	common "github.com/opencord/voltha-protos/v5/go/common"
+	extension "github.com/opencord/voltha-protos/v5/go/extension"
+	inter_container "github.com/opencord/voltha-protos/v5/go/inter_container"
+	voltha "github.com/opencord/voltha-protos/v5/go/voltha"
+	grpc "google.golang.org/grpc"
+)
+
+// MockAdapterServiceClient is a mock of AdapterServiceClient interface.
+type MockAdapterServiceClient struct {
+	ctrl     *gomock.Controller
+	recorder *MockAdapterServiceClientMockRecorder
+}
+
+// MockAdapterServiceClientMockRecorder is the mock recorder for MockAdapterServiceClient.
+type MockAdapterServiceClientMockRecorder struct {
+	mock *MockAdapterServiceClient
+}
+
+// NewMockAdapterServiceClient creates a new mock instance.
+func NewMockAdapterServiceClient(ctrl *gomock.Controller) *MockAdapterServiceClient {
+	mock := &MockAdapterServiceClient{ctrl: ctrl}
+	mock.recorder = &MockAdapterServiceClientMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockAdapterServiceClient) EXPECT() *MockAdapterServiceClientMockRecorder {
+	return m.recorder
+}
+
+// AbortOnuImageUpgrade mocks base method.
+func (m *MockAdapterServiceClient) AbortOnuImageUpgrade(ctx context.Context, in *voltha.DeviceImageRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "AbortOnuImageUpgrade", varargs...)
+	ret0, _ := ret[0].(*voltha.DeviceImageResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// AbortOnuImageUpgrade indicates an expected call of AbortOnuImageUpgrade.
+func (mr *MockAdapterServiceClientMockRecorder) AbortOnuImageUpgrade(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortOnuImageUpgrade", reflect.TypeOf((*MockAdapterServiceClient)(nil).AbortOnuImageUpgrade), varargs...)
+}
+
+// ActivateImageUpdate mocks base method.
+func (m *MockAdapterServiceClient) ActivateImageUpdate(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ActivateImageUpdate", varargs...)
+	ret0, _ := ret[0].(*voltha.ImageDownload)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ActivateImageUpdate indicates an expected call of ActivateImageUpdate.
+func (mr *MockAdapterServiceClientMockRecorder) ActivateImageUpdate(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActivateImageUpdate", reflect.TypeOf((*MockAdapterServiceClient)(nil).ActivateImageUpdate), varargs...)
+}
+
+// ActivateOnuImage mocks base method.
+func (m *MockAdapterServiceClient) ActivateOnuImage(ctx context.Context, in *voltha.DeviceImageRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ActivateOnuImage", varargs...)
+	ret0, _ := ret[0].(*voltha.DeviceImageResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ActivateOnuImage indicates an expected call of ActivateOnuImage.
+func (mr *MockAdapterServiceClientMockRecorder) ActivateOnuImage(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActivateOnuImage", reflect.TypeOf((*MockAdapterServiceClient)(nil).ActivateOnuImage), varargs...)
+}
+
+// AdoptDevice mocks base method.
+func (m *MockAdapterServiceClient) AdoptDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "AdoptDevice", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// AdoptDevice indicates an expected call of AdoptDevice.
+func (mr *MockAdapterServiceClientMockRecorder) AdoptDevice(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AdoptDevice", reflect.TypeOf((*MockAdapterServiceClient)(nil).AdoptDevice), varargs...)
+}
+
+// CancelImageDownload mocks base method.
+func (m *MockAdapterServiceClient) CancelImageDownload(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "CancelImageDownload", varargs...)
+	ret0, _ := ret[0].(*voltha.ImageDownload)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// CancelImageDownload indicates an expected call of CancelImageDownload.
+func (mr *MockAdapterServiceClientMockRecorder) CancelImageDownload(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelImageDownload", reflect.TypeOf((*MockAdapterServiceClient)(nil).CancelImageDownload), varargs...)
+}
+
+// ChildDeviceLost mocks base method.
+func (m *MockAdapterServiceClient) ChildDeviceLost(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ChildDeviceLost", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ChildDeviceLost indicates an expected call of ChildDeviceLost.
+func (mr *MockAdapterServiceClientMockRecorder) ChildDeviceLost(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChildDeviceLost", reflect.TypeOf((*MockAdapterServiceClient)(nil).ChildDeviceLost), varargs...)
+}
+
+// CommitOnuImage mocks base method.
+func (m *MockAdapterServiceClient) CommitOnuImage(ctx context.Context, in *voltha.DeviceImageRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "CommitOnuImage", varargs...)
+	ret0, _ := ret[0].(*voltha.DeviceImageResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// CommitOnuImage indicates an expected call of CommitOnuImage.
+func (mr *MockAdapterServiceClientMockRecorder) CommitOnuImage(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitOnuImage", reflect.TypeOf((*MockAdapterServiceClient)(nil).CommitOnuImage), varargs...)
+}
+
+// DeleteDevice mocks base method.
+func (m *MockAdapterServiceClient) DeleteDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DeleteDevice", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeleteDevice indicates an expected call of DeleteDevice.
+func (mr *MockAdapterServiceClientMockRecorder) DeleteDevice(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDevice", reflect.TypeOf((*MockAdapterServiceClient)(nil).DeleteDevice), varargs...)
+}
+
+// DisableDevice mocks base method.
+func (m *MockAdapterServiceClient) DisableDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DisableDevice", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DisableDevice indicates an expected call of DisableDevice.
+func (mr *MockAdapterServiceClientMockRecorder) DisableDevice(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableDevice", reflect.TypeOf((*MockAdapterServiceClient)(nil).DisableDevice), varargs...)
+}
+
+// DisablePort mocks base method.
+func (m *MockAdapterServiceClient) DisablePort(ctx context.Context, in *voltha.Port, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DisablePort", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DisablePort indicates an expected call of DisablePort.
+func (mr *MockAdapterServiceClientMockRecorder) DisablePort(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisablePort", reflect.TypeOf((*MockAdapterServiceClient)(nil).DisablePort), varargs...)
+}
+
+// DownloadImage mocks base method.
+func (m *MockAdapterServiceClient) DownloadImage(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DownloadImage", varargs...)
+	ret0, _ := ret[0].(*voltha.ImageDownload)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DownloadImage indicates an expected call of DownloadImage.
+func (mr *MockAdapterServiceClientMockRecorder) DownloadImage(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadImage", reflect.TypeOf((*MockAdapterServiceClient)(nil).DownloadImage), varargs...)
+}
+
+// DownloadOnuImage mocks base method.
+func (m *MockAdapterServiceClient) DownloadOnuImage(ctx context.Context, in *voltha.DeviceImageDownloadRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DownloadOnuImage", varargs...)
+	ret0, _ := ret[0].(*voltha.DeviceImageResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DownloadOnuImage indicates an expected call of DownloadOnuImage.
+func (mr *MockAdapterServiceClientMockRecorder) DownloadOnuImage(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadOnuImage", reflect.TypeOf((*MockAdapterServiceClient)(nil).DownloadOnuImage), varargs...)
+}
+
+// EnablePort mocks base method.
+func (m *MockAdapterServiceClient) EnablePort(ctx context.Context, in *voltha.Port, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "EnablePort", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// EnablePort indicates an expected call of EnablePort.
+func (mr *MockAdapterServiceClientMockRecorder) EnablePort(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnablePort", reflect.TypeOf((*MockAdapterServiceClient)(nil).EnablePort), varargs...)
+}
+
+// GetExtValue mocks base method.
+func (m *MockAdapterServiceClient) GetExtValue(ctx context.Context, in *inter_container.GetExtValueMessage, opts ...grpc.CallOption) (*common.ReturnValues, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetExtValue", varargs...)
+	ret0, _ := ret[0].(*common.ReturnValues)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetExtValue indicates an expected call of GetExtValue.
+func (mr *MockAdapterServiceClientMockRecorder) GetExtValue(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExtValue", reflect.TypeOf((*MockAdapterServiceClient)(nil).GetExtValue), varargs...)
+}
+
+// GetHealthStatus mocks base method.
+func (m *MockAdapterServiceClient) GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*voltha.HealthStatus, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetHealthStatus", varargs...)
+	ret0, _ := ret[0].(*voltha.HealthStatus)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetHealthStatus indicates an expected call of GetHealthStatus.
+func (mr *MockAdapterServiceClientMockRecorder) GetHealthStatus(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHealthStatus", reflect.TypeOf((*MockAdapterServiceClient)(nil).GetHealthStatus), varargs...)
+}
+
+// GetImageDownloadStatus mocks base method.
+func (m *MockAdapterServiceClient) GetImageDownloadStatus(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetImageDownloadStatus", varargs...)
+	ret0, _ := ret[0].(*voltha.ImageDownload)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetImageDownloadStatus indicates an expected call of GetImageDownloadStatus.
+func (mr *MockAdapterServiceClientMockRecorder) GetImageDownloadStatus(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageDownloadStatus", reflect.TypeOf((*MockAdapterServiceClient)(nil).GetImageDownloadStatus), varargs...)
+}
+
+// GetOfpDeviceInfo mocks base method.
+func (m *MockAdapterServiceClient) GetOfpDeviceInfo(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*inter_container.SwitchCapability, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetOfpDeviceInfo", varargs...)
+	ret0, _ := ret[0].(*inter_container.SwitchCapability)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetOfpDeviceInfo indicates an expected call of GetOfpDeviceInfo.
+func (mr *MockAdapterServiceClientMockRecorder) GetOfpDeviceInfo(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOfpDeviceInfo", reflect.TypeOf((*MockAdapterServiceClient)(nil).GetOfpDeviceInfo), varargs...)
+}
+
+// GetOnuImageStatus mocks base method.
+func (m *MockAdapterServiceClient) GetOnuImageStatus(ctx context.Context, in *voltha.DeviceImageRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetOnuImageStatus", varargs...)
+	ret0, _ := ret[0].(*voltha.DeviceImageResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetOnuImageStatus indicates an expected call of GetOnuImageStatus.
+func (mr *MockAdapterServiceClientMockRecorder) GetOnuImageStatus(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOnuImageStatus", reflect.TypeOf((*MockAdapterServiceClient)(nil).GetOnuImageStatus), varargs...)
+}
+
+// GetOnuImages mocks base method.
+func (m *MockAdapterServiceClient) GetOnuImages(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*voltha.OnuImages, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetOnuImages", varargs...)
+	ret0, _ := ret[0].(*voltha.OnuImages)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetOnuImages indicates an expected call of GetOnuImages.
+func (mr *MockAdapterServiceClientMockRecorder) GetOnuImages(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOnuImages", reflect.TypeOf((*MockAdapterServiceClient)(nil).GetOnuImages), varargs...)
+}
+
+// GetSingleValue mocks base method.
+func (m *MockAdapterServiceClient) GetSingleValue(ctx context.Context, in *extension.SingleGetValueRequest, opts ...grpc.CallOption) (*extension.SingleGetValueResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetSingleValue", varargs...)
+	ret0, _ := ret[0].(*extension.SingleGetValueResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetSingleValue indicates an expected call of GetSingleValue.
+func (mr *MockAdapterServiceClientMockRecorder) GetSingleValue(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSingleValue", reflect.TypeOf((*MockAdapterServiceClient)(nil).GetSingleValue), varargs...)
+}
+
+// ReEnableDevice mocks base method.
+func (m *MockAdapterServiceClient) ReEnableDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ReEnableDevice", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ReEnableDevice indicates an expected call of ReEnableDevice.
+func (mr *MockAdapterServiceClientMockRecorder) ReEnableDevice(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReEnableDevice", reflect.TypeOf((*MockAdapterServiceClient)(nil).ReEnableDevice), varargs...)
+}
+
+// RebootDevice mocks base method.
+func (m *MockAdapterServiceClient) RebootDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "RebootDevice", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// RebootDevice indicates an expected call of RebootDevice.
+func (mr *MockAdapterServiceClientMockRecorder) RebootDevice(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RebootDevice", reflect.TypeOf((*MockAdapterServiceClient)(nil).RebootDevice), varargs...)
+}
+
+// ReconcileDevice mocks base method.
+func (m *MockAdapterServiceClient) ReconcileDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ReconcileDevice", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ReconcileDevice indicates an expected call of ReconcileDevice.
+func (mr *MockAdapterServiceClientMockRecorder) ReconcileDevice(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileDevice", reflect.TypeOf((*MockAdapterServiceClient)(nil).ReconcileDevice), varargs...)
+}
+
+// RevertImageUpdate mocks base method.
+func (m *MockAdapterServiceClient) RevertImageUpdate(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "RevertImageUpdate", varargs...)
+	ret0, _ := ret[0].(*voltha.ImageDownload)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// RevertImageUpdate indicates an expected call of RevertImageUpdate.
+func (mr *MockAdapterServiceClientMockRecorder) RevertImageUpdate(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RevertImageUpdate", reflect.TypeOf((*MockAdapterServiceClient)(nil).RevertImageUpdate), varargs...)
+}
+
+// SelfTestDevice mocks base method.
+func (m *MockAdapterServiceClient) SelfTestDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "SelfTestDevice", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SelfTestDevice indicates an expected call of SelfTestDevice.
+func (mr *MockAdapterServiceClientMockRecorder) SelfTestDevice(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelfTestDevice", reflect.TypeOf((*MockAdapterServiceClient)(nil).SelfTestDevice), varargs...)
+}
+
+// SendPacketOut mocks base method.
+func (m *MockAdapterServiceClient) SendPacketOut(ctx context.Context, in *inter_container.PacketOut, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "SendPacketOut", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SendPacketOut indicates an expected call of SendPacketOut.
+func (mr *MockAdapterServiceClientMockRecorder) SendPacketOut(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPacketOut", reflect.TypeOf((*MockAdapterServiceClient)(nil).SendPacketOut), varargs...)
+}
+
+// SetExtValue mocks base method.
+func (m *MockAdapterServiceClient) SetExtValue(ctx context.Context, in *inter_container.SetExtValueMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "SetExtValue", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SetExtValue indicates an expected call of SetExtValue.
+func (mr *MockAdapterServiceClientMockRecorder) SetExtValue(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetExtValue", reflect.TypeOf((*MockAdapterServiceClient)(nil).SetExtValue), varargs...)
+}
+
+// SetSingleValue mocks base method.
+func (m *MockAdapterServiceClient) SetSingleValue(ctx context.Context, in *extension.SingleSetValueRequest, opts ...grpc.CallOption) (*extension.SingleSetValueResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "SetSingleValue", varargs...)
+	ret0, _ := ret[0].(*extension.SingleSetValueResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SetSingleValue indicates an expected call of SetSingleValue.
+func (mr *MockAdapterServiceClientMockRecorder) SetSingleValue(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSingleValue", reflect.TypeOf((*MockAdapterServiceClient)(nil).SetSingleValue), varargs...)
+}
+
+// SimulateAlarm mocks base method.
+func (m *MockAdapterServiceClient) SimulateAlarm(ctx context.Context, in *inter_container.SimulateAlarmMessage, opts ...grpc.CallOption) (*common.OperationResp, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "SimulateAlarm", varargs...)
+	ret0, _ := ret[0].(*common.OperationResp)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SimulateAlarm indicates an expected call of SimulateAlarm.
+func (mr *MockAdapterServiceClientMockRecorder) SimulateAlarm(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateAlarm", reflect.TypeOf((*MockAdapterServiceClient)(nil).SimulateAlarm), varargs...)
+}
+
+// StartOmciTest mocks base method.
+func (m *MockAdapterServiceClient) StartOmciTest(ctx context.Context, in *inter_container.OMCITest, opts ...grpc.CallOption) (*voltha.TestResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "StartOmciTest", varargs...)
+	ret0, _ := ret[0].(*voltha.TestResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// StartOmciTest indicates an expected call of StartOmciTest.
+func (mr *MockAdapterServiceClientMockRecorder) StartOmciTest(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartOmciTest", reflect.TypeOf((*MockAdapterServiceClient)(nil).StartOmciTest), varargs...)
+}
+
+// SuppressEvent mocks base method.
+func (m *MockAdapterServiceClient) SuppressEvent(ctx context.Context, in *voltha.EventFilter, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "SuppressEvent", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SuppressEvent indicates an expected call of SuppressEvent.
+func (mr *MockAdapterServiceClientMockRecorder) SuppressEvent(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SuppressEvent", reflect.TypeOf((*MockAdapterServiceClient)(nil).SuppressEvent), varargs...)
+}
+
+// UnSuppressEvent mocks base method.
+func (m *MockAdapterServiceClient) UnSuppressEvent(ctx context.Context, in *voltha.EventFilter, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "UnSuppressEvent", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UnSuppressEvent indicates an expected call of UnSuppressEvent.
+func (mr *MockAdapterServiceClientMockRecorder) UnSuppressEvent(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnSuppressEvent", reflect.TypeOf((*MockAdapterServiceClient)(nil).UnSuppressEvent), varargs...)
+}
+
+// UpdateFlowsBulk mocks base method.
+func (m *MockAdapterServiceClient) UpdateFlowsBulk(ctx context.Context, in *inter_container.BulkFlows, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "UpdateFlowsBulk", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UpdateFlowsBulk indicates an expected call of UpdateFlowsBulk.
+func (mr *MockAdapterServiceClientMockRecorder) UpdateFlowsBulk(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFlowsBulk", reflect.TypeOf((*MockAdapterServiceClient)(nil).UpdateFlowsBulk), varargs...)
+}
+
+// UpdateFlowsIncrementally mocks base method.
+func (m *MockAdapterServiceClient) UpdateFlowsIncrementally(ctx context.Context, in *inter_container.IncrementalFlows, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "UpdateFlowsIncrementally", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UpdateFlowsIncrementally indicates an expected call of UpdateFlowsIncrementally.
+func (mr *MockAdapterServiceClientMockRecorder) UpdateFlowsIncrementally(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFlowsIncrementally", reflect.TypeOf((*MockAdapterServiceClient)(nil).UpdateFlowsIncrementally), varargs...)
+}
+
+// UpdatePmConfig mocks base method.
+func (m *MockAdapterServiceClient) UpdatePmConfig(ctx context.Context, in *inter_container.PmConfigsInfo, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "UpdatePmConfig", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UpdatePmConfig indicates an expected call of UpdatePmConfig.
+func (mr *MockAdapterServiceClientMockRecorder) UpdatePmConfig(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePmConfig", reflect.TypeOf((*MockAdapterServiceClient)(nil).UpdatePmConfig), varargs...)
+}
+
+// MockAdapterServiceServer is a mock of AdapterServiceServer interface.
+type MockAdapterServiceServer struct {
+	ctrl     *gomock.Controller
+	recorder *MockAdapterServiceServerMockRecorder
+}
+
+// MockAdapterServiceServerMockRecorder is the mock recorder for MockAdapterServiceServer.
+type MockAdapterServiceServerMockRecorder struct {
+	mock *MockAdapterServiceServer
+}
+
+// NewMockAdapterServiceServer creates a new mock instance.
+func NewMockAdapterServiceServer(ctrl *gomock.Controller) *MockAdapterServiceServer {
+	mock := &MockAdapterServiceServer{ctrl: ctrl}
+	mock.recorder = &MockAdapterServiceServerMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockAdapterServiceServer) EXPECT() *MockAdapterServiceServerMockRecorder {
+	return m.recorder
+}
+
+// AbortOnuImageUpgrade mocks base method.
+func (m *MockAdapterServiceServer) AbortOnuImageUpgrade(arg0 context.Context, arg1 *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "AbortOnuImageUpgrade", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.DeviceImageResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// AbortOnuImageUpgrade indicates an expected call of AbortOnuImageUpgrade.
+func (mr *MockAdapterServiceServerMockRecorder) AbortOnuImageUpgrade(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortOnuImageUpgrade", reflect.TypeOf((*MockAdapterServiceServer)(nil).AbortOnuImageUpgrade), arg0, arg1)
+}
+
+// ActivateImageUpdate mocks base method.
+func (m *MockAdapterServiceServer) ActivateImageUpdate(arg0 context.Context, arg1 *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ActivateImageUpdate", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.ImageDownload)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ActivateImageUpdate indicates an expected call of ActivateImageUpdate.
+func (mr *MockAdapterServiceServerMockRecorder) ActivateImageUpdate(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActivateImageUpdate", reflect.TypeOf((*MockAdapterServiceServer)(nil).ActivateImageUpdate), arg0, arg1)
+}
+
+// ActivateOnuImage mocks base method.
+func (m *MockAdapterServiceServer) ActivateOnuImage(arg0 context.Context, arg1 *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ActivateOnuImage", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.DeviceImageResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ActivateOnuImage indicates an expected call of ActivateOnuImage.
+func (mr *MockAdapterServiceServerMockRecorder) ActivateOnuImage(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActivateOnuImage", reflect.TypeOf((*MockAdapterServiceServer)(nil).ActivateOnuImage), arg0, arg1)
+}
+
+// AdoptDevice mocks base method.
+func (m *MockAdapterServiceServer) AdoptDevice(arg0 context.Context, arg1 *voltha.Device) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "AdoptDevice", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// AdoptDevice indicates an expected call of AdoptDevice.
+func (mr *MockAdapterServiceServerMockRecorder) AdoptDevice(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AdoptDevice", reflect.TypeOf((*MockAdapterServiceServer)(nil).AdoptDevice), arg0, arg1)
+}
+
+// CancelImageDownload mocks base method.
+func (m *MockAdapterServiceServer) CancelImageDownload(arg0 context.Context, arg1 *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "CancelImageDownload", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.ImageDownload)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// CancelImageDownload indicates an expected call of CancelImageDownload.
+func (mr *MockAdapterServiceServerMockRecorder) CancelImageDownload(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelImageDownload", reflect.TypeOf((*MockAdapterServiceServer)(nil).CancelImageDownload), arg0, arg1)
+}
+
+// ChildDeviceLost mocks base method.
+func (m *MockAdapterServiceServer) ChildDeviceLost(arg0 context.Context, arg1 *voltha.Device) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ChildDeviceLost", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ChildDeviceLost indicates an expected call of ChildDeviceLost.
+func (mr *MockAdapterServiceServerMockRecorder) ChildDeviceLost(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChildDeviceLost", reflect.TypeOf((*MockAdapterServiceServer)(nil).ChildDeviceLost), arg0, arg1)
+}
+
+// CommitOnuImage mocks base method.
+func (m *MockAdapterServiceServer) CommitOnuImage(arg0 context.Context, arg1 *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "CommitOnuImage", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.DeviceImageResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// CommitOnuImage indicates an expected call of CommitOnuImage.
+func (mr *MockAdapterServiceServerMockRecorder) CommitOnuImage(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitOnuImage", reflect.TypeOf((*MockAdapterServiceServer)(nil).CommitOnuImage), arg0, arg1)
+}
+
+// DeleteDevice mocks base method.
+func (m *MockAdapterServiceServer) DeleteDevice(arg0 context.Context, arg1 *voltha.Device) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DeleteDevice", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeleteDevice indicates an expected call of DeleteDevice.
+func (mr *MockAdapterServiceServerMockRecorder) DeleteDevice(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDevice", reflect.TypeOf((*MockAdapterServiceServer)(nil).DeleteDevice), arg0, arg1)
+}
+
+// DisableDevice mocks base method.
+func (m *MockAdapterServiceServer) DisableDevice(arg0 context.Context, arg1 *voltha.Device) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DisableDevice", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DisableDevice indicates an expected call of DisableDevice.
+func (mr *MockAdapterServiceServerMockRecorder) DisableDevice(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableDevice", reflect.TypeOf((*MockAdapterServiceServer)(nil).DisableDevice), arg0, arg1)
+}
+
+// DisablePort mocks base method.
+func (m *MockAdapterServiceServer) DisablePort(arg0 context.Context, arg1 *voltha.Port) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DisablePort", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DisablePort indicates an expected call of DisablePort.
+func (mr *MockAdapterServiceServerMockRecorder) DisablePort(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisablePort", reflect.TypeOf((*MockAdapterServiceServer)(nil).DisablePort), arg0, arg1)
+}
+
+// DownloadImage mocks base method.
+func (m *MockAdapterServiceServer) DownloadImage(arg0 context.Context, arg1 *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DownloadImage", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.ImageDownload)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DownloadImage indicates an expected call of DownloadImage.
+func (mr *MockAdapterServiceServerMockRecorder) DownloadImage(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadImage", reflect.TypeOf((*MockAdapterServiceServer)(nil).DownloadImage), arg0, arg1)
+}
+
+// DownloadOnuImage mocks base method.
+func (m *MockAdapterServiceServer) DownloadOnuImage(arg0 context.Context, arg1 *voltha.DeviceImageDownloadRequest) (*voltha.DeviceImageResponse, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DownloadOnuImage", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.DeviceImageResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DownloadOnuImage indicates an expected call of DownloadOnuImage.
+func (mr *MockAdapterServiceServerMockRecorder) DownloadOnuImage(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadOnuImage", reflect.TypeOf((*MockAdapterServiceServer)(nil).DownloadOnuImage), arg0, arg1)
+}
+
+// EnablePort mocks base method.
+func (m *MockAdapterServiceServer) EnablePort(arg0 context.Context, arg1 *voltha.Port) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "EnablePort", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// EnablePort indicates an expected call of EnablePort.
+func (mr *MockAdapterServiceServerMockRecorder) EnablePort(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnablePort", reflect.TypeOf((*MockAdapterServiceServer)(nil).EnablePort), arg0, arg1)
+}
+
+// GetExtValue mocks base method.
+func (m *MockAdapterServiceServer) GetExtValue(arg0 context.Context, arg1 *inter_container.GetExtValueMessage) (*common.ReturnValues, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetExtValue", arg0, arg1)
+	ret0, _ := ret[0].(*common.ReturnValues)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetExtValue indicates an expected call of GetExtValue.
+func (mr *MockAdapterServiceServerMockRecorder) GetExtValue(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExtValue", reflect.TypeOf((*MockAdapterServiceServer)(nil).GetExtValue), arg0, arg1)
+}
+
+// GetHealthStatus mocks base method.
+func (m *MockAdapterServiceServer) GetHealthStatus(arg0 context.Context, arg1 *empty.Empty) (*voltha.HealthStatus, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetHealthStatus", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.HealthStatus)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetHealthStatus indicates an expected call of GetHealthStatus.
+func (mr *MockAdapterServiceServerMockRecorder) GetHealthStatus(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHealthStatus", reflect.TypeOf((*MockAdapterServiceServer)(nil).GetHealthStatus), arg0, arg1)
+}
+
+// GetImageDownloadStatus mocks base method.
+func (m *MockAdapterServiceServer) GetImageDownloadStatus(arg0 context.Context, arg1 *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetImageDownloadStatus", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.ImageDownload)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetImageDownloadStatus indicates an expected call of GetImageDownloadStatus.
+func (mr *MockAdapterServiceServerMockRecorder) GetImageDownloadStatus(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageDownloadStatus", reflect.TypeOf((*MockAdapterServiceServer)(nil).GetImageDownloadStatus), arg0, arg1)
+}
+
+// GetOfpDeviceInfo mocks base method.
+func (m *MockAdapterServiceServer) GetOfpDeviceInfo(arg0 context.Context, arg1 *voltha.Device) (*inter_container.SwitchCapability, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetOfpDeviceInfo", arg0, arg1)
+	ret0, _ := ret[0].(*inter_container.SwitchCapability)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetOfpDeviceInfo indicates an expected call of GetOfpDeviceInfo.
+func (mr *MockAdapterServiceServerMockRecorder) GetOfpDeviceInfo(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOfpDeviceInfo", reflect.TypeOf((*MockAdapterServiceServer)(nil).GetOfpDeviceInfo), arg0, arg1)
+}
+
+// GetOnuImageStatus mocks base method.
+func (m *MockAdapterServiceServer) GetOnuImageStatus(arg0 context.Context, arg1 *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetOnuImageStatus", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.DeviceImageResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetOnuImageStatus indicates an expected call of GetOnuImageStatus.
+func (mr *MockAdapterServiceServerMockRecorder) GetOnuImageStatus(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOnuImageStatus", reflect.TypeOf((*MockAdapterServiceServer)(nil).GetOnuImageStatus), arg0, arg1)
+}
+
+// GetOnuImages mocks base method.
+func (m *MockAdapterServiceServer) GetOnuImages(arg0 context.Context, arg1 *common.ID) (*voltha.OnuImages, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetOnuImages", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.OnuImages)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetOnuImages indicates an expected call of GetOnuImages.
+func (mr *MockAdapterServiceServerMockRecorder) GetOnuImages(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOnuImages", reflect.TypeOf((*MockAdapterServiceServer)(nil).GetOnuImages), arg0, arg1)
+}
+
+// GetSingleValue mocks base method.
+func (m *MockAdapterServiceServer) GetSingleValue(arg0 context.Context, arg1 *extension.SingleGetValueRequest) (*extension.SingleGetValueResponse, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetSingleValue", arg0, arg1)
+	ret0, _ := ret[0].(*extension.SingleGetValueResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetSingleValue indicates an expected call of GetSingleValue.
+func (mr *MockAdapterServiceServerMockRecorder) GetSingleValue(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSingleValue", reflect.TypeOf((*MockAdapterServiceServer)(nil).GetSingleValue), arg0, arg1)
+}
+
+// ReEnableDevice mocks base method.
+func (m *MockAdapterServiceServer) ReEnableDevice(arg0 context.Context, arg1 *voltha.Device) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ReEnableDevice", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ReEnableDevice indicates an expected call of ReEnableDevice.
+func (mr *MockAdapterServiceServerMockRecorder) ReEnableDevice(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReEnableDevice", reflect.TypeOf((*MockAdapterServiceServer)(nil).ReEnableDevice), arg0, arg1)
+}
+
+// RebootDevice mocks base method.
+func (m *MockAdapterServiceServer) RebootDevice(arg0 context.Context, arg1 *voltha.Device) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "RebootDevice", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// RebootDevice indicates an expected call of RebootDevice.
+func (mr *MockAdapterServiceServerMockRecorder) RebootDevice(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RebootDevice", reflect.TypeOf((*MockAdapterServiceServer)(nil).RebootDevice), arg0, arg1)
+}
+
+// ReconcileDevice mocks base method.
+func (m *MockAdapterServiceServer) ReconcileDevice(arg0 context.Context, arg1 *voltha.Device) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ReconcileDevice", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ReconcileDevice indicates an expected call of ReconcileDevice.
+func (mr *MockAdapterServiceServerMockRecorder) ReconcileDevice(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileDevice", reflect.TypeOf((*MockAdapterServiceServer)(nil).ReconcileDevice), arg0, arg1)
+}
+
+// RevertImageUpdate mocks base method.
+func (m *MockAdapterServiceServer) RevertImageUpdate(arg0 context.Context, arg1 *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "RevertImageUpdate", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.ImageDownload)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// RevertImageUpdate indicates an expected call of RevertImageUpdate.
+func (mr *MockAdapterServiceServerMockRecorder) RevertImageUpdate(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RevertImageUpdate", reflect.TypeOf((*MockAdapterServiceServer)(nil).RevertImageUpdate), arg0, arg1)
+}
+
+// SelfTestDevice mocks base method.
+func (m *MockAdapterServiceServer) SelfTestDevice(arg0 context.Context, arg1 *voltha.Device) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SelfTestDevice", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SelfTestDevice indicates an expected call of SelfTestDevice.
+func (mr *MockAdapterServiceServerMockRecorder) SelfTestDevice(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelfTestDevice", reflect.TypeOf((*MockAdapterServiceServer)(nil).SelfTestDevice), arg0, arg1)
+}
+
+// SendPacketOut mocks base method.
+func (m *MockAdapterServiceServer) SendPacketOut(arg0 context.Context, arg1 *inter_container.PacketOut) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SendPacketOut", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SendPacketOut indicates an expected call of SendPacketOut.
+func (mr *MockAdapterServiceServerMockRecorder) SendPacketOut(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPacketOut", reflect.TypeOf((*MockAdapterServiceServer)(nil).SendPacketOut), arg0, arg1)
+}
+
+// SetExtValue mocks base method.
+func (m *MockAdapterServiceServer) SetExtValue(arg0 context.Context, arg1 *inter_container.SetExtValueMessage) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SetExtValue", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SetExtValue indicates an expected call of SetExtValue.
+func (mr *MockAdapterServiceServerMockRecorder) SetExtValue(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetExtValue", reflect.TypeOf((*MockAdapterServiceServer)(nil).SetExtValue), arg0, arg1)
+}
+
+// SetSingleValue mocks base method.
+func (m *MockAdapterServiceServer) SetSingleValue(arg0 context.Context, arg1 *extension.SingleSetValueRequest) (*extension.SingleSetValueResponse, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SetSingleValue", arg0, arg1)
+	ret0, _ := ret[0].(*extension.SingleSetValueResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SetSingleValue indicates an expected call of SetSingleValue.
+func (mr *MockAdapterServiceServerMockRecorder) SetSingleValue(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSingleValue", reflect.TypeOf((*MockAdapterServiceServer)(nil).SetSingleValue), arg0, arg1)
+}
+
+// SimulateAlarm mocks base method.
+func (m *MockAdapterServiceServer) SimulateAlarm(arg0 context.Context, arg1 *inter_container.SimulateAlarmMessage) (*common.OperationResp, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SimulateAlarm", arg0, arg1)
+	ret0, _ := ret[0].(*common.OperationResp)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SimulateAlarm indicates an expected call of SimulateAlarm.
+func (mr *MockAdapterServiceServerMockRecorder) SimulateAlarm(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateAlarm", reflect.TypeOf((*MockAdapterServiceServer)(nil).SimulateAlarm), arg0, arg1)
+}
+
+// StartOmciTest mocks base method.
+func (m *MockAdapterServiceServer) StartOmciTest(arg0 context.Context, arg1 *inter_container.OMCITest) (*voltha.TestResponse, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "StartOmciTest", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.TestResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// StartOmciTest indicates an expected call of StartOmciTest.
+func (mr *MockAdapterServiceServerMockRecorder) StartOmciTest(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartOmciTest", reflect.TypeOf((*MockAdapterServiceServer)(nil).StartOmciTest), arg0, arg1)
+}
+
+// SuppressEvent mocks base method.
+func (m *MockAdapterServiceServer) SuppressEvent(arg0 context.Context, arg1 *voltha.EventFilter) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SuppressEvent", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SuppressEvent indicates an expected call of SuppressEvent.
+func (mr *MockAdapterServiceServerMockRecorder) SuppressEvent(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SuppressEvent", reflect.TypeOf((*MockAdapterServiceServer)(nil).SuppressEvent), arg0, arg1)
+}
+
+// UnSuppressEvent mocks base method.
+func (m *MockAdapterServiceServer) UnSuppressEvent(arg0 context.Context, arg1 *voltha.EventFilter) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "UnSuppressEvent", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UnSuppressEvent indicates an expected call of UnSuppressEvent.
+func (mr *MockAdapterServiceServerMockRecorder) UnSuppressEvent(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnSuppressEvent", reflect.TypeOf((*MockAdapterServiceServer)(nil).UnSuppressEvent), arg0, arg1)
+}
+
+// UpdateFlowsBulk mocks base method.
+func (m *MockAdapterServiceServer) UpdateFlowsBulk(arg0 context.Context, arg1 *inter_container.BulkFlows) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "UpdateFlowsBulk", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UpdateFlowsBulk indicates an expected call of UpdateFlowsBulk.
+func (mr *MockAdapterServiceServerMockRecorder) UpdateFlowsBulk(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFlowsBulk", reflect.TypeOf((*MockAdapterServiceServer)(nil).UpdateFlowsBulk), arg0, arg1)
+}
+
+// UpdateFlowsIncrementally mocks base method.
+func (m *MockAdapterServiceServer) UpdateFlowsIncrementally(arg0 context.Context, arg1 *inter_container.IncrementalFlows) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "UpdateFlowsIncrementally", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UpdateFlowsIncrementally indicates an expected call of UpdateFlowsIncrementally.
+func (mr *MockAdapterServiceServerMockRecorder) UpdateFlowsIncrementally(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFlowsIncrementally", reflect.TypeOf((*MockAdapterServiceServer)(nil).UpdateFlowsIncrementally), arg0, arg1)
+}
+
+// UpdatePmConfig mocks base method.
+func (m *MockAdapterServiceServer) UpdatePmConfig(arg0 context.Context, arg1 *inter_container.PmConfigsInfo) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "UpdatePmConfig", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UpdatePmConfig indicates an expected call of UpdatePmConfig.
+func (mr *MockAdapterServiceServerMockRecorder) UpdatePmConfig(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePmConfig", reflect.TypeOf((*MockAdapterServiceServer)(nil).UpdatePmConfig), arg0, arg1)
+}
+
+// MockOnuInterAdapterServiceClient is a mock of OnuInterAdapterServiceClient interface.
+type MockOnuInterAdapterServiceClient struct {
+	ctrl     *gomock.Controller
+	recorder *MockOnuInterAdapterServiceClientMockRecorder
+}
+
+// MockOnuInterAdapterServiceClientMockRecorder is the mock recorder for MockOnuInterAdapterServiceClient.
+type MockOnuInterAdapterServiceClientMockRecorder struct {
+	mock *MockOnuInterAdapterServiceClient
+}
+
+// NewMockOnuInterAdapterServiceClient creates a new mock instance.
+func NewMockOnuInterAdapterServiceClient(ctrl *gomock.Controller) *MockOnuInterAdapterServiceClient {
+	mock := &MockOnuInterAdapterServiceClient{ctrl: ctrl}
+	mock.recorder = &MockOnuInterAdapterServiceClientMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockOnuInterAdapterServiceClient) EXPECT() *MockOnuInterAdapterServiceClientMockRecorder {
+	return m.recorder
+}
+
+// DeleteGemPort mocks base method.
+func (m *MockOnuInterAdapterServiceClient) DeleteGemPort(ctx context.Context, in *inter_container.DeleteGemPortMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DeleteGemPort", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeleteGemPort indicates an expected call of DeleteGemPort.
+func (mr *MockOnuInterAdapterServiceClientMockRecorder) DeleteGemPort(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGemPort", reflect.TypeOf((*MockOnuInterAdapterServiceClient)(nil).DeleteGemPort), varargs...)
+}
+
+// DeleteTCont mocks base method.
+func (m *MockOnuInterAdapterServiceClient) DeleteTCont(ctx context.Context, in *inter_container.DeleteTcontMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DeleteTCont", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeleteTCont indicates an expected call of DeleteTCont.
+func (mr *MockOnuInterAdapterServiceClientMockRecorder) DeleteTCont(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTCont", reflect.TypeOf((*MockOnuInterAdapterServiceClient)(nil).DeleteTCont), varargs...)
+}
+
+// DownloadTechProfile mocks base method.
+func (m *MockOnuInterAdapterServiceClient) DownloadTechProfile(ctx context.Context, in *inter_container.TechProfileDownloadMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DownloadTechProfile", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DownloadTechProfile indicates an expected call of DownloadTechProfile.
+func (mr *MockOnuInterAdapterServiceClientMockRecorder) DownloadTechProfile(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadTechProfile", reflect.TypeOf((*MockOnuInterAdapterServiceClient)(nil).DownloadTechProfile), varargs...)
+}
+
+// GetHealthStatus mocks base method.
+func (m *MockOnuInterAdapterServiceClient) GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*voltha.HealthStatus, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetHealthStatus", varargs...)
+	ret0, _ := ret[0].(*voltha.HealthStatus)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetHealthStatus indicates an expected call of GetHealthStatus.
+func (mr *MockOnuInterAdapterServiceClientMockRecorder) GetHealthStatus(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHealthStatus", reflect.TypeOf((*MockOnuInterAdapterServiceClient)(nil).GetHealthStatus), varargs...)
+}
+
+// OmciResponse mocks base method.
+func (m *MockOnuInterAdapterServiceClient) OmciResponse(ctx context.Context, in *inter_container.OmciMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "OmciResponse", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// OmciResponse indicates an expected call of OmciResponse.
+func (mr *MockOnuInterAdapterServiceClientMockRecorder) OmciResponse(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OmciResponse", reflect.TypeOf((*MockOnuInterAdapterServiceClient)(nil).OmciResponse), varargs...)
+}
+
+// OnuIndication mocks base method.
+func (m *MockOnuInterAdapterServiceClient) OnuIndication(ctx context.Context, in *inter_container.OnuIndicationMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "OnuIndication", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// OnuIndication indicates an expected call of OnuIndication.
+func (mr *MockOnuInterAdapterServiceClientMockRecorder) OnuIndication(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnuIndication", reflect.TypeOf((*MockOnuInterAdapterServiceClient)(nil).OnuIndication), varargs...)
+}
+
+// MockOnuInterAdapterServiceServer is a mock of OnuInterAdapterServiceServer interface.
+type MockOnuInterAdapterServiceServer struct {
+	ctrl     *gomock.Controller
+	recorder *MockOnuInterAdapterServiceServerMockRecorder
+}
+
+// MockOnuInterAdapterServiceServerMockRecorder is the mock recorder for MockOnuInterAdapterServiceServer.
+type MockOnuInterAdapterServiceServerMockRecorder struct {
+	mock *MockOnuInterAdapterServiceServer
+}
+
+// NewMockOnuInterAdapterServiceServer creates a new mock instance.
+func NewMockOnuInterAdapterServiceServer(ctrl *gomock.Controller) *MockOnuInterAdapterServiceServer {
+	mock := &MockOnuInterAdapterServiceServer{ctrl: ctrl}
+	mock.recorder = &MockOnuInterAdapterServiceServerMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockOnuInterAdapterServiceServer) EXPECT() *MockOnuInterAdapterServiceServerMockRecorder {
+	return m.recorder
+}
+
+// DeleteGemPort mocks base method.
+func (m *MockOnuInterAdapterServiceServer) DeleteGemPort(arg0 context.Context, arg1 *inter_container.DeleteGemPortMessage) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DeleteGemPort", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeleteGemPort indicates an expected call of DeleteGemPort.
+func (mr *MockOnuInterAdapterServiceServerMockRecorder) DeleteGemPort(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGemPort", reflect.TypeOf((*MockOnuInterAdapterServiceServer)(nil).DeleteGemPort), arg0, arg1)
+}
+
+// DeleteTCont mocks base method.
+func (m *MockOnuInterAdapterServiceServer) DeleteTCont(arg0 context.Context, arg1 *inter_container.DeleteTcontMessage) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DeleteTCont", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeleteTCont indicates an expected call of DeleteTCont.
+func (mr *MockOnuInterAdapterServiceServerMockRecorder) DeleteTCont(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTCont", reflect.TypeOf((*MockOnuInterAdapterServiceServer)(nil).DeleteTCont), arg0, arg1)
+}
+
+// DownloadTechProfile mocks base method.
+func (m *MockOnuInterAdapterServiceServer) DownloadTechProfile(arg0 context.Context, arg1 *inter_container.TechProfileDownloadMessage) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DownloadTechProfile", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DownloadTechProfile indicates an expected call of DownloadTechProfile.
+func (mr *MockOnuInterAdapterServiceServerMockRecorder) DownloadTechProfile(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadTechProfile", reflect.TypeOf((*MockOnuInterAdapterServiceServer)(nil).DownloadTechProfile), arg0, arg1)
+}
+
+// GetHealthStatus mocks base method.
+func (m *MockOnuInterAdapterServiceServer) GetHealthStatus(arg0 context.Context, arg1 *empty.Empty) (*voltha.HealthStatus, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetHealthStatus", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.HealthStatus)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetHealthStatus indicates an expected call of GetHealthStatus.
+func (mr *MockOnuInterAdapterServiceServerMockRecorder) GetHealthStatus(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHealthStatus", reflect.TypeOf((*MockOnuInterAdapterServiceServer)(nil).GetHealthStatus), arg0, arg1)
+}
+
+// OmciResponse mocks base method.
+func (m *MockOnuInterAdapterServiceServer) OmciResponse(arg0 context.Context, arg1 *inter_container.OmciMessage) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "OmciResponse", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// OmciResponse indicates an expected call of OmciResponse.
+func (mr *MockOnuInterAdapterServiceServerMockRecorder) OmciResponse(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OmciResponse", reflect.TypeOf((*MockOnuInterAdapterServiceServer)(nil).OmciResponse), arg0, arg1)
+}
+
+// OnuIndication mocks base method.
+func (m *MockOnuInterAdapterServiceServer) OnuIndication(arg0 context.Context, arg1 *inter_container.OnuIndicationMessage) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "OnuIndication", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// OnuIndication indicates an expected call of OnuIndication.
+func (mr *MockOnuInterAdapterServiceServerMockRecorder) OnuIndication(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnuIndication", reflect.TypeOf((*MockOnuInterAdapterServiceServer)(nil).OnuIndication), arg0, arg1)
+}
+
+// MockOltInterAdapterServiceClient is a mock of OltInterAdapterServiceClient interface.
+type MockOltInterAdapterServiceClient struct {
+	ctrl     *gomock.Controller
+	recorder *MockOltInterAdapterServiceClientMockRecorder
+}
+
+// MockOltInterAdapterServiceClientMockRecorder is the mock recorder for MockOltInterAdapterServiceClient.
+type MockOltInterAdapterServiceClientMockRecorder struct {
+	mock *MockOltInterAdapterServiceClient
+}
+
+// NewMockOltInterAdapterServiceClient creates a new mock instance.
+func NewMockOltInterAdapterServiceClient(ctrl *gomock.Controller) *MockOltInterAdapterServiceClient {
+	mock := &MockOltInterAdapterServiceClient{ctrl: ctrl}
+	mock.recorder = &MockOltInterAdapterServiceClientMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockOltInterAdapterServiceClient) EXPECT() *MockOltInterAdapterServiceClientMockRecorder {
+	return m.recorder
+}
+
+// GetHealthStatus mocks base method.
+func (m *MockOltInterAdapterServiceClient) GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*voltha.HealthStatus, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetHealthStatus", varargs...)
+	ret0, _ := ret[0].(*voltha.HealthStatus)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetHealthStatus indicates an expected call of GetHealthStatus.
+func (mr *MockOltInterAdapterServiceClientMockRecorder) GetHealthStatus(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHealthStatus", reflect.TypeOf((*MockOltInterAdapterServiceClient)(nil).GetHealthStatus), varargs...)
+}
+
+// GetTechProfileInstance mocks base method.
+func (m *MockOltInterAdapterServiceClient) GetTechProfileInstance(ctx context.Context, in *inter_container.TechProfileInstanceRequestMessage, opts ...grpc.CallOption) (*inter_container.TechProfileDownloadMessage, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetTechProfileInstance", varargs...)
+	ret0, _ := ret[0].(*inter_container.TechProfileDownloadMessage)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetTechProfileInstance indicates an expected call of GetTechProfileInstance.
+func (mr *MockOltInterAdapterServiceClientMockRecorder) GetTechProfileInstance(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTechProfileInstance", reflect.TypeOf((*MockOltInterAdapterServiceClient)(nil).GetTechProfileInstance), varargs...)
+}
+
+// ProxyOmciRequest mocks base method.
+func (m *MockOltInterAdapterServiceClient) ProxyOmciRequest(ctx context.Context, in *inter_container.OmciMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ProxyOmciRequest", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ProxyOmciRequest indicates an expected call of ProxyOmciRequest.
+func (mr *MockOltInterAdapterServiceClientMockRecorder) ProxyOmciRequest(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProxyOmciRequest", reflect.TypeOf((*MockOltInterAdapterServiceClient)(nil).ProxyOmciRequest), varargs...)
+}
+
+// MockOltInterAdapterServiceServer is a mock of OltInterAdapterServiceServer interface.
+type MockOltInterAdapterServiceServer struct {
+	ctrl     *gomock.Controller
+	recorder *MockOltInterAdapterServiceServerMockRecorder
+}
+
+// MockOltInterAdapterServiceServerMockRecorder is the mock recorder for MockOltInterAdapterServiceServer.
+type MockOltInterAdapterServiceServerMockRecorder struct {
+	mock *MockOltInterAdapterServiceServer
+}
+
+// NewMockOltInterAdapterServiceServer creates a new mock instance.
+func NewMockOltInterAdapterServiceServer(ctrl *gomock.Controller) *MockOltInterAdapterServiceServer {
+	mock := &MockOltInterAdapterServiceServer{ctrl: ctrl}
+	mock.recorder = &MockOltInterAdapterServiceServerMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockOltInterAdapterServiceServer) EXPECT() *MockOltInterAdapterServiceServerMockRecorder {
+	return m.recorder
+}
+
+// GetHealthStatus mocks base method.
+func (m *MockOltInterAdapterServiceServer) GetHealthStatus(arg0 context.Context, arg1 *empty.Empty) (*voltha.HealthStatus, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetHealthStatus", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.HealthStatus)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetHealthStatus indicates an expected call of GetHealthStatus.
+func (mr *MockOltInterAdapterServiceServerMockRecorder) GetHealthStatus(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHealthStatus", reflect.TypeOf((*MockOltInterAdapterServiceServer)(nil).GetHealthStatus), arg0, arg1)
+}
+
+// GetTechProfileInstance mocks base method.
+func (m *MockOltInterAdapterServiceServer) GetTechProfileInstance(arg0 context.Context, arg1 *inter_container.TechProfileInstanceRequestMessage) (*inter_container.TechProfileDownloadMessage, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetTechProfileInstance", arg0, arg1)
+	ret0, _ := ret[0].(*inter_container.TechProfileDownloadMessage)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetTechProfileInstance indicates an expected call of GetTechProfileInstance.
+func (mr *MockOltInterAdapterServiceServerMockRecorder) GetTechProfileInstance(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTechProfileInstance", reflect.TypeOf((*MockOltInterAdapterServiceServer)(nil).GetTechProfileInstance), arg0, arg1)
+}
+
+// ProxyOmciRequest mocks base method.
+func (m *MockOltInterAdapterServiceServer) ProxyOmciRequest(arg0 context.Context, arg1 *inter_container.OmciMessage) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ProxyOmciRequest", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ProxyOmciRequest indicates an expected call of ProxyOmciRequest.
+func (mr *MockOltInterAdapterServiceServerMockRecorder) ProxyOmciRequest(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProxyOmciRequest", reflect.TypeOf((*MockOltInterAdapterServiceServer)(nil).ProxyOmciRequest), arg0, arg1)
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/grpc/mock_core_services.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/grpc/mock_core_services.go
new file mode 100644
index 0000000..49abae7
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/grpc/mock_core_services.go
@@ -0,0 +1,917 @@
+/*
+ * Copyright 2021-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Code generated by MockGen. DO NOT EDIT.
+// Source: core.pb.go
+
+package grpc
+
+import (
+	context "context"
+	reflect "reflect"
+
+	gomock "github.com/golang/mock/gomock"
+	empty "github.com/golang/protobuf/ptypes/empty"
+	common "github.com/opencord/voltha-protos/v5/go/common"
+	inter_container "github.com/opencord/voltha-protos/v5/go/inter_container"
+	voltha "github.com/opencord/voltha-protos/v5/go/voltha"
+	grpc "google.golang.org/grpc"
+)
+
+// MockCoreServiceClient is a mock of CoreServiceClient interface.
+type MockCoreServiceClient struct {
+	ctrl     *gomock.Controller
+	recorder *MockCoreServiceClientMockRecorder
+}
+
+// MockCoreServiceClientMockRecorder is the mock recorder for MockCoreServiceClient.
+type MockCoreServiceClientMockRecorder struct {
+	mock *MockCoreServiceClient
+}
+
+// NewMockCoreServiceClient creates a new mock instance.
+func NewMockCoreServiceClient(ctrl *gomock.Controller) *MockCoreServiceClient {
+	mock := &MockCoreServiceClient{ctrl: ctrl}
+	mock.recorder = &MockCoreServiceClientMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockCoreServiceClient) EXPECT() *MockCoreServiceClientMockRecorder {
+	return m.recorder
+}
+
+// ChildDeviceDetected mocks base method.
+func (m *MockCoreServiceClient) ChildDeviceDetected(ctx context.Context, in *inter_container.DeviceDiscovery, opts ...grpc.CallOption) (*voltha.Device, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ChildDeviceDetected", varargs...)
+	ret0, _ := ret[0].(*voltha.Device)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ChildDeviceDetected indicates an expected call of ChildDeviceDetected.
+func (mr *MockCoreServiceClientMockRecorder) ChildDeviceDetected(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChildDeviceDetected", reflect.TypeOf((*MockCoreServiceClient)(nil).ChildDeviceDetected), varargs...)
+}
+
+// ChildDevicesDetected mocks base method.
+func (m *MockCoreServiceClient) ChildDevicesDetected(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ChildDevicesDetected", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ChildDevicesDetected indicates an expected call of ChildDevicesDetected.
+func (mr *MockCoreServiceClientMockRecorder) ChildDevicesDetected(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChildDevicesDetected", reflect.TypeOf((*MockCoreServiceClient)(nil).ChildDevicesDetected), varargs...)
+}
+
+// ChildDevicesLost mocks base method.
+func (m *MockCoreServiceClient) ChildDevicesLost(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ChildDevicesLost", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ChildDevicesLost indicates an expected call of ChildDevicesLost.
+func (mr *MockCoreServiceClientMockRecorder) ChildDevicesLost(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChildDevicesLost", reflect.TypeOf((*MockCoreServiceClient)(nil).ChildDevicesLost), varargs...)
+}
+
+// ChildrenStateUpdate mocks base method.
+func (m *MockCoreServiceClient) ChildrenStateUpdate(ctx context.Context, in *inter_container.DeviceStateFilter, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ChildrenStateUpdate", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ChildrenStateUpdate indicates an expected call of ChildrenStateUpdate.
+func (mr *MockCoreServiceClientMockRecorder) ChildrenStateUpdate(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChildrenStateUpdate", reflect.TypeOf((*MockCoreServiceClient)(nil).ChildrenStateUpdate), varargs...)
+}
+
+// DeleteAllPorts mocks base method.
+func (m *MockCoreServiceClient) DeleteAllPorts(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DeleteAllPorts", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeleteAllPorts indicates an expected call of DeleteAllPorts.
+func (mr *MockCoreServiceClientMockRecorder) DeleteAllPorts(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllPorts", reflect.TypeOf((*MockCoreServiceClient)(nil).DeleteAllPorts), varargs...)
+}
+
+// DevicePMConfigUpdate mocks base method.
+func (m *MockCoreServiceClient) DevicePMConfigUpdate(ctx context.Context, in *voltha.PmConfigs, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DevicePMConfigUpdate", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DevicePMConfigUpdate indicates an expected call of DevicePMConfigUpdate.
+func (mr *MockCoreServiceClientMockRecorder) DevicePMConfigUpdate(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DevicePMConfigUpdate", reflect.TypeOf((*MockCoreServiceClient)(nil).DevicePMConfigUpdate), varargs...)
+}
+
+// DeviceReasonUpdate mocks base method.
+func (m *MockCoreServiceClient) DeviceReasonUpdate(ctx context.Context, in *inter_container.DeviceReason, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DeviceReasonUpdate", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeviceReasonUpdate indicates an expected call of DeviceReasonUpdate.
+func (mr *MockCoreServiceClientMockRecorder) DeviceReasonUpdate(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeviceReasonUpdate", reflect.TypeOf((*MockCoreServiceClient)(nil).DeviceReasonUpdate), varargs...)
+}
+
+// DeviceStateUpdate mocks base method.
+func (m *MockCoreServiceClient) DeviceStateUpdate(ctx context.Context, in *inter_container.DeviceStateFilter, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DeviceStateUpdate", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeviceStateUpdate indicates an expected call of DeviceStateUpdate.
+func (mr *MockCoreServiceClientMockRecorder) DeviceStateUpdate(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeviceStateUpdate", reflect.TypeOf((*MockCoreServiceClient)(nil).DeviceStateUpdate), varargs...)
+}
+
+// DeviceUpdate mocks base method.
+func (m *MockCoreServiceClient) DeviceUpdate(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DeviceUpdate", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeviceUpdate indicates an expected call of DeviceUpdate.
+func (mr *MockCoreServiceClientMockRecorder) DeviceUpdate(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeviceUpdate", reflect.TypeOf((*MockCoreServiceClient)(nil).DeviceUpdate), varargs...)
+}
+
+// GetChildDevice mocks base method.
+func (m *MockCoreServiceClient) GetChildDevice(ctx context.Context, in *inter_container.ChildDeviceFilter, opts ...grpc.CallOption) (*voltha.Device, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetChildDevice", varargs...)
+	ret0, _ := ret[0].(*voltha.Device)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetChildDevice indicates an expected call of GetChildDevice.
+func (mr *MockCoreServiceClientMockRecorder) GetChildDevice(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChildDevice", reflect.TypeOf((*MockCoreServiceClient)(nil).GetChildDevice), varargs...)
+}
+
+// GetChildDeviceWithProxyAddress mocks base method.
+func (m *MockCoreServiceClient) GetChildDeviceWithProxyAddress(ctx context.Context, in *voltha.Device_ProxyAddress, opts ...grpc.CallOption) (*voltha.Device, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetChildDeviceWithProxyAddress", varargs...)
+	ret0, _ := ret[0].(*voltha.Device)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetChildDeviceWithProxyAddress indicates an expected call of GetChildDeviceWithProxyAddress.
+func (mr *MockCoreServiceClientMockRecorder) GetChildDeviceWithProxyAddress(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChildDeviceWithProxyAddress", reflect.TypeOf((*MockCoreServiceClient)(nil).GetChildDeviceWithProxyAddress), varargs...)
+}
+
+// GetChildDevices mocks base method.
+func (m *MockCoreServiceClient) GetChildDevices(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*voltha.Devices, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetChildDevices", varargs...)
+	ret0, _ := ret[0].(*voltha.Devices)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetChildDevices indicates an expected call of GetChildDevices.
+func (mr *MockCoreServiceClientMockRecorder) GetChildDevices(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChildDevices", reflect.TypeOf((*MockCoreServiceClient)(nil).GetChildDevices), varargs...)
+}
+
+// GetDevice mocks base method.
+func (m *MockCoreServiceClient) GetDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*voltha.Device, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetDevice", varargs...)
+	ret0, _ := ret[0].(*voltha.Device)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetDevice indicates an expected call of GetDevice.
+func (mr *MockCoreServiceClientMockRecorder) GetDevice(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevice", reflect.TypeOf((*MockCoreServiceClient)(nil).GetDevice), varargs...)
+}
+
+// GetDevicePort mocks base method.
+func (m *MockCoreServiceClient) GetDevicePort(ctx context.Context, in *inter_container.PortFilter, opts ...grpc.CallOption) (*voltha.Port, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetDevicePort", varargs...)
+	ret0, _ := ret[0].(*voltha.Port)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetDevicePort indicates an expected call of GetDevicePort.
+func (mr *MockCoreServiceClientMockRecorder) GetDevicePort(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevicePort", reflect.TypeOf((*MockCoreServiceClient)(nil).GetDevicePort), varargs...)
+}
+
+// GetHealthStatus mocks base method.
+func (m *MockCoreServiceClient) GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*voltha.HealthStatus, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetHealthStatus", varargs...)
+	ret0, _ := ret[0].(*voltha.HealthStatus)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetHealthStatus indicates an expected call of GetHealthStatus.
+func (mr *MockCoreServiceClientMockRecorder) GetHealthStatus(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHealthStatus", reflect.TypeOf((*MockCoreServiceClient)(nil).GetHealthStatus), varargs...)
+}
+
+// GetPorts mocks base method.
+func (m *MockCoreServiceClient) GetPorts(ctx context.Context, in *inter_container.PortFilter, opts ...grpc.CallOption) (*voltha.Ports, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetPorts", varargs...)
+	ret0, _ := ret[0].(*voltha.Ports)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetPorts indicates an expected call of GetPorts.
+func (mr *MockCoreServiceClientMockRecorder) GetPorts(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPorts", reflect.TypeOf((*MockCoreServiceClient)(nil).GetPorts), varargs...)
+}
+
+// ListDevicePorts mocks base method.
+func (m *MockCoreServiceClient) ListDevicePorts(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*voltha.Ports, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ListDevicePorts", varargs...)
+	ret0, _ := ret[0].(*voltha.Ports)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ListDevicePorts indicates an expected call of ListDevicePorts.
+func (mr *MockCoreServiceClientMockRecorder) ListDevicePorts(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDevicePorts", reflect.TypeOf((*MockCoreServiceClient)(nil).ListDevicePorts), varargs...)
+}
+
+// PortCreated mocks base method.
+func (m *MockCoreServiceClient) PortCreated(ctx context.Context, in *voltha.Port, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "PortCreated", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// PortCreated indicates an expected call of PortCreated.
+func (mr *MockCoreServiceClientMockRecorder) PortCreated(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PortCreated", reflect.TypeOf((*MockCoreServiceClient)(nil).PortCreated), varargs...)
+}
+
+// PortStateUpdate mocks base method.
+func (m *MockCoreServiceClient) PortStateUpdate(ctx context.Context, in *inter_container.PortState, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "PortStateUpdate", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// PortStateUpdate indicates an expected call of PortStateUpdate.
+func (mr *MockCoreServiceClientMockRecorder) PortStateUpdate(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PortStateUpdate", reflect.TypeOf((*MockCoreServiceClient)(nil).PortStateUpdate), varargs...)
+}
+
+// PortsStateUpdate mocks base method.
+func (m *MockCoreServiceClient) PortsStateUpdate(ctx context.Context, in *inter_container.PortStateFilter, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "PortsStateUpdate", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// PortsStateUpdate indicates an expected call of PortsStateUpdate.
+func (mr *MockCoreServiceClientMockRecorder) PortsStateUpdate(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PortsStateUpdate", reflect.TypeOf((*MockCoreServiceClient)(nil).PortsStateUpdate), varargs...)
+}
+
+// ReconcileChildDevices mocks base method.
+func (m *MockCoreServiceClient) ReconcileChildDevices(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ReconcileChildDevices", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ReconcileChildDevices indicates an expected call of ReconcileChildDevices.
+func (mr *MockCoreServiceClientMockRecorder) ReconcileChildDevices(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileChildDevices", reflect.TypeOf((*MockCoreServiceClient)(nil).ReconcileChildDevices), varargs...)
+}
+
+// RegisterAdapter mocks base method.
+func (m *MockCoreServiceClient) RegisterAdapter(ctx context.Context, in *inter_container.AdapterRegistration, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "RegisterAdapter", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// RegisterAdapter indicates an expected call of RegisterAdapter.
+func (mr *MockCoreServiceClientMockRecorder) RegisterAdapter(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterAdapter", reflect.TypeOf((*MockCoreServiceClient)(nil).RegisterAdapter), varargs...)
+}
+
+// SendPacketIn mocks base method.
+func (m *MockCoreServiceClient) SendPacketIn(ctx context.Context, in *inter_container.PacketIn, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "SendPacketIn", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SendPacketIn indicates an expected call of SendPacketIn.
+func (mr *MockCoreServiceClientMockRecorder) SendPacketIn(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPacketIn", reflect.TypeOf((*MockCoreServiceClient)(nil).SendPacketIn), varargs...)
+}
+
+// UpdateImageDownload mocks base method.
+func (m *MockCoreServiceClient) UpdateImageDownload(ctx context.Context, in *voltha.ImageDownload, opts ...grpc.CallOption) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "UpdateImageDownload", varargs...)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UpdateImageDownload indicates an expected call of UpdateImageDownload.
+func (mr *MockCoreServiceClientMockRecorder) UpdateImageDownload(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateImageDownload", reflect.TypeOf((*MockCoreServiceClient)(nil).UpdateImageDownload), varargs...)
+}
+
+// MockCoreServiceServer is a mock of CoreServiceServer interface.
+type MockCoreServiceServer struct {
+	ctrl     *gomock.Controller
+	recorder *MockCoreServiceServerMockRecorder
+}
+
+// MockCoreServiceServerMockRecorder is the mock recorder for MockCoreServiceServer.
+type MockCoreServiceServerMockRecorder struct {
+	mock *MockCoreServiceServer
+}
+
+// NewMockCoreServiceServer creates a new mock instance.
+func NewMockCoreServiceServer(ctrl *gomock.Controller) *MockCoreServiceServer {
+	mock := &MockCoreServiceServer{ctrl: ctrl}
+	mock.recorder = &MockCoreServiceServerMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockCoreServiceServer) EXPECT() *MockCoreServiceServerMockRecorder {
+	return m.recorder
+}
+
+// ChildDeviceDetected mocks base method.
+func (m *MockCoreServiceServer) ChildDeviceDetected(arg0 context.Context, arg1 *inter_container.DeviceDiscovery) (*voltha.Device, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ChildDeviceDetected", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.Device)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ChildDeviceDetected indicates an expected call of ChildDeviceDetected.
+func (mr *MockCoreServiceServerMockRecorder) ChildDeviceDetected(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChildDeviceDetected", reflect.TypeOf((*MockCoreServiceServer)(nil).ChildDeviceDetected), arg0, arg1)
+}
+
+// ChildDevicesDetected mocks base method.
+func (m *MockCoreServiceServer) ChildDevicesDetected(arg0 context.Context, arg1 *common.ID) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ChildDevicesDetected", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ChildDevicesDetected indicates an expected call of ChildDevicesDetected.
+func (mr *MockCoreServiceServerMockRecorder) ChildDevicesDetected(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChildDevicesDetected", reflect.TypeOf((*MockCoreServiceServer)(nil).ChildDevicesDetected), arg0, arg1)
+}
+
+// ChildDevicesLost mocks base method.
+func (m *MockCoreServiceServer) ChildDevicesLost(arg0 context.Context, arg1 *common.ID) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ChildDevicesLost", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ChildDevicesLost indicates an expected call of ChildDevicesLost.
+func (mr *MockCoreServiceServerMockRecorder) ChildDevicesLost(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChildDevicesLost", reflect.TypeOf((*MockCoreServiceServer)(nil).ChildDevicesLost), arg0, arg1)
+}
+
+// ChildrenStateUpdate mocks base method.
+func (m *MockCoreServiceServer) ChildrenStateUpdate(arg0 context.Context, arg1 *inter_container.DeviceStateFilter) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ChildrenStateUpdate", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ChildrenStateUpdate indicates an expected call of ChildrenStateUpdate.
+func (mr *MockCoreServiceServerMockRecorder) ChildrenStateUpdate(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChildrenStateUpdate", reflect.TypeOf((*MockCoreServiceServer)(nil).ChildrenStateUpdate), arg0, arg1)
+}
+
+// DeleteAllPorts mocks base method.
+func (m *MockCoreServiceServer) DeleteAllPorts(arg0 context.Context, arg1 *common.ID) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DeleteAllPorts", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeleteAllPorts indicates an expected call of DeleteAllPorts.
+func (mr *MockCoreServiceServerMockRecorder) DeleteAllPorts(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllPorts", reflect.TypeOf((*MockCoreServiceServer)(nil).DeleteAllPorts), arg0, arg1)
+}
+
+// DevicePMConfigUpdate mocks base method.
+func (m *MockCoreServiceServer) DevicePMConfigUpdate(arg0 context.Context, arg1 *voltha.PmConfigs) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DevicePMConfigUpdate", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DevicePMConfigUpdate indicates an expected call of DevicePMConfigUpdate.
+func (mr *MockCoreServiceServerMockRecorder) DevicePMConfigUpdate(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DevicePMConfigUpdate", reflect.TypeOf((*MockCoreServiceServer)(nil).DevicePMConfigUpdate), arg0, arg1)
+}
+
+// DeviceReasonUpdate mocks base method.
+func (m *MockCoreServiceServer) DeviceReasonUpdate(arg0 context.Context, arg1 *inter_container.DeviceReason) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DeviceReasonUpdate", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeviceReasonUpdate indicates an expected call of DeviceReasonUpdate.
+func (mr *MockCoreServiceServerMockRecorder) DeviceReasonUpdate(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeviceReasonUpdate", reflect.TypeOf((*MockCoreServiceServer)(nil).DeviceReasonUpdate), arg0, arg1)
+}
+
+// DeviceStateUpdate mocks base method.
+func (m *MockCoreServiceServer) DeviceStateUpdate(arg0 context.Context, arg1 *inter_container.DeviceStateFilter) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DeviceStateUpdate", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeviceStateUpdate indicates an expected call of DeviceStateUpdate.
+func (mr *MockCoreServiceServerMockRecorder) DeviceStateUpdate(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeviceStateUpdate", reflect.TypeOf((*MockCoreServiceServer)(nil).DeviceStateUpdate), arg0, arg1)
+}
+
+// DeviceUpdate mocks base method.
+func (m *MockCoreServiceServer) DeviceUpdate(arg0 context.Context, arg1 *voltha.Device) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DeviceUpdate", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeviceUpdate indicates an expected call of DeviceUpdate.
+func (mr *MockCoreServiceServerMockRecorder) DeviceUpdate(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeviceUpdate", reflect.TypeOf((*MockCoreServiceServer)(nil).DeviceUpdate), arg0, arg1)
+}
+
+// GetChildDevice mocks base method.
+func (m *MockCoreServiceServer) GetChildDevice(arg0 context.Context, arg1 *inter_container.ChildDeviceFilter) (*voltha.Device, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetChildDevice", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.Device)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetChildDevice indicates an expected call of GetChildDevice.
+func (mr *MockCoreServiceServerMockRecorder) GetChildDevice(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChildDevice", reflect.TypeOf((*MockCoreServiceServer)(nil).GetChildDevice), arg0, arg1)
+}
+
+// GetChildDeviceWithProxyAddress mocks base method.
+func (m *MockCoreServiceServer) GetChildDeviceWithProxyAddress(arg0 context.Context, arg1 *voltha.Device_ProxyAddress) (*voltha.Device, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetChildDeviceWithProxyAddress", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.Device)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetChildDeviceWithProxyAddress indicates an expected call of GetChildDeviceWithProxyAddress.
+func (mr *MockCoreServiceServerMockRecorder) GetChildDeviceWithProxyAddress(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChildDeviceWithProxyAddress", reflect.TypeOf((*MockCoreServiceServer)(nil).GetChildDeviceWithProxyAddress), arg0, arg1)
+}
+
+// GetChildDevices mocks base method.
+func (m *MockCoreServiceServer) GetChildDevices(arg0 context.Context, arg1 *common.ID) (*voltha.Devices, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetChildDevices", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.Devices)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetChildDevices indicates an expected call of GetChildDevices.
+func (mr *MockCoreServiceServerMockRecorder) GetChildDevices(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChildDevices", reflect.TypeOf((*MockCoreServiceServer)(nil).GetChildDevices), arg0, arg1)
+}
+
+// GetDevice mocks base method.
+func (m *MockCoreServiceServer) GetDevice(arg0 context.Context, arg1 *common.ID) (*voltha.Device, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetDevice", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.Device)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetDevice indicates an expected call of GetDevice.
+func (mr *MockCoreServiceServerMockRecorder) GetDevice(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevice", reflect.TypeOf((*MockCoreServiceServer)(nil).GetDevice), arg0, arg1)
+}
+
+// GetDevicePort mocks base method.
+func (m *MockCoreServiceServer) GetDevicePort(arg0 context.Context, arg1 *inter_container.PortFilter) (*voltha.Port, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetDevicePort", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.Port)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetDevicePort indicates an expected call of GetDevicePort.
+func (mr *MockCoreServiceServerMockRecorder) GetDevicePort(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevicePort", reflect.TypeOf((*MockCoreServiceServer)(nil).GetDevicePort), arg0, arg1)
+}
+
+// GetHealthStatus mocks base method.
+func (m *MockCoreServiceServer) GetHealthStatus(arg0 context.Context, arg1 *empty.Empty) (*voltha.HealthStatus, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetHealthStatus", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.HealthStatus)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetHealthStatus indicates an expected call of GetHealthStatus.
+func (mr *MockCoreServiceServerMockRecorder) GetHealthStatus(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHealthStatus", reflect.TypeOf((*MockCoreServiceServer)(nil).GetHealthStatus), arg0, arg1)
+}
+
+// GetPorts mocks base method.
+func (m *MockCoreServiceServer) GetPorts(arg0 context.Context, arg1 *inter_container.PortFilter) (*voltha.Ports, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetPorts", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.Ports)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetPorts indicates an expected call of GetPorts.
+func (mr *MockCoreServiceServerMockRecorder) GetPorts(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPorts", reflect.TypeOf((*MockCoreServiceServer)(nil).GetPorts), arg0, arg1)
+}
+
+// ListDevicePorts mocks base method.
+func (m *MockCoreServiceServer) ListDevicePorts(arg0 context.Context, arg1 *common.ID) (*voltha.Ports, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ListDevicePorts", arg0, arg1)
+	ret0, _ := ret[0].(*voltha.Ports)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ListDevicePorts indicates an expected call of ListDevicePorts.
+func (mr *MockCoreServiceServerMockRecorder) ListDevicePorts(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDevicePorts", reflect.TypeOf((*MockCoreServiceServer)(nil).ListDevicePorts), arg0, arg1)
+}
+
+// PortCreated mocks base method.
+func (m *MockCoreServiceServer) PortCreated(arg0 context.Context, arg1 *voltha.Port) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "PortCreated", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// PortCreated indicates an expected call of PortCreated.
+func (mr *MockCoreServiceServerMockRecorder) PortCreated(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PortCreated", reflect.TypeOf((*MockCoreServiceServer)(nil).PortCreated), arg0, arg1)
+}
+
+// PortStateUpdate mocks base method.
+func (m *MockCoreServiceServer) PortStateUpdate(arg0 context.Context, arg1 *inter_container.PortState) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "PortStateUpdate", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// PortStateUpdate indicates an expected call of PortStateUpdate.
+func (mr *MockCoreServiceServerMockRecorder) PortStateUpdate(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PortStateUpdate", reflect.TypeOf((*MockCoreServiceServer)(nil).PortStateUpdate), arg0, arg1)
+}
+
+// PortsStateUpdate mocks base method.
+func (m *MockCoreServiceServer) PortsStateUpdate(arg0 context.Context, arg1 *inter_container.PortStateFilter) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "PortsStateUpdate", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// PortsStateUpdate indicates an expected call of PortsStateUpdate.
+func (mr *MockCoreServiceServerMockRecorder) PortsStateUpdate(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PortsStateUpdate", reflect.TypeOf((*MockCoreServiceServer)(nil).PortsStateUpdate), arg0, arg1)
+}
+
+// ReconcileChildDevices mocks base method.
+func (m *MockCoreServiceServer) ReconcileChildDevices(arg0 context.Context, arg1 *common.ID) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ReconcileChildDevices", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ReconcileChildDevices indicates an expected call of ReconcileChildDevices.
+func (mr *MockCoreServiceServerMockRecorder) ReconcileChildDevices(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileChildDevices", reflect.TypeOf((*MockCoreServiceServer)(nil).ReconcileChildDevices), arg0, arg1)
+}
+
+// RegisterAdapter mocks base method.
+func (m *MockCoreServiceServer) RegisterAdapter(arg0 context.Context, arg1 *inter_container.AdapterRegistration) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "RegisterAdapter", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// RegisterAdapter indicates an expected call of RegisterAdapter.
+func (mr *MockCoreServiceServerMockRecorder) RegisterAdapter(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterAdapter", reflect.TypeOf((*MockCoreServiceServer)(nil).RegisterAdapter), arg0, arg1)
+}
+
+// SendPacketIn mocks base method.
+func (m *MockCoreServiceServer) SendPacketIn(arg0 context.Context, arg1 *inter_container.PacketIn) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SendPacketIn", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SendPacketIn indicates an expected call of SendPacketIn.
+func (mr *MockCoreServiceServerMockRecorder) SendPacketIn(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPacketIn", reflect.TypeOf((*MockCoreServiceServer)(nil).SendPacketIn), arg0, arg1)
+}
+
+// UpdateImageDownload mocks base method.
+func (m *MockCoreServiceServer) UpdateImageDownload(arg0 context.Context, arg1 *voltha.ImageDownload) (*empty.Empty, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "UpdateImageDownload", arg0, arg1)
+	ret0, _ := ret[0].(*empty.Empty)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UpdateImageDownload indicates an expected call of UpdateImageDownload.
+func (mr *MockCoreServiceServerMockRecorder) UpdateImageDownload(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateImageDownload", reflect.TypeOf((*MockCoreServiceServer)(nil).UpdateImageDownload), arg0, arg1)
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/grpc/mock_grpc_server.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/grpc/mock_grpc_server.go
new file mode 100644
index 0000000..789fc8b
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/grpc/mock_grpc_server.go
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2021-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package grpc
+
+import (
+	"context"
+	"strconv"
+
+	vgrpc "github.com/opencord/voltha-lib-go/v7/pkg/grpc"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/probe"
+	"github.com/opencord/voltha-protos/v5/go/adapter_services"
+	"github.com/opencord/voltha-protos/v5/go/core"
+	"github.com/phayes/freeport"
+	"google.golang.org/grpc"
+)
+
+const (
+	mockGrpcServer = "mock-grpc-server"
+)
+
+type MockGRPCServer struct {
+	ApiEndpoint string
+	server      *vgrpc.GrpcServer
+	probe       *probe.Probe
+}
+
+func NewMockGRPCServer(ctx context.Context) (*MockGRPCServer, error) {
+	grpcPort, err := freeport.GetFreePort()
+	if err != nil {
+		return nil, err
+	}
+	s := &MockGRPCServer{
+		ApiEndpoint: "127.0.0.1:" + strconv.Itoa(grpcPort),
+		probe:       &probe.Probe{},
+	}
+	probePort, err := freeport.GetFreePort()
+	if err != nil {
+		return nil, err
+	}
+	probeEndpoint := "127.0.0.1:" + strconv.Itoa(probePort)
+	go s.probe.ListenAndServe(ctx, probeEndpoint)
+	s.probe.RegisterService(ctx, mockGrpcServer)
+	s.server = vgrpc.NewGrpcServer(s.ApiEndpoint, nil, false, s.probe)
+
+	logger.Infow(ctx, "mock-grpc-server-created", log.Fields{"endpoint": s.ApiEndpoint})
+	return s, nil
+}
+
+func (s *MockGRPCServer) AddCoreService(ctx context.Context, srv core.CoreServiceServer) {
+	s.server.AddService(func(server *grpc.Server) {
+		core.RegisterCoreServiceServer(server, srv)
+	})
+}
+
+func (s *MockGRPCServer) AddAdapterService(ctx context.Context, srv adapter_services.AdapterServiceServer) {
+	s.server.AddService(func(server *grpc.Server) {
+		adapter_services.RegisterAdapterServiceServer(server, srv)
+	})
+}
+
+func (s *MockGRPCServer) Start(ctx context.Context) {
+	s.probe.UpdateStatus(ctx, mockGrpcServer, probe.ServiceStatusRunning)
+	s.server.Start(ctx)
+	s.probe.UpdateStatus(ctx, mockGrpcServer, probe.ServiceStatusStopped)
+}
+
+func (s *MockGRPCServer) Stop() {
+	if s.server != nil {
+		s.server.Stop()
+	}
+}
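The file above exposes a small surface: NewMockGRPCServer picks free ports and wires a readiness probe, AddCoreService/AddAdapterService register gRPC services, and Start/Stop control the server. Below is a minimal sketch of combining it with the gomock-backed MockCoreServiceServer from the previous file; the helper name, package placement, and the assumption that both mocks are importable from the same test package are mine, not part of this patch.

```go
package grpc // sketch only; package qualifiers for the core-service mock are assumed

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
)

// startMockCore (hypothetical helper) registers a gomock-backed core service
// on the in-process gRPC server above and returns both for further expectations.
func startMockCore(ctx context.Context, t *testing.T) (*MockGRPCServer, *MockCoreServiceServer) {
	srv, err := NewMockGRPCServer(ctx)
	if err != nil {
		t.Fatalf("creating mock gRPC server: %v", err)
	}
	core := NewMockCoreServiceServer(gomock.NewController(t))
	srv.AddCoreService(ctx, core) // register before Start so the service is actually served
	go srv.Start(ctx)             // Start blocks until the server stops, so run it in the background
	t.Cleanup(srv.Stop)

	// The adapter code under test dials srv.ApiEndpoint to reach this mock core.
	return srv, core
}
```

The mock core stands in for the core gRPC endpoint during adapter tests, while per-test behaviour is programmed on the gomock recorder.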
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/pmmetrics/performance_metrics.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/pmmetrics/performance_metrics.go
similarity index 97%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/pmmetrics/performance_metrics.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/pmmetrics/performance_metrics.go
index 699e8f0..1f44fed 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/pmmetrics/performance_metrics.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/pmmetrics/performance_metrics.go
@@ -17,7 +17,7 @@
 package pmmetrics
 
 import (
-	"github.com/opencord/voltha-protos/v4/go/voltha"
+	"github.com/opencord/voltha-protos/v5/go/voltha"
 )
 
 // PmMetrics structure holds metric and device info
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/ponresourcemanager/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/ponresourcemanager/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/ponresourcemanager/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/ponresourcemanager/common.go
index 95ae664..b311bb3 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/ponresourcemanager/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/ponresourcemanager/common.go
@@ -16,7 +16,7 @@
 package ponresourcemanager
 
 import (
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/ponresourcemanager/ponresourcemanager.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/ponresourcemanager/ponresourcemanager.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/ponresourcemanager/ponresourcemanager.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/ponresourcemanager/ponresourcemanager.go
index 3970711..ba67aeb 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/ponresourcemanager/ponresourcemanager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/ponresourcemanager/ponresourcemanager.go
@@ -25,9 +25,9 @@
 	"time"
 
 	bitmap "github.com/boljen/go-bitmap"
-	"github.com/opencord/voltha-lib-go/v6/pkg/db"
-	"github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/db"
+	"github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 const (
@@ -38,7 +38,7 @@
 	GEMPORT_ID = "GEMPORT_ID"
 	FLOW_ID    = "FLOW_ID"
 
-	//Constants for passing command line arugments
+	//Constants for passing command line arguments
 	OLT_MODEL_ARG = "--olt_model"
 
 	PATH_PREFIX = "%s/resource_manager/{%s}"
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/probe/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/probe/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/common.go
index c812019..6508fd4 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/probe/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/common.go
@@ -16,7 +16,7 @@
 package probe
 
 import (
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/probe/probe.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
index 3af4bc8..84a2d5f 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/probe/probe.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
@@ -18,9 +18,10 @@
 import (
 	"context"
 	"fmt"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
 	"net/http"
 	"sync"
+
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 // ProbeContextKey used to fetch the Probe instance from a context
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/4QueueHybridProfileMap1.json b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/4QueueHybridProfileMap1.json
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/4QueueHybridProfileMap1.json
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/4QueueHybridProfileMap1.json
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/README.md b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/README.md
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/README.md
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/README.md
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/SingleQueueEponProfile.json b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/SingleQueueEponProfile.json
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/SingleQueueEponProfile.json
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/SingleQueueEponProfile.json
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/common.go
index d2e06e4..df1b877 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/common.go
@@ -16,7 +16,7 @@
 package techprofile
 
 import (
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/config.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/config.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/config.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/config.go
index 394bff6..17980fa 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/config.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/config.go
@@ -17,8 +17,9 @@
 
 import (
 	"fmt"
-	"github.com/opencord/voltha-lib-go/v6/pkg/db"
 	"time"
+
+	"github.com/opencord/voltha-lib-go/v7/pkg/db"
 )
 
 // tech profile default constants
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/tech_profile.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/tech_profile.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/tech_profile.go
index 50b3688..32f73b9 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/tech_profile.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/tech_profile.go
@@ -21,19 +21,18 @@
 	"context"
 	"errors"
 	"fmt"
-	"github.com/gogo/protobuf/proto"
-	"github.com/golang/protobuf/jsonpb"
-	"github.com/opencord/voltha-protos/v4/go/openolt"
 	"regexp"
 	"strconv"
 	"sync"
 	"time"
 
-	"github.com/opencord/voltha-lib-go/v6/pkg/db"
-
-	"github.com/opencord/voltha-lib-go/v6/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v6/pkg/log"
-	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-lib-go/v7/pkg/db"
+	"github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	"github.com/opencord/voltha-protos/v5/go/openolt"
+	tp_pb "github.com/opencord/voltha-protos/v5/go/tech_profile"
 )
 
 // Interface to pon resource manager APIs
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/tech_profile_if.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/tech_profile_if.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/tech_profile_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/tech_profile_if.go
index a455327..89a66b2 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/techprofile/tech_profile_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/techprofile/tech_profile_if.go
@@ -18,8 +18,9 @@
 
 import (
 	"context"
-	"github.com/opencord/voltha-lib-go/v6/pkg/db"
-	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
+
+	"github.com/opencord/voltha-lib-go/v7/pkg/db"
+	tp_pb "github.com/opencord/voltha-protos/v5/go/tech_profile"
 )
 
 type TechProfileIf interface {
diff --git a/vendor/github.com/opencord/voltha-lib-go/v6/pkg/version/version.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/version/version.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v6/pkg/version/version.go
rename to vendor/github.com/opencord/voltha-lib-go/v7/pkg/version/version.go
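The rename-only entries above are the mechanical half of the voltha-lib-go v6→v7 and voltha-protos v4→v5 bump; in the renamed sources the only change is the major version in the import paths (plus the matching go.mod requirements in the consuming adapter). A fragment mirroring the techprofile files, shown only to make the rewrite explicit:

```go
// Fragment illustrating the import-path rewrite; the blank uses below exist
// only so this sketch compiles on its own.
package techprofile

import (
	"github.com/opencord/voltha-lib-go/v7/pkg/log"               // was .../v6/pkg/log
	tp_pb "github.com/opencord/voltha-protos/v5/go/tech_profile" // was .../v4/go/tech_profile
)

var (
	_ = log.Fields{}  // referenced so the rewritten imports are used
	_ tp_pb.Direction // tech_profile type, referenced for the same reason
)
```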
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go
deleted file mode 100644
index 403d9e6..0000000
--- a/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go
+++ /dev/null
@@ -1,1681 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: voltha_protos/inter_container.proto
-
-package inter_container
-
-import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	any "github.com/golang/protobuf/ptypes/any"
-	timestamp "github.com/golang/protobuf/ptypes/timestamp"
-	common "github.com/opencord/voltha-protos/v4/go/common"
-	openflow_13 "github.com/opencord/voltha-protos/v4/go/openflow_13"
-	tech_profile "github.com/opencord/voltha-protos/v4/go/tech_profile"
-	voltha "github.com/opencord/voltha-protos/v4/go/voltha"
-	math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// ID from public import voltha_protos/common.proto
-type ID = common.ID
-
-// IDs from public import voltha_protos/common.proto
-type IDs = common.IDs
-
-// AdminState from public import voltha_protos/common.proto
-type AdminState = common.AdminState
-
-// OperStatus from public import voltha_protos/common.proto
-type OperStatus = common.OperStatus
-
-// ConnectStatus from public import voltha_protos/common.proto
-type ConnectStatus = common.ConnectStatus
-
-// OperationResp from public import voltha_protos/common.proto
-type OperationResp = common.OperationResp
-
-// ValueType from public import voltha_protos/common.proto
-type ValueType = common.ValueType
-
-// ValueSpecifier from public import voltha_protos/common.proto
-type ValueSpecifier = common.ValueSpecifier
-
-// ReturnValues from public import voltha_protos/common.proto
-type ReturnValues = common.ReturnValues
-
-// TestModeKeys from public import voltha_protos/common.proto
-type TestModeKeys = common.TestModeKeys
-
-var TestModeKeys_name = common.TestModeKeys_name
-var TestModeKeys_value = common.TestModeKeys_value
-
-const TestModeKeys_api_test = TestModeKeys(common.TestModeKeys_api_test)
-
-// AdminState_Types from public import voltha_protos/common.proto
-type AdminState_Types = common.AdminState_Types
-
-var AdminState_Types_name = common.AdminState_Types_name
-var AdminState_Types_value = common.AdminState_Types_value
-
-const AdminState_UNKNOWN = AdminState_Types(common.AdminState_UNKNOWN)
-const AdminState_PREPROVISIONED = AdminState_Types(common.AdminState_PREPROVISIONED)
-const AdminState_ENABLED = AdminState_Types(common.AdminState_ENABLED)
-const AdminState_DISABLED = AdminState_Types(common.AdminState_DISABLED)
-const AdminState_DOWNLOADING_IMAGE = AdminState_Types(common.AdminState_DOWNLOADING_IMAGE)
-
-// OperStatus_Types from public import voltha_protos/common.proto
-type OperStatus_Types = common.OperStatus_Types
-
-var OperStatus_Types_name = common.OperStatus_Types_name
-var OperStatus_Types_value = common.OperStatus_Types_value
-
-const OperStatus_UNKNOWN = OperStatus_Types(common.OperStatus_UNKNOWN)
-const OperStatus_DISCOVERED = OperStatus_Types(common.OperStatus_DISCOVERED)
-const OperStatus_ACTIVATING = OperStatus_Types(common.OperStatus_ACTIVATING)
-const OperStatus_TESTING = OperStatus_Types(common.OperStatus_TESTING)
-const OperStatus_ACTIVE = OperStatus_Types(common.OperStatus_ACTIVE)
-const OperStatus_FAILED = OperStatus_Types(common.OperStatus_FAILED)
-const OperStatus_RECONCILING = OperStatus_Types(common.OperStatus_RECONCILING)
-const OperStatus_RECONCILING_FAILED = OperStatus_Types(common.OperStatus_RECONCILING_FAILED)
-
-// ConnectStatus_Types from public import voltha_protos/common.proto
-type ConnectStatus_Types = common.ConnectStatus_Types
-
-var ConnectStatus_Types_name = common.ConnectStatus_Types_name
-var ConnectStatus_Types_value = common.ConnectStatus_Types_value
-
-const ConnectStatus_UNKNOWN = ConnectStatus_Types(common.ConnectStatus_UNKNOWN)
-const ConnectStatus_UNREACHABLE = ConnectStatus_Types(common.ConnectStatus_UNREACHABLE)
-const ConnectStatus_REACHABLE = ConnectStatus_Types(common.ConnectStatus_REACHABLE)
-
-// OperationResp_OperationReturnCode from public import voltha_protos/common.proto
-type OperationResp_OperationReturnCode = common.OperationResp_OperationReturnCode
-
-var OperationResp_OperationReturnCode_name = common.OperationResp_OperationReturnCode_name
-var OperationResp_OperationReturnCode_value = common.OperationResp_OperationReturnCode_value
-
-const OperationResp_OPERATION_SUCCESS = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_SUCCESS)
-const OperationResp_OPERATION_FAILURE = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_FAILURE)
-const OperationResp_OPERATION_UNSUPPORTED = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_UNSUPPORTED)
-const OperationResp_OPERATION_IN_PROGRESS = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_IN_PROGRESS)
-
-// ValueType_Type from public import voltha_protos/common.proto
-type ValueType_Type = common.ValueType_Type
-
-var ValueType_Type_name = common.ValueType_Type_name
-var ValueType_Type_value = common.ValueType_Type_value
-
-const ValueType_EMPTY = ValueType_Type(common.ValueType_EMPTY)
-const ValueType_DISTANCE = ValueType_Type(common.ValueType_DISTANCE)
-
-// DeviceGroup from public import voltha_protos/voltha.proto
-type DeviceGroup = voltha.DeviceGroup
-
-// DeviceGroups from public import voltha_protos/voltha.proto
-type DeviceGroups = voltha.DeviceGroups
-
-// EventFilterRuleKey from public import voltha_protos/voltha.proto
-type EventFilterRuleKey = voltha.EventFilterRuleKey
-
-// EventFilterRule from public import voltha_protos/voltha.proto
-type EventFilterRule = voltha.EventFilterRule
-
-// EventFilter from public import voltha_protos/voltha.proto
-type EventFilter = voltha.EventFilter
-
-// EventFilters from public import voltha_protos/voltha.proto
-type EventFilters = voltha.EventFilters
-
-// CoreInstance from public import voltha_protos/voltha.proto
-type CoreInstance = voltha.CoreInstance
-
-// CoreInstances from public import voltha_protos/voltha.proto
-type CoreInstances = voltha.CoreInstances
-
-// OmciTestRequest from public import voltha_protos/voltha.proto
-type OmciTestRequest = voltha.OmciTestRequest
-
-// TestResponse from public import voltha_protos/voltha.proto
-type TestResponse = voltha.TestResponse
-
-// ValueSet from public import voltha_protos/voltha.proto
-type ValueSet = voltha.ValueSet
-type ValueSet_AlarmConfig = voltha.ValueSet_AlarmConfig
-
-// Voltha from public import voltha_protos/voltha.proto
-type Voltha = voltha.Voltha
-
-// SelfTestResponse from public import voltha_protos/voltha.proto
-type SelfTestResponse = voltha.SelfTestResponse
-
-// OfAgentSubscriber from public import voltha_protos/voltha.proto
-type OfAgentSubscriber = voltha.OfAgentSubscriber
-
-// Membership from public import voltha_protos/voltha.proto
-type Membership = voltha.Membership
-
-// DeviceImageDownloadRequest from public import voltha_protos/voltha.proto
-type DeviceImageDownloadRequest = voltha.DeviceImageDownloadRequest
-
-// DeviceImageRequest from public import voltha_protos/voltha.proto
-type DeviceImageRequest = voltha.DeviceImageRequest
-
-// DeviceImageResponse from public import voltha_protos/voltha.proto
-type DeviceImageResponse = voltha.DeviceImageResponse
-
-// FlowMetadata from public import voltha_protos/voltha.proto
-type FlowMetadata = voltha.FlowMetadata
-
-// EventFilterRuleKey_EventFilterRuleType from public import voltha_protos/voltha.proto
-type EventFilterRuleKey_EventFilterRuleType = voltha.EventFilterRuleKey_EventFilterRuleType
-
-var EventFilterRuleKey_EventFilterRuleType_name = voltha.EventFilterRuleKey_EventFilterRuleType_name
-var EventFilterRuleKey_EventFilterRuleType_value = voltha.EventFilterRuleKey_EventFilterRuleType_value
-
-const EventFilterRuleKey_filter_all = EventFilterRuleKey_EventFilterRuleType(voltha.EventFilterRuleKey_filter_all)
-const EventFilterRuleKey_category = EventFilterRuleKey_EventFilterRuleType(voltha.EventFilterRuleKey_category)
-const EventFilterRuleKey_sub_category = EventFilterRuleKey_EventFilterRuleType(voltha.EventFilterRuleKey_sub_category)
-const EventFilterRuleKey_kpi_event_type = EventFilterRuleKey_EventFilterRuleType(voltha.EventFilterRuleKey_kpi_event_type)
-const EventFilterRuleKey_config_event_type = EventFilterRuleKey_EventFilterRuleType(voltha.EventFilterRuleKey_config_event_type)
-const EventFilterRuleKey_device_event_type = EventFilterRuleKey_EventFilterRuleType(voltha.EventFilterRuleKey_device_event_type)
-
-// TestResponse_TestResponseResult from public import voltha_protos/voltha.proto
-type TestResponse_TestResponseResult = voltha.TestResponse_TestResponseResult
-
-var TestResponse_TestResponseResult_name = voltha.TestResponse_TestResponseResult_name
-var TestResponse_TestResponseResult_value = voltha.TestResponse_TestResponseResult_value
-
-const TestResponse_SUCCESS = TestResponse_TestResponseResult(voltha.TestResponse_SUCCESS)
-const TestResponse_FAILURE = TestResponse_TestResponseResult(voltha.TestResponse_FAILURE)
-
-// SelfTestResponse_SelfTestResult from public import voltha_protos/voltha.proto
-type SelfTestResponse_SelfTestResult = voltha.SelfTestResponse_SelfTestResult
-
-var SelfTestResponse_SelfTestResult_name = voltha.SelfTestResponse_SelfTestResult_name
-var SelfTestResponse_SelfTestResult_value = voltha.SelfTestResponse_SelfTestResult_value
-
-const SelfTestResponse_SUCCESS = SelfTestResponse_SelfTestResult(voltha.SelfTestResponse_SUCCESS)
-const SelfTestResponse_FAILURE = SelfTestResponse_SelfTestResult(voltha.SelfTestResponse_FAILURE)
-const SelfTestResponse_NOT_SUPPORTED = SelfTestResponse_SelfTestResult(voltha.SelfTestResponse_NOT_SUPPORTED)
-const SelfTestResponse_UNKNOWN_ERROR = SelfTestResponse_SelfTestResult(voltha.SelfTestResponse_UNKNOWN_ERROR)
-
-// LogicalPortId from public import voltha_protos/logical_device.proto
-type LogicalPortId = voltha.LogicalPortId
-
-// LogicalPort from public import voltha_protos/logical_device.proto
-type LogicalPort = voltha.LogicalPort
-
-// LogicalPorts from public import voltha_protos/logical_device.proto
-type LogicalPorts = voltha.LogicalPorts
-
-// LogicalDevice from public import voltha_protos/logical_device.proto
-type LogicalDevice = voltha.LogicalDevice
-
-// LogicalDevices from public import voltha_protos/logical_device.proto
-type LogicalDevices = voltha.LogicalDevices
-
-type MessageType int32
-
-const (
-	MessageType_REQUEST           MessageType = 0
-	MessageType_RESPONSE          MessageType = 1
-	MessageType_DEVICE_DISCOVERED MessageType = 2
-)
-
-var MessageType_name = map[int32]string{
-	0: "REQUEST",
-	1: "RESPONSE",
-	2: "DEVICE_DISCOVERED",
-}
-
-var MessageType_value = map[string]int32{
-	"REQUEST":           0,
-	"RESPONSE":          1,
-	"DEVICE_DISCOVERED": 2,
-}
-
-func (x MessageType) String() string {
-	return proto.EnumName(MessageType_name, int32(x))
-}
-
-func (MessageType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{0}
-}
-
-type ErrorCodeCodes int32
-
-const (
-	ErrorCode_UNSUPPORTED_REQUEST ErrorCodeCodes = 0
-	ErrorCode_INVALID_PARAMETERS  ErrorCodeCodes = 1
-	ErrorCode_DEADLINE_EXCEEDED   ErrorCodeCodes = 2
-)
-
-var ErrorCodeCodes_name = map[int32]string{
-	0: "UNSUPPORTED_REQUEST",
-	1: "INVALID_PARAMETERS",
-	2: "DEADLINE_EXCEEDED",
-}
-
-var ErrorCodeCodes_value = map[string]int32{
-	"UNSUPPORTED_REQUEST": 0,
-	"INVALID_PARAMETERS":  1,
-	"DEADLINE_EXCEEDED":   2,
-}
-
-func (x ErrorCodeCodes) String() string {
-	return proto.EnumName(ErrorCodeCodes_name, int32(x))
-}
-
-func (ErrorCodeCodes) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{4, 0}
-}
-
-type InterAdapterMessageType_Types int32
-
-const (
-	InterAdapterMessageType_FLOW_REQUEST                  InterAdapterMessageType_Types = 0
-	InterAdapterMessageType_FLOW_RESPONSE                 InterAdapterMessageType_Types = 1
-	InterAdapterMessageType_OMCI_REQUEST                  InterAdapterMessageType_Types = 2
-	InterAdapterMessageType_OMCI_RESPONSE                 InterAdapterMessageType_Types = 3
-	InterAdapterMessageType_METRICS_REQUEST               InterAdapterMessageType_Types = 4
-	InterAdapterMessageType_METRICS_RESPONSE              InterAdapterMessageType_Types = 5
-	InterAdapterMessageType_ONU_IND_REQUEST               InterAdapterMessageType_Types = 6
-	InterAdapterMessageType_ONU_IND_RESPONSE              InterAdapterMessageType_Types = 7
-	InterAdapterMessageType_TECH_PROFILE_DOWNLOAD_REQUEST InterAdapterMessageType_Types = 8
-	InterAdapterMessageType_DELETE_GEM_PORT_REQUEST       InterAdapterMessageType_Types = 9
-	InterAdapterMessageType_DELETE_TCONT_REQUEST          InterAdapterMessageType_Types = 10
-)
-
-var InterAdapterMessageType_Types_name = map[int32]string{
-	0:  "FLOW_REQUEST",
-	1:  "FLOW_RESPONSE",
-	2:  "OMCI_REQUEST",
-	3:  "OMCI_RESPONSE",
-	4:  "METRICS_REQUEST",
-	5:  "METRICS_RESPONSE",
-	6:  "ONU_IND_REQUEST",
-	7:  "ONU_IND_RESPONSE",
-	8:  "TECH_PROFILE_DOWNLOAD_REQUEST",
-	9:  "DELETE_GEM_PORT_REQUEST",
-	10: "DELETE_TCONT_REQUEST",
-}
-
-var InterAdapterMessageType_Types_value = map[string]int32{
-	"FLOW_REQUEST":                  0,
-	"FLOW_RESPONSE":                 1,
-	"OMCI_REQUEST":                  2,
-	"OMCI_RESPONSE":                 3,
-	"METRICS_REQUEST":               4,
-	"METRICS_RESPONSE":              5,
-	"ONU_IND_REQUEST":               6,
-	"ONU_IND_RESPONSE":              7,
-	"TECH_PROFILE_DOWNLOAD_REQUEST": 8,
-	"DELETE_GEM_PORT_REQUEST":       9,
-	"DELETE_TCONT_REQUEST":          10,
-}
-
-func (x InterAdapterMessageType_Types) String() string {
-	return proto.EnumName(InterAdapterMessageType_Types_name, int32(x))
-}
-
-func (InterAdapterMessageType_Types) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{13, 0}
-}
-
-type StrType struct {
-	Val                  string   `protobuf:"bytes,1,opt,name=val,proto3" json:"val,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *StrType) Reset()         { *m = StrType{} }
-func (m *StrType) String() string { return proto.CompactTextString(m) }
-func (*StrType) ProtoMessage()    {}
-func (*StrType) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{0}
-}
-
-func (m *StrType) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_StrType.Unmarshal(m, b)
-}
-func (m *StrType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_StrType.Marshal(b, m, deterministic)
-}
-func (m *StrType) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_StrType.Merge(m, src)
-}
-func (m *StrType) XXX_Size() int {
-	return xxx_messageInfo_StrType.Size(m)
-}
-func (m *StrType) XXX_DiscardUnknown() {
-	xxx_messageInfo_StrType.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StrType proto.InternalMessageInfo
-
-func (m *StrType) GetVal() string {
-	if m != nil {
-		return m.Val
-	}
-	return ""
-}
-
-type IntType struct {
-	Val                  int64    `protobuf:"varint,1,opt,name=val,proto3" json:"val,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *IntType) Reset()         { *m = IntType{} }
-func (m *IntType) String() string { return proto.CompactTextString(m) }
-func (*IntType) ProtoMessage()    {}
-func (*IntType) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{1}
-}
-
-func (m *IntType) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_IntType.Unmarshal(m, b)
-}
-func (m *IntType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_IntType.Marshal(b, m, deterministic)
-}
-func (m *IntType) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_IntType.Merge(m, src)
-}
-func (m *IntType) XXX_Size() int {
-	return xxx_messageInfo_IntType.Size(m)
-}
-func (m *IntType) XXX_DiscardUnknown() {
-	xxx_messageInfo_IntType.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IntType proto.InternalMessageInfo
-
-func (m *IntType) GetVal() int64 {
-	if m != nil {
-		return m.Val
-	}
-	return 0
-}
-
-type BoolType struct {
-	Val                  bool     `protobuf:"varint,1,opt,name=val,proto3" json:"val,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *BoolType) Reset()         { *m = BoolType{} }
-func (m *BoolType) String() string { return proto.CompactTextString(m) }
-func (*BoolType) ProtoMessage()    {}
-func (*BoolType) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{2}
-}
-
-func (m *BoolType) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_BoolType.Unmarshal(m, b)
-}
-func (m *BoolType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_BoolType.Marshal(b, m, deterministic)
-}
-func (m *BoolType) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_BoolType.Merge(m, src)
-}
-func (m *BoolType) XXX_Size() int {
-	return xxx_messageInfo_BoolType.Size(m)
-}
-func (m *BoolType) XXX_DiscardUnknown() {
-	xxx_messageInfo_BoolType.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BoolType proto.InternalMessageInfo
-
-func (m *BoolType) GetVal() bool {
-	if m != nil {
-		return m.Val
-	}
-	return false
-}
-
-type Packet struct {
-	Payload              []byte   `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Packet) Reset()         { *m = Packet{} }
-func (m *Packet) String() string { return proto.CompactTextString(m) }
-func (*Packet) ProtoMessage()    {}
-func (*Packet) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{3}
-}
-
-func (m *Packet) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Packet.Unmarshal(m, b)
-}
-func (m *Packet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Packet.Marshal(b, m, deterministic)
-}
-func (m *Packet) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Packet.Merge(m, src)
-}
-func (m *Packet) XXX_Size() int {
-	return xxx_messageInfo_Packet.Size(m)
-}
-func (m *Packet) XXX_DiscardUnknown() {
-	xxx_messageInfo_Packet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Packet proto.InternalMessageInfo
-
-func (m *Packet) GetPayload() []byte {
-	if m != nil {
-		return m.Payload
-	}
-	return nil
-}
-
-type ErrorCode struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *ErrorCode) Reset()         { *m = ErrorCode{} }
-func (m *ErrorCode) String() string { return proto.CompactTextString(m) }
-func (*ErrorCode) ProtoMessage()    {}
-func (*ErrorCode) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{4}
-}
-
-func (m *ErrorCode) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ErrorCode.Unmarshal(m, b)
-}
-func (m *ErrorCode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ErrorCode.Marshal(b, m, deterministic)
-}
-func (m *ErrorCode) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ErrorCode.Merge(m, src)
-}
-func (m *ErrorCode) XXX_Size() int {
-	return xxx_messageInfo_ErrorCode.Size(m)
-}
-func (m *ErrorCode) XXX_DiscardUnknown() {
-	xxx_messageInfo_ErrorCode.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ErrorCode proto.InternalMessageInfo
-
-type Error struct {
-	Code                 ErrorCodeCodes `protobuf:"varint,1,opt,name=code,proto3,enum=voltha.ErrorCodeCodes" json:"code,omitempty"`
-	Reason               string         `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
-	XXX_unrecognized     []byte         `json:"-"`
-	XXX_sizecache        int32          `json:"-"`
-}
-
-func (m *Error) Reset()         { *m = Error{} }
-func (m *Error) String() string { return proto.CompactTextString(m) }
-func (*Error) ProtoMessage()    {}
-func (*Error) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{5}
-}
-
-func (m *Error) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Error.Unmarshal(m, b)
-}
-func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Error.Marshal(b, m, deterministic)
-}
-func (m *Error) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Error.Merge(m, src)
-}
-func (m *Error) XXX_Size() int {
-	return xxx_messageInfo_Error.Size(m)
-}
-func (m *Error) XXX_DiscardUnknown() {
-	xxx_messageInfo_Error.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Error proto.InternalMessageInfo
-
-func (m *Error) GetCode() ErrorCodeCodes {
-	if m != nil {
-		return m.Code
-	}
-	return ErrorCode_UNSUPPORTED_REQUEST
-}
-
-func (m *Error) GetReason() string {
-	if m != nil {
-		return m.Reason
-	}
-	return ""
-}
-
-type Header struct {
-	Id                   string               `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Type                 MessageType          `protobuf:"varint,2,opt,name=type,proto3,enum=voltha.MessageType" json:"type,omitempty"`
-	FromTopic            string               `protobuf:"bytes,3,opt,name=from_topic,json=fromTopic,proto3" json:"from_topic,omitempty"`
-	ToTopic              string               `protobuf:"bytes,4,opt,name=to_topic,json=toTopic,proto3" json:"to_topic,omitempty"`
-	KeyTopic             string               `protobuf:"bytes,5,opt,name=key_topic,json=keyTopic,proto3" json:"key_topic,omitempty"`
-	Timestamp            *timestamp.Timestamp `protobuf:"bytes,6,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
-	XXX_unrecognized     []byte               `json:"-"`
-	XXX_sizecache        int32                `json:"-"`
-}
-
-func (m *Header) Reset()         { *m = Header{} }
-func (m *Header) String() string { return proto.CompactTextString(m) }
-func (*Header) ProtoMessage()    {}
-func (*Header) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{6}
-}
-
-func (m *Header) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Header.Unmarshal(m, b)
-}
-func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Header.Marshal(b, m, deterministic)
-}
-func (m *Header) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Header.Merge(m, src)
-}
-func (m *Header) XXX_Size() int {
-	return xxx_messageInfo_Header.Size(m)
-}
-func (m *Header) XXX_DiscardUnknown() {
-	xxx_messageInfo_Header.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Header proto.InternalMessageInfo
-
-func (m *Header) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func (m *Header) GetType() MessageType {
-	if m != nil {
-		return m.Type
-	}
-	return MessageType_REQUEST
-}
-
-func (m *Header) GetFromTopic() string {
-	if m != nil {
-		return m.FromTopic
-	}
-	return ""
-}
-
-func (m *Header) GetToTopic() string {
-	if m != nil {
-		return m.ToTopic
-	}
-	return ""
-}
-
-func (m *Header) GetKeyTopic() string {
-	if m != nil {
-		return m.KeyTopic
-	}
-	return ""
-}
-
-func (m *Header) GetTimestamp() *timestamp.Timestamp {
-	if m != nil {
-		return m.Timestamp
-	}
-	return nil
-}
-
-type Argument struct {
-	Key                  string   `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	Value                *any.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Argument) Reset()         { *m = Argument{} }
-func (m *Argument) String() string { return proto.CompactTextString(m) }
-func (*Argument) ProtoMessage()    {}
-func (*Argument) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{7}
-}
-
-func (m *Argument) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Argument.Unmarshal(m, b)
-}
-func (m *Argument) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Argument.Marshal(b, m, deterministic)
-}
-func (m *Argument) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Argument.Merge(m, src)
-}
-func (m *Argument) XXX_Size() int {
-	return xxx_messageInfo_Argument.Size(m)
-}
-func (m *Argument) XXX_DiscardUnknown() {
-	xxx_messageInfo_Argument.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Argument proto.InternalMessageInfo
-
-func (m *Argument) GetKey() string {
-	if m != nil {
-		return m.Key
-	}
-	return ""
-}
-
-func (m *Argument) GetValue() *any.Any {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-type InterContainerMessage struct {
-	Header               *Header  `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	Body                 *any.Any `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *InterContainerMessage) Reset()         { *m = InterContainerMessage{} }
-func (m *InterContainerMessage) String() string { return proto.CompactTextString(m) }
-func (*InterContainerMessage) ProtoMessage()    {}
-func (*InterContainerMessage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{8}
-}
-
-func (m *InterContainerMessage) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InterContainerMessage.Unmarshal(m, b)
-}
-func (m *InterContainerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InterContainerMessage.Marshal(b, m, deterministic)
-}
-func (m *InterContainerMessage) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InterContainerMessage.Merge(m, src)
-}
-func (m *InterContainerMessage) XXX_Size() int {
-	return xxx_messageInfo_InterContainerMessage.Size(m)
-}
-func (m *InterContainerMessage) XXX_DiscardUnknown() {
-	xxx_messageInfo_InterContainerMessage.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InterContainerMessage proto.InternalMessageInfo
-
-func (m *InterContainerMessage) GetHeader() *Header {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *InterContainerMessage) GetBody() *any.Any {
-	if m != nil {
-		return m.Body
-	}
-	return nil
-}
-
-type InterContainerRequestBody struct {
-	Rpc                  string      `protobuf:"bytes,2,opt,name=rpc,proto3" json:"rpc,omitempty"`
-	Args                 []*Argument `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
-	ResponseRequired     bool        `protobuf:"varint,4,opt,name=response_required,json=responseRequired,proto3" json:"response_required,omitempty"`
-	ReplyToTopic         string      `protobuf:"bytes,5,opt,name=reply_to_topic,json=replyToTopic,proto3" json:"reply_to_topic,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
-	XXX_unrecognized     []byte      `json:"-"`
-	XXX_sizecache        int32       `json:"-"`
-}
-
-func (m *InterContainerRequestBody) Reset()         { *m = InterContainerRequestBody{} }
-func (m *InterContainerRequestBody) String() string { return proto.CompactTextString(m) }
-func (*InterContainerRequestBody) ProtoMessage()    {}
-func (*InterContainerRequestBody) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{9}
-}
-
-func (m *InterContainerRequestBody) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InterContainerRequestBody.Unmarshal(m, b)
-}
-func (m *InterContainerRequestBody) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InterContainerRequestBody.Marshal(b, m, deterministic)
-}
-func (m *InterContainerRequestBody) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InterContainerRequestBody.Merge(m, src)
-}
-func (m *InterContainerRequestBody) XXX_Size() int {
-	return xxx_messageInfo_InterContainerRequestBody.Size(m)
-}
-func (m *InterContainerRequestBody) XXX_DiscardUnknown() {
-	xxx_messageInfo_InterContainerRequestBody.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InterContainerRequestBody proto.InternalMessageInfo
-
-func (m *InterContainerRequestBody) GetRpc() string {
-	if m != nil {
-		return m.Rpc
-	}
-	return ""
-}
-
-func (m *InterContainerRequestBody) GetArgs() []*Argument {
-	if m != nil {
-		return m.Args
-	}
-	return nil
-}
-
-func (m *InterContainerRequestBody) GetResponseRequired() bool {
-	if m != nil {
-		return m.ResponseRequired
-	}
-	return false
-}
-
-func (m *InterContainerRequestBody) GetReplyToTopic() string {
-	if m != nil {
-		return m.ReplyToTopic
-	}
-	return ""
-}
-
-type InterContainerResponseBody struct {
-	Success              bool     `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
-	Result               *any.Any `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *InterContainerResponseBody) Reset()         { *m = InterContainerResponseBody{} }
-func (m *InterContainerResponseBody) String() string { return proto.CompactTextString(m) }
-func (*InterContainerResponseBody) ProtoMessage()    {}
-func (*InterContainerResponseBody) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{10}
-}
-
-func (m *InterContainerResponseBody) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InterContainerResponseBody.Unmarshal(m, b)
-}
-func (m *InterContainerResponseBody) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InterContainerResponseBody.Marshal(b, m, deterministic)
-}
-func (m *InterContainerResponseBody) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InterContainerResponseBody.Merge(m, src)
-}
-func (m *InterContainerResponseBody) XXX_Size() int {
-	return xxx_messageInfo_InterContainerResponseBody.Size(m)
-}
-func (m *InterContainerResponseBody) XXX_DiscardUnknown() {
-	xxx_messageInfo_InterContainerResponseBody.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InterContainerResponseBody proto.InternalMessageInfo
-
-func (m *InterContainerResponseBody) GetSuccess() bool {
-	if m != nil {
-		return m.Success
-	}
-	return false
-}
-
-func (m *InterContainerResponseBody) GetResult() *any.Any {
-	if m != nil {
-		return m.Result
-	}
-	return nil
-}
-
-type SwitchCapability struct {
-	Desc                 *openflow_13.OfpDesc           `protobuf:"bytes,1,opt,name=desc,proto3" json:"desc,omitempty"`
-	SwitchFeatures       *openflow_13.OfpSwitchFeatures `protobuf:"bytes,2,opt,name=switch_features,json=switchFeatures,proto3" json:"switch_features,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
-	XXX_unrecognized     []byte                         `json:"-"`
-	XXX_sizecache        int32                          `json:"-"`
-}
-
-func (m *SwitchCapability) Reset()         { *m = SwitchCapability{} }
-func (m *SwitchCapability) String() string { return proto.CompactTextString(m) }
-func (*SwitchCapability) ProtoMessage()    {}
-func (*SwitchCapability) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{11}
-}
-
-func (m *SwitchCapability) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_SwitchCapability.Unmarshal(m, b)
-}
-func (m *SwitchCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_SwitchCapability.Marshal(b, m, deterministic)
-}
-func (m *SwitchCapability) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SwitchCapability.Merge(m, src)
-}
-func (m *SwitchCapability) XXX_Size() int {
-	return xxx_messageInfo_SwitchCapability.Size(m)
-}
-func (m *SwitchCapability) XXX_DiscardUnknown() {
-	xxx_messageInfo_SwitchCapability.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SwitchCapability proto.InternalMessageInfo
-
-func (m *SwitchCapability) GetDesc() *openflow_13.OfpDesc {
-	if m != nil {
-		return m.Desc
-	}
-	return nil
-}
-
-func (m *SwitchCapability) GetSwitchFeatures() *openflow_13.OfpSwitchFeatures {
-	if m != nil {
-		return m.SwitchFeatures
-	}
-	return nil
-}
-
-type DeviceDiscovered struct {
-	Id                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	ParentId             string   `protobuf:"bytes,2,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
-	DeviceType           string   `protobuf:"bytes,3,opt,name=device_type,json=deviceType,proto3" json:"device_type,omitempty"`
-	Publisher            string   `protobuf:"bytes,4,opt,name=publisher,proto3" json:"publisher,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *DeviceDiscovered) Reset()         { *m = DeviceDiscovered{} }
-func (m *DeviceDiscovered) String() string { return proto.CompactTextString(m) }
-func (*DeviceDiscovered) ProtoMessage()    {}
-func (*DeviceDiscovered) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{12}
-}
-
-func (m *DeviceDiscovered) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_DeviceDiscovered.Unmarshal(m, b)
-}
-func (m *DeviceDiscovered) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_DeviceDiscovered.Marshal(b, m, deterministic)
-}
-func (m *DeviceDiscovered) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DeviceDiscovered.Merge(m, src)
-}
-func (m *DeviceDiscovered) XXX_Size() int {
-	return xxx_messageInfo_DeviceDiscovered.Size(m)
-}
-func (m *DeviceDiscovered) XXX_DiscardUnknown() {
-	xxx_messageInfo_DeviceDiscovered.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceDiscovered proto.InternalMessageInfo
-
-func (m *DeviceDiscovered) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func (m *DeviceDiscovered) GetParentId() string {
-	if m != nil {
-		return m.ParentId
-	}
-	return ""
-}
-
-func (m *DeviceDiscovered) GetDeviceType() string {
-	if m != nil {
-		return m.DeviceType
-	}
-	return ""
-}
-
-func (m *DeviceDiscovered) GetPublisher() string {
-	if m != nil {
-		return m.Publisher
-	}
-	return ""
-}
-
-type InterAdapterMessageType struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *InterAdapterMessageType) Reset()         { *m = InterAdapterMessageType{} }
-func (m *InterAdapterMessageType) String() string { return proto.CompactTextString(m) }
-func (*InterAdapterMessageType) ProtoMessage()    {}
-func (*InterAdapterMessageType) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{13}
-}
-
-func (m *InterAdapterMessageType) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InterAdapterMessageType.Unmarshal(m, b)
-}
-func (m *InterAdapterMessageType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InterAdapterMessageType.Marshal(b, m, deterministic)
-}
-func (m *InterAdapterMessageType) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InterAdapterMessageType.Merge(m, src)
-}
-func (m *InterAdapterMessageType) XXX_Size() int {
-	return xxx_messageInfo_InterAdapterMessageType.Size(m)
-}
-func (m *InterAdapterMessageType) XXX_DiscardUnknown() {
-	xxx_messageInfo_InterAdapterMessageType.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InterAdapterMessageType proto.InternalMessageInfo
-
-type InterAdapterHeader struct {
-	Id                   string                        `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Type                 InterAdapterMessageType_Types `protobuf:"varint,2,opt,name=type,proto3,enum=voltha.InterAdapterMessageType_Types" json:"type,omitempty"`
-	FromTopic            string                        `protobuf:"bytes,3,opt,name=from_topic,json=fromTopic,proto3" json:"from_topic,omitempty"`
-	ToTopic              string                        `protobuf:"bytes,4,opt,name=to_topic,json=toTopic,proto3" json:"to_topic,omitempty"`
-	ToDeviceId           string                        `protobuf:"bytes,5,opt,name=to_device_id,json=toDeviceId,proto3" json:"to_device_id,omitempty"`
-	ProxyDeviceId        string                        `protobuf:"bytes,6,opt,name=proxy_device_id,json=proxyDeviceId,proto3" json:"proxy_device_id,omitempty"`
-	Timestamp            *timestamp.Timestamp          `protobuf:"bytes,7,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                      `json:"-"`
-	XXX_unrecognized     []byte                        `json:"-"`
-	XXX_sizecache        int32                         `json:"-"`
-}
-
-func (m *InterAdapterHeader) Reset()         { *m = InterAdapterHeader{} }
-func (m *InterAdapterHeader) String() string { return proto.CompactTextString(m) }
-func (*InterAdapterHeader) ProtoMessage()    {}
-func (*InterAdapterHeader) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{14}
-}
-
-func (m *InterAdapterHeader) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InterAdapterHeader.Unmarshal(m, b)
-}
-func (m *InterAdapterHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InterAdapterHeader.Marshal(b, m, deterministic)
-}
-func (m *InterAdapterHeader) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InterAdapterHeader.Merge(m, src)
-}
-func (m *InterAdapterHeader) XXX_Size() int {
-	return xxx_messageInfo_InterAdapterHeader.Size(m)
-}
-func (m *InterAdapterHeader) XXX_DiscardUnknown() {
-	xxx_messageInfo_InterAdapterHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InterAdapterHeader proto.InternalMessageInfo
-
-func (m *InterAdapterHeader) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func (m *InterAdapterHeader) GetType() InterAdapterMessageType_Types {
-	if m != nil {
-		return m.Type
-	}
-	return InterAdapterMessageType_FLOW_REQUEST
-}
-
-func (m *InterAdapterHeader) GetFromTopic() string {
-	if m != nil {
-		return m.FromTopic
-	}
-	return ""
-}
-
-func (m *InterAdapterHeader) GetToTopic() string {
-	if m != nil {
-		return m.ToTopic
-	}
-	return ""
-}
-
-func (m *InterAdapterHeader) GetToDeviceId() string {
-	if m != nil {
-		return m.ToDeviceId
-	}
-	return ""
-}
-
-func (m *InterAdapterHeader) GetProxyDeviceId() string {
-	if m != nil {
-		return m.ProxyDeviceId
-	}
-	return ""
-}
-
-func (m *InterAdapterHeader) GetTimestamp() *timestamp.Timestamp {
-	if m != nil {
-		return m.Timestamp
-	}
-	return nil
-}
-
-type InterAdapterOmciMessage struct {
-	Message              []byte                      `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
-	ConnectStatus        common.ConnectStatus_Types  `protobuf:"varint,2,opt,name=connect_status,json=connectStatus,proto3,enum=common.ConnectStatus_Types" json:"connect_status,omitempty"`
-	ProxyAddress         *voltha.Device_ProxyAddress `protobuf:"bytes,3,opt,name=proxy_address,json=proxyAddress,proto3" json:"proxy_address,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                    `json:"-"`
-	XXX_unrecognized     []byte                      `json:"-"`
-	XXX_sizecache        int32                       `json:"-"`
-}
-
-func (m *InterAdapterOmciMessage) Reset()         { *m = InterAdapterOmciMessage{} }
-func (m *InterAdapterOmciMessage) String() string { return proto.CompactTextString(m) }
-func (*InterAdapterOmciMessage) ProtoMessage()    {}
-func (*InterAdapterOmciMessage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{15}
-}
-
-func (m *InterAdapterOmciMessage) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InterAdapterOmciMessage.Unmarshal(m, b)
-}
-func (m *InterAdapterOmciMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InterAdapterOmciMessage.Marshal(b, m, deterministic)
-}
-func (m *InterAdapterOmciMessage) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InterAdapterOmciMessage.Merge(m, src)
-}
-func (m *InterAdapterOmciMessage) XXX_Size() int {
-	return xxx_messageInfo_InterAdapterOmciMessage.Size(m)
-}
-func (m *InterAdapterOmciMessage) XXX_DiscardUnknown() {
-	xxx_messageInfo_InterAdapterOmciMessage.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InterAdapterOmciMessage proto.InternalMessageInfo
-
-func (m *InterAdapterOmciMessage) GetMessage() []byte {
-	if m != nil {
-		return m.Message
-	}
-	return nil
-}
-
-func (m *InterAdapterOmciMessage) GetConnectStatus() common.ConnectStatus_Types {
-	if m != nil {
-		return m.ConnectStatus
-	}
-	return common.ConnectStatus_UNKNOWN
-}
-
-func (m *InterAdapterOmciMessage) GetProxyAddress() *voltha.Device_ProxyAddress {
-	if m != nil {
-		return m.ProxyAddress
-	}
-	return nil
-}
-
-type InterAdapterTechProfileInstanceRequestMessage struct {
-	TpInstancePath       string   `protobuf:"bytes,1,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
-	ParentDeviceId       string   `protobuf:"bytes,2,opt,name=parent_device_id,json=parentDeviceId,proto3" json:"parent_device_id,omitempty"`
-	ParentPonPort        uint32   `protobuf:"varint,3,opt,name=parent_pon_port,json=parentPonPort,proto3" json:"parent_pon_port,omitempty"`
-	OnuId                uint32   `protobuf:"varint,4,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
-	UniId                uint32   `protobuf:"varint,5,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *InterAdapterTechProfileInstanceRequestMessage) Reset() {
-	*m = InterAdapterTechProfileInstanceRequestMessage{}
-}
-func (m *InterAdapterTechProfileInstanceRequestMessage) String() string {
-	return proto.CompactTextString(m)
-}
-func (*InterAdapterTechProfileInstanceRequestMessage) ProtoMessage() {}
-func (*InterAdapterTechProfileInstanceRequestMessage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{16}
-}
-
-func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.Unmarshal(m, b)
-}
-func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.Marshal(b, m, deterministic)
-}
-func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.Merge(m, src)
-}
-func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_Size() int {
-	return xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.Size(m)
-}
-func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_DiscardUnknown() {
-	xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage proto.InternalMessageInfo
-
-func (m *InterAdapterTechProfileInstanceRequestMessage) GetTpInstancePath() string {
-	if m != nil {
-		return m.TpInstancePath
-	}
-	return ""
-}
-
-func (m *InterAdapterTechProfileInstanceRequestMessage) GetParentDeviceId() string {
-	if m != nil {
-		return m.ParentDeviceId
-	}
-	return ""
-}
-
-func (m *InterAdapterTechProfileInstanceRequestMessage) GetParentPonPort() uint32 {
-	if m != nil {
-		return m.ParentPonPort
-	}
-	return 0
-}
-
-func (m *InterAdapterTechProfileInstanceRequestMessage) GetOnuId() uint32 {
-	if m != nil {
-		return m.OnuId
-	}
-	return 0
-}
-
-func (m *InterAdapterTechProfileInstanceRequestMessage) GetUniId() uint32 {
-	if m != nil {
-		return m.UniId
-	}
-	return 0
-}
-
-type InterAdapterTechProfileDownloadMessage struct {
-	UniId          uint32 `protobuf:"varint,1,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
-	TpInstancePath string `protobuf:"bytes,2,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
-	// Types that are valid to be assigned to TechTpInstance:
-	//	*InterAdapterTechProfileDownloadMessage_TpInstance
-	//	*InterAdapterTechProfileDownloadMessage_EponTpInstance
-	TechTpInstance       isInterAdapterTechProfileDownloadMessage_TechTpInstance `protobuf_oneof:"tech_tp_instance"`
-	XXX_NoUnkeyedLiteral struct{}                                                `json:"-"`
-	XXX_unrecognized     []byte                                                  `json:"-"`
-	XXX_sizecache        int32                                                   `json:"-"`
-}
-
-func (m *InterAdapterTechProfileDownloadMessage) Reset() {
-	*m = InterAdapterTechProfileDownloadMessage{}
-}
-func (m *InterAdapterTechProfileDownloadMessage) String() string { return proto.CompactTextString(m) }
-func (*InterAdapterTechProfileDownloadMessage) ProtoMessage()    {}
-func (*InterAdapterTechProfileDownloadMessage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{17}
-}
-
-func (m *InterAdapterTechProfileDownloadMessage) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InterAdapterTechProfileDownloadMessage.Unmarshal(m, b)
-}
-func (m *InterAdapterTechProfileDownloadMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InterAdapterTechProfileDownloadMessage.Marshal(b, m, deterministic)
-}
-func (m *InterAdapterTechProfileDownloadMessage) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InterAdapterTechProfileDownloadMessage.Merge(m, src)
-}
-func (m *InterAdapterTechProfileDownloadMessage) XXX_Size() int {
-	return xxx_messageInfo_InterAdapterTechProfileDownloadMessage.Size(m)
-}
-func (m *InterAdapterTechProfileDownloadMessage) XXX_DiscardUnknown() {
-	xxx_messageInfo_InterAdapterTechProfileDownloadMessage.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InterAdapterTechProfileDownloadMessage proto.InternalMessageInfo
-
-func (m *InterAdapterTechProfileDownloadMessage) GetUniId() uint32 {
-	if m != nil {
-		return m.UniId
-	}
-	return 0
-}
-
-func (m *InterAdapterTechProfileDownloadMessage) GetTpInstancePath() string {
-	if m != nil {
-		return m.TpInstancePath
-	}
-	return ""
-}
-
-type isInterAdapterTechProfileDownloadMessage_TechTpInstance interface {
-	isInterAdapterTechProfileDownloadMessage_TechTpInstance()
-}
-
-type InterAdapterTechProfileDownloadMessage_TpInstance struct {
-	TpInstance *tech_profile.TechProfileInstance `protobuf:"bytes,3,opt,name=tp_instance,json=tpInstance,proto3,oneof"`
-}
-
-type InterAdapterTechProfileDownloadMessage_EponTpInstance struct {
-	EponTpInstance *tech_profile.EponTechProfileInstance `protobuf:"bytes,4,opt,name=epon_tp_instance,json=eponTpInstance,proto3,oneof"`
-}
-
-func (*InterAdapterTechProfileDownloadMessage_TpInstance) isInterAdapterTechProfileDownloadMessage_TechTpInstance() {
-}
-
-func (*InterAdapterTechProfileDownloadMessage_EponTpInstance) isInterAdapterTechProfileDownloadMessage_TechTpInstance() {
-}
-
-func (m *InterAdapterTechProfileDownloadMessage) GetTechTpInstance() isInterAdapterTechProfileDownloadMessage_TechTpInstance {
-	if m != nil {
-		return m.TechTpInstance
-	}
-	return nil
-}
-
-func (m *InterAdapterTechProfileDownloadMessage) GetTpInstance() *tech_profile.TechProfileInstance {
-	if x, ok := m.GetTechTpInstance().(*InterAdapterTechProfileDownloadMessage_TpInstance); ok {
-		return x.TpInstance
-	}
-	return nil
-}
-
-func (m *InterAdapterTechProfileDownloadMessage) GetEponTpInstance() *tech_profile.EponTechProfileInstance {
-	if x, ok := m.GetTechTpInstance().(*InterAdapterTechProfileDownloadMessage_EponTpInstance); ok {
-		return x.EponTpInstance
-	}
-	return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*InterAdapterTechProfileDownloadMessage) XXX_OneofWrappers() []interface{} {
-	return []interface{}{
-		(*InterAdapterTechProfileDownloadMessage_TpInstance)(nil),
-		(*InterAdapterTechProfileDownloadMessage_EponTpInstance)(nil),
-	}
-}
-
-type InterAdapterDeleteGemPortMessage struct {
-	UniId                uint32   `protobuf:"varint,1,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
-	TpInstancePath       string   `protobuf:"bytes,2,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
-	GemPortId            uint32   `protobuf:"varint,3,opt,name=gem_port_id,json=gemPortId,proto3" json:"gem_port_id,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *InterAdapterDeleteGemPortMessage) Reset()         { *m = InterAdapterDeleteGemPortMessage{} }
-func (m *InterAdapterDeleteGemPortMessage) String() string { return proto.CompactTextString(m) }
-func (*InterAdapterDeleteGemPortMessage) ProtoMessage()    {}
-func (*InterAdapterDeleteGemPortMessage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{18}
-}
-
-func (m *InterAdapterDeleteGemPortMessage) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InterAdapterDeleteGemPortMessage.Unmarshal(m, b)
-}
-func (m *InterAdapterDeleteGemPortMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InterAdapterDeleteGemPortMessage.Marshal(b, m, deterministic)
-}
-func (m *InterAdapterDeleteGemPortMessage) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InterAdapterDeleteGemPortMessage.Merge(m, src)
-}
-func (m *InterAdapterDeleteGemPortMessage) XXX_Size() int {
-	return xxx_messageInfo_InterAdapterDeleteGemPortMessage.Size(m)
-}
-func (m *InterAdapterDeleteGemPortMessage) XXX_DiscardUnknown() {
-	xxx_messageInfo_InterAdapterDeleteGemPortMessage.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InterAdapterDeleteGemPortMessage proto.InternalMessageInfo
-
-func (m *InterAdapterDeleteGemPortMessage) GetUniId() uint32 {
-	if m != nil {
-		return m.UniId
-	}
-	return 0
-}
-
-func (m *InterAdapterDeleteGemPortMessage) GetTpInstancePath() string {
-	if m != nil {
-		return m.TpInstancePath
-	}
-	return ""
-}
-
-func (m *InterAdapterDeleteGemPortMessage) GetGemPortId() uint32 {
-	if m != nil {
-		return m.GemPortId
-	}
-	return 0
-}
-
-type InterAdapterDeleteTcontMessage struct {
-	UniId                uint32   `protobuf:"varint,1,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
-	TpInstancePath       string   `protobuf:"bytes,2,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
-	AllocId              uint32   `protobuf:"varint,3,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *InterAdapterDeleteTcontMessage) Reset()         { *m = InterAdapterDeleteTcontMessage{} }
-func (m *InterAdapterDeleteTcontMessage) String() string { return proto.CompactTextString(m) }
-func (*InterAdapterDeleteTcontMessage) ProtoMessage()    {}
-func (*InterAdapterDeleteTcontMessage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{19}
-}
-
-func (m *InterAdapterDeleteTcontMessage) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InterAdapterDeleteTcontMessage.Unmarshal(m, b)
-}
-func (m *InterAdapterDeleteTcontMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InterAdapterDeleteTcontMessage.Marshal(b, m, deterministic)
-}
-func (m *InterAdapterDeleteTcontMessage) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InterAdapterDeleteTcontMessage.Merge(m, src)
-}
-func (m *InterAdapterDeleteTcontMessage) XXX_Size() int {
-	return xxx_messageInfo_InterAdapterDeleteTcontMessage.Size(m)
-}
-func (m *InterAdapterDeleteTcontMessage) XXX_DiscardUnknown() {
-	xxx_messageInfo_InterAdapterDeleteTcontMessage.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InterAdapterDeleteTcontMessage proto.InternalMessageInfo
-
-func (m *InterAdapterDeleteTcontMessage) GetUniId() uint32 {
-	if m != nil {
-		return m.UniId
-	}
-	return 0
-}
-
-func (m *InterAdapterDeleteTcontMessage) GetTpInstancePath() string {
-	if m != nil {
-		return m.TpInstancePath
-	}
-	return ""
-}
-
-func (m *InterAdapterDeleteTcontMessage) GetAllocId() uint32 {
-	if m != nil {
-		return m.AllocId
-	}
-	return 0
-}
-
-type InterAdapterResponseBody struct {
-	Status bool `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"`
-	// Types that are valid to be assigned to Payload:
-	//	*InterAdapterResponseBody_Body
-	//	*InterAdapterResponseBody_Omci
-	Payload              isInterAdapterResponseBody_Payload `protobuf_oneof:"payload"`
-	XXX_NoUnkeyedLiteral struct{}                           `json:"-"`
-	XXX_unrecognized     []byte                             `json:"-"`
-	XXX_sizecache        int32                              `json:"-"`
-}
-
-func (m *InterAdapterResponseBody) Reset()         { *m = InterAdapterResponseBody{} }
-func (m *InterAdapterResponseBody) String() string { return proto.CompactTextString(m) }
-func (*InterAdapterResponseBody) ProtoMessage()    {}
-func (*InterAdapterResponseBody) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{20}
-}
-
-func (m *InterAdapterResponseBody) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InterAdapterResponseBody.Unmarshal(m, b)
-}
-func (m *InterAdapterResponseBody) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InterAdapterResponseBody.Marshal(b, m, deterministic)
-}
-func (m *InterAdapterResponseBody) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InterAdapterResponseBody.Merge(m, src)
-}
-func (m *InterAdapterResponseBody) XXX_Size() int {
-	return xxx_messageInfo_InterAdapterResponseBody.Size(m)
-}
-func (m *InterAdapterResponseBody) XXX_DiscardUnknown() {
-	xxx_messageInfo_InterAdapterResponseBody.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InterAdapterResponseBody proto.InternalMessageInfo
-
-func (m *InterAdapterResponseBody) GetStatus() bool {
-	if m != nil {
-		return m.Status
-	}
-	return false
-}
-
-type isInterAdapterResponseBody_Payload interface {
-	isInterAdapterResponseBody_Payload()
-}
-
-type InterAdapterResponseBody_Body struct {
-	Body *any.Any `protobuf:"bytes,2,opt,name=body,proto3,oneof"`
-}
-
-type InterAdapterResponseBody_Omci struct {
-	Omci *InterAdapterOmciMessage `protobuf:"bytes,3,opt,name=omci,proto3,oneof"`
-}
-
-func (*InterAdapterResponseBody_Body) isInterAdapterResponseBody_Payload() {}
-
-func (*InterAdapterResponseBody_Omci) isInterAdapterResponseBody_Payload() {}
-
-func (m *InterAdapterResponseBody) GetPayload() isInterAdapterResponseBody_Payload {
-	if m != nil {
-		return m.Payload
-	}
-	return nil
-}
-
-func (m *InterAdapterResponseBody) GetBody() *any.Any {
-	if x, ok := m.GetPayload().(*InterAdapterResponseBody_Body); ok {
-		return x.Body
-	}
-	return nil
-}
-
-func (m *InterAdapterResponseBody) GetOmci() *InterAdapterOmciMessage {
-	if x, ok := m.GetPayload().(*InterAdapterResponseBody_Omci); ok {
-		return x.Omci
-	}
-	return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*InterAdapterResponseBody) XXX_OneofWrappers() []interface{} {
-	return []interface{}{
-		(*InterAdapterResponseBody_Body)(nil),
-		(*InterAdapterResponseBody_Omci)(nil),
-	}
-}
-
-type InterAdapterMessage struct {
-	Header               *InterAdapterHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	Body                 *any.Any            `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
-	XXX_unrecognized     []byte              `json:"-"`
-	XXX_sizecache        int32               `json:"-"`
-}
-
-func (m *InterAdapterMessage) Reset()         { *m = InterAdapterMessage{} }
-func (m *InterAdapterMessage) String() string { return proto.CompactTextString(m) }
-func (*InterAdapterMessage) ProtoMessage()    {}
-func (*InterAdapterMessage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{21}
-}
-
-func (m *InterAdapterMessage) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InterAdapterMessage.Unmarshal(m, b)
-}
-func (m *InterAdapterMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InterAdapterMessage.Marshal(b, m, deterministic)
-}
-func (m *InterAdapterMessage) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InterAdapterMessage.Merge(m, src)
-}
-func (m *InterAdapterMessage) XXX_Size() int {
-	return xxx_messageInfo_InterAdapterMessage.Size(m)
-}
-func (m *InterAdapterMessage) XXX_DiscardUnknown() {
-	xxx_messageInfo_InterAdapterMessage.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InterAdapterMessage proto.InternalMessageInfo
-
-func (m *InterAdapterMessage) GetHeader() *InterAdapterHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *InterAdapterMessage) GetBody() *any.Any {
-	if m != nil {
-		return m.Body
-	}
-	return nil
-}
-
-func init() {
-	proto.RegisterEnum("voltha.MessageType", MessageType_name, MessageType_value)
-	proto.RegisterEnum("voltha.ErrorCodeCodes", ErrorCodeCodes_name, ErrorCodeCodes_value)
-	proto.RegisterEnum("voltha.InterAdapterMessageType_Types", InterAdapterMessageType_Types_name, InterAdapterMessageType_Types_value)
-	proto.RegisterType((*StrType)(nil), "voltha.StrType")
-	proto.RegisterType((*IntType)(nil), "voltha.IntType")
-	proto.RegisterType((*BoolType)(nil), "voltha.BoolType")
-	proto.RegisterType((*Packet)(nil), "voltha.Packet")
-	proto.RegisterType((*ErrorCode)(nil), "voltha.ErrorCode")
-	proto.RegisterType((*Error)(nil), "voltha.Error")
-	proto.RegisterType((*Header)(nil), "voltha.Header")
-	proto.RegisterType((*Argument)(nil), "voltha.Argument")
-	proto.RegisterType((*InterContainerMessage)(nil), "voltha.InterContainerMessage")
-	proto.RegisterType((*InterContainerRequestBody)(nil), "voltha.InterContainerRequestBody")
-	proto.RegisterType((*InterContainerResponseBody)(nil), "voltha.InterContainerResponseBody")
-	proto.RegisterType((*SwitchCapability)(nil), "voltha.SwitchCapability")
-	proto.RegisterType((*DeviceDiscovered)(nil), "voltha.DeviceDiscovered")
-	proto.RegisterType((*InterAdapterMessageType)(nil), "voltha.InterAdapterMessageType")
-	proto.RegisterType((*InterAdapterHeader)(nil), "voltha.InterAdapterHeader")
-	proto.RegisterType((*InterAdapterOmciMessage)(nil), "voltha.InterAdapterOmciMessage")
-	proto.RegisterType((*InterAdapterTechProfileInstanceRequestMessage)(nil), "voltha.InterAdapterTechProfileInstanceRequestMessage")
-	proto.RegisterType((*InterAdapterTechProfileDownloadMessage)(nil), "voltha.InterAdapterTechProfileDownloadMessage")
-	proto.RegisterType((*InterAdapterDeleteGemPortMessage)(nil), "voltha.InterAdapterDeleteGemPortMessage")
-	proto.RegisterType((*InterAdapterDeleteTcontMessage)(nil), "voltha.InterAdapterDeleteTcontMessage")
-	proto.RegisterType((*InterAdapterResponseBody)(nil), "voltha.InterAdapterResponseBody")
-	proto.RegisterType((*InterAdapterMessage)(nil), "voltha.InterAdapterMessage")
-}
-
-func init() {
-	proto.RegisterFile("voltha_protos/inter_container.proto", fileDescriptor_941f0031a549667f)
-}
-
-var fileDescriptor_941f0031a549667f = []byte{
-	// 1468 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xdd, 0x72, 0xdb, 0x44,
-	0x14, 0x8e, 0x1d, 0xff, 0x1e, 0x27, 0xae, 0xbb, 0x69, 0x1a, 0x27, 0xe9, 0x4f, 0x2a, 0xda, 0x12,
-	0x5a, 0xea, 0x0c, 0x29, 0x0c, 0x70, 0x05, 0x8e, 0xad, 0x36, 0x9a, 0x49, 0x6c, 0x55, 0x56, 0x5a,
-	0x86, 0x61, 0x46, 0xa3, 0x48, 0x1b, 0x5b, 0x13, 0x59, 0xab, 0x4a, 0xab, 0x14, 0xcf, 0x30, 0xcc,
-	0x30, 0xdc, 0xf0, 0x04, 0xdc, 0x31, 0xc3, 0x15, 0xef, 0xc0, 0x6b, 0x70, 0xc7, 0xdb, 0x30, 0xfb,
-	0x23, 0x5b, 0x76, 0x13, 0x18, 0xa0, 0x77, 0xda, 0xf3, 0x7d, 0xfb, 0xed, 0xee, 0x39, 0x7b, 0xce,
-	0x1e, 0xc1, 0x7b, 0x17, 0xc4, 0xa7, 0x23, 0xdb, 0x0a, 0x23, 0x42, 0x49, 0xbc, 0xe7, 0x05, 0x14,
-	0x47, 0x96, 0x43, 0x02, 0x6a, 0x7b, 0x01, 0x8e, 0x5a, 0xdc, 0x8c, 0x4a, 0x82, 0xb4, 0xb5, 0x35,
-	0x4f, 0x76, 0xc8, 0x78, 0x4c, 0x02, 0xc1, 0x59, 0xc4, 0xc4, 0x48, 0x62, 0x9b, 0x43, 0x42, 0x86,
-	0x3e, 0xde, 0xe3, 0xa3, 0xd3, 0xe4, 0x6c, 0xcf, 0x0e, 0x26, 0x12, 0xba, 0x3b, 0x3f, 0x8d, 0x84,
-	0x38, 0x38, 0xf3, 0xc9, 0x1b, 0xeb, 0xa3, 0xa7, 0x92, 0xa0, 0xcc, 0x13, 0x7c, 0x32, 0xf4, 0x1c,
-	0xdb, 0xb7, 0x5c, 0x7c, 0xe1, 0x39, 0x38, 0x15, 0x59, 0xd4, 0xa7, 0xde, 0x18, 0xc7, 0xd4, 0x1e,
-	0x87, 0x92, 0xb0, 0x33, 0x2f, 0x42, 0xb1, 0x33, 0x62, 0xdf, 0x67, 0x9e, 0x2f, 0x25, 0x94, 0x6d,
-	0x28, 0x0f, 0x68, 0x64, 0x4e, 0x42, 0x8c, 0x1a, 0xb0, 0x7c, 0x61, 0xfb, 0xcd, 0xdc, 0x4e, 0x6e,
-	0xb7, 0x6a, 0xb0, 0x4f, 0x06, 0x6a, 0x01, 0x5d, 0x04, 0x97, 0x05, 0x78, 0x0b, 0x2a, 0x07, 0x84,
-	0xf8, 0x8b, 0x68, 0x45, 0xa0, 0x0a, 0x94, 0x74, 0xdb, 0x39, 0xc7, 0x14, 0x35, 0xa1, 0x1c, 0xda,
-	0x13, 0x9f, 0xd8, 0x2e, 0xc7, 0x57, 0x8c, 0x74, 0xa8, 0x7c, 0x03, 0x55, 0x35, 0x8a, 0x48, 0xd4,
-	0x21, 0x2e, 0x56, 0xfa, 0x50, 0x74, 0x88, 0x8b, 0x63, 0xb4, 0x01, 0x6b, 0x27, 0xbd, 0xc1, 0x89,
-	0xae, 0xf7, 0x0d, 0x53, 0xed, 0x5a, 0x86, 0xfa, 0xe2, 0x44, 0x1d, 0x98, 0x8d, 0x25, 0x74, 0x13,
-	0x90, 0xd6, 0x7b, 0xd9, 0x3e, 0xd2, 0xba, 0x96, 0xde, 0x36, 0xda, 0xc7, 0xaa, 0xa9, 0x1a, 0x83,
-	0x46, 0x0e, 0xad, 0xc3, 0xf5, 0xae, 0xda, 0xee, 0x1e, 0x69, 0x3d, 0xd5, 0x52, 0xbf, 0xea, 0xa8,
-	0x6a, 0x57, 0xed, 0x36, 0xf2, 0xca, 0x11, 0x14, 0xb9, 0x3a, 0x7a, 0x0c, 0x05, 0xa6, 0xcc, 0x57,
-	0xaf, 0xef, 0x6f, 0xb4, 0x64, 0x88, 0xa6, 0x4b, 0xb7, 0xf8, 0xba, 0x06, 0x27, 0xa1, 0x9b, 0x50,
-	0x8a, 0xb0, 0x1d, 0x93, 0xa0, 0x99, 0xe7, 0x7e, 0x90, 0x23, 0xe5, 0x8f, 0x1c, 0x94, 0x0e, 0xb1,
-	0xed, 0xe2, 0x08, 0xd5, 0x21, 0xef, 0xb9, 0xd2, 0x4d, 0x79, 0xcf, 0x45, 0xef, 0x43, 0x81, 0x4e,
-	0x42, 0xcc, 0x27, 0xd4, 0xf7, 0xd7, 0x52, 0xfd, 0x63, 0x1c, 0xc7, 0xf6, 0x10, 0x33, 0xff, 0x18,
-	0x9c, 0x80, 0x6e, 0x03, 0x9c, 0x45, 0x64, 0x6c, 0x51, 0x12, 0x7a, 0x4e, 0x73, 0x99, 0x0b, 0x54,
-	0x99, 0xc5, 0x64, 0x06, 0xb4, 0x09, 0x15, 0x4a, 0x24, 0x58, 0xe0, 0x60, 0x99, 0x12, 0x01, 0x6d,
-	0x43, 0xf5, 0x1c, 0x4f, 0x24, 0x56, 0xe4, 0x58, 0xe5, 0x1c, 0x4f, 0x04, 0xf8, 0x19, 0x54, 0xa7,
-	0x71, 0x6f, 0x96, 0x76, 0x72, 0xbb, 0xb5, 0xfd, 0xad, 0x96, 0xb8, 0x19, 0xad, 0xf4, 0x66, 0xb4,
-	0xcc, 0x94, 0x61, 0xcc, 0xc8, 0xca, 0x21, 0x54, 0xda, 0xd1, 0x30, 0x19, 0xe3, 0x80, 0xb2, 0x10,
-	0x9e, 0xe3, 0x49, 0x1a, 0xfd, 0x73, 0x3c, 0x41, 0x8f, 0xa0, 0x78, 0x61, 0xfb, 0x89, 0x38, 0x58,
-	0x6d, 0xff, 0xc6, 0x5b, 0x9a, 0xed, 0x60, 0x62, 0x08, 0x8a, 0xe2, 0xc1, 0xba, 0xc6, 0x52, 0xa8,
-	0x93, 0x66, 0x90, 0x3c, 0x3d, 0x7a, 0x08, 0xa5, 0x11, 0x77, 0x1b, 0x57, 0xae, 0xed, 0xd7, 0x53,
-	0xf7, 0x08, 0x67, 0x1a, 0x12, 0x45, 0xbb, 0x50, 0x38, 0x25, 0xee, 0xe4, 0x6f, 0xd7, 0xe2, 0x0c,
-	0xe5, 0xb7, 0x1c, 0x6c, 0xce, 0xaf, 0x65, 0xe0, 0xd7, 0x09, 0x8e, 0xe9, 0x01, 0x71, 0x27, 0xec,
-	0x18, 0x51, 0xe8, 0xc8, 0xe0, 0xb1, 0x4f, 0x74, 0x1f, 0x0a, 0x76, 0x34, 0x8c, 0x9b, 0xcb, 0x3b,
-	0xcb, 0xbb, 0xb5, 0xfd, 0x46, 0xba, 0x7e, 0x7a, 0x70, 0x83, 0xa3, 0xe8, 0x31, 0x5c, 0x8f, 0x70,
-	0x1c, 0x92, 0x20, 0xc6, 0x56, 0x84, 0x5f, 0x27, 0x5e, 0x84, 0x5d, 0x1e, 0x85, 0x8a, 0xd1, 0x48,
-	0x01, 0x43, 0xda, 0xd1, 0x7d, 0xa8, 0x47, 0x38, 0xf4, 0x59, 0x40, 0xe6, 0x62, 0xb2, 0xc2, 0xad,
-	0xa6, 0x08, 0x9a, 0xe2, 0xc2, 0xd6, 0xe2, 0x3e, 0x85, 0x0e, 0xdf, 0x68, 0x13, 0xca, 0x71, 0xe2,
-	0x38, 0x38, 0x8e, 0x65, 0xda, 0xa4, 0x43, 0xf4, 0x21, 0xbb, 0x82, 0x71, 0xe2, 0x53, 0x7e, 0x45,
-	0xae, 0x72, 0x86, 0xe4, 0x28, 0x3f, 0xe5, 0xa0, 0x31, 0x78, 0xe3, 0x51, 0x67, 0xd4, 0xb1, 0x43,
-	0xfb, 0xd4, 0xf3, 0x3d, 0x3a, 0x41, 0x1f, 0x40, 0xc1, 0xc5, 0xb1, 0x23, 0x7d, 0xbe, 0xde, 0xca,
-	0x96, 0x17, 0x72, 0x16, 0x5a, 0x0c, 0x34, 0x38, 0x05, 0x69, 0x70, 0x2d, 0xe6, 0xd3, 0xad, 0x33,
-	0x6c, 0xd3, 0x24, 0xc2, 0xb1, 0x8c, 0xc1, 0xce, 0x5b, 0xb3, 0x16, 0x78, 0x46, 0x5d, 0x18, 0x9e,
-	0xc9, 0xb1, 0xf2, 0x3d, 0x34, 0xba, 0xbc, 0x3c, 0x75, 0xbd, 0xd8, 0x21, 0x17, 0x98, 0xb9, 0x6a,
-	0x31, 0x59, 0xb6, 0xa1, 0x1a, 0xda, 0x11, 0x0e, 0xa8, 0xe5, 0xb9, 0x32, 0x4a, 0x15, 0x61, 0xd0,
-	0x5c, 0x74, 0x17, 0x6a, 0xa2, 0xbe, 0x59, 0x3c, 0xa1, 0x44, 0x86, 0x80, 0x30, 0xf1, 0x3a, 0x73,
-	0x0b, 0xaa, 0x61, 0x72, 0xea, 0x7b, 0xf1, 0x08, 0x47, 0x32, 0x47, 0x66, 0x06, 0xe5, 0x97, 0x3c,
-	0x6c, 0x70, 0x8f, 0xb7, 0x5d, 0x3b, 0xa4, 0xd3, 0x3b, 0xc8, 0x66, 0x2a, 0x3f, 0xe4, 0xa1, 0xc8,
-	0x3e, 0x62, 0xd4, 0x80, 0x95, 0x67, 0x47, 0xfd, 0x57, 0x99, 0xc2, 0x72, 0x1d, 0x56, 0xa5, 0x65,
-	0xa0, 0xf7, 0x7b, 0x03, 0xb5, 0x91, 0x63, 0xa4, 0xfe, 0x71, 0x47, 0x9b, 0x92, 0xf2, 0x8c, 0x24,
-	0x2d, 0x92, 0xb4, 0x8c, 0xd6, 0xe0, 0xda, 0xb1, 0x6a, 0x1a, 0x5a, 0x67, 0x30, 0xe5, 0x15, 0xd0,
-	0x0d, 0x68, 0xcc, 0x8c, 0x92, 0x5a, 0x64, 0xd4, 0x7e, 0xef, 0xc4, 0xd2, 0x7a, 0xb3, 0x82, 0x56,
-	0x62, 0xd4, 0x99, 0x51, 0x52, 0xcb, 0xe8, 0x1e, 0xdc, 0x36, 0xd5, 0xce, 0xa1, 0xa5, 0x1b, 0xfd,
-	0x67, 0xda, 0x91, 0x6a, 0x75, 0xfb, 0xaf, 0x7a, 0x47, 0xfd, 0xf6, 0x6c, 0x62, 0x05, 0x6d, 0xc3,
-	0x46, 0x57, 0x3d, 0x52, 0x4d, 0xd5, 0x7a, 0xae, 0x1e, 0x5b, 0xac, 0x50, 0x4e, 0xc1, 0x2a, 0x6a,
-	0xc2, 0x0d, 0x09, 0x9a, 0x9d, 0x7e, 0x6f, 0x86, 0x00, 0xf3, 0x0f, 0xca, 0xfa, 0xe7, 0x8a, 0x7a,
-	0xf6, 0xf9, 0x5c, 0x3d, 0x7b, 0x90, 0x26, 0xcc, 0x15, 0x9e, 0x6d, 0x71, 0xaf, 0xfe, 0xef, 0x0a,
-	0xb7, 0x03, 0x2b, 0x94, 0xc8, 0xd7, 0x8d, 0x5d, 0x0d, 0x91, 0x50, 0x40, 0x89, 0xb8, 0x51, 0x9a,
-	0x8b, 0x1e, 0xc2, 0xb5, 0x30, 0x22, 0xdf, 0x4e, 0x32, 0xa4, 0x12, 0x27, 0xad, 0x72, 0xf3, 0x94,
-	0x37, 0x57, 0x0e, 0xcb, 0xff, 0xa6, 0x1c, 0xfe, 0x9e, 0x9b, 0xbf, 0x3f, 0xfd, 0xb1, 0xe3, 0xa5,
-	0x75, 0xac, 0x09, 0xe5, 0xb1, 0xf8, 0x4c, 0x5f, 0x31, 0x39, 0x44, 0x07, 0x50, 0x77, 0x48, 0x10,
-	0x60, 0x87, 0x5a, 0x31, 0xb5, 0x69, 0x12, 0x4b, 0xc7, 0x6d, 0xb7, 0x64, 0x9f, 0xd0, 0x11, 0xe8,
-	0x80, 0x83, 0xd2, 0x5d, 0xab, 0x4e, 0xd6, 0x88, 0xbe, 0x04, 0x71, 0x08, 0xcb, 0x76, 0xdd, 0x88,
-	0x95, 0x04, 0x91, 0xf9, 0xdb, 0xa9, 0xef, 0xc5, 0xe1, 0x5a, 0x3a, 0xe3, 0xb4, 0x05, 0xc5, 0x58,
-	0x09, 0x33, 0x23, 0xe5, 0xcf, 0x1c, 0x3c, 0xc9, 0xee, 0xdd, 0xc4, 0xce, 0x48, 0x17, 0x2f, 0xbd,
-	0x16, 0xc4, 0xd4, 0x0e, 0x1c, 0x2c, 0xcb, 0x64, 0x7a, 0xa2, 0x5d, 0x68, 0xd0, 0xd0, 0xf2, 0x24,
-	0x68, 0x85, 0x36, 0x1d, 0xc9, 0x4b, 0x50, 0xa7, 0x61, 0x3a, 0x47, 0xb7, 0xe9, 0x88, 0x31, 0x65,
-	0xce, 0xce, 0x5c, 0x2f, 0x52, 0xb7, 0x2e, 0xec, 0x73, 0x31, 0x12, 0xcc, 0x90, 0x04, 0x56, 0x48,
-	0x22, 0x51, 0xc3, 0x56, 0x8d, 0x55, 0x61, 0xd6, 0x49, 0xa0, 0x93, 0x88, 0xa2, 0x75, 0x28, 0x91,
-	0x20, 0x61, 0x3a, 0x05, 0x0e, 0x17, 0x49, 0x90, 0x68, 0x2e, 0x33, 0x27, 0x81, 0x97, 0x86, 0x7f,
-	0xd5, 0x28, 0x26, 0x81, 0xa7, 0xb9, 0xca, 0xcf, 0x79, 0x78, 0x78, 0xc5, 0xd9, 0xba, 0xe4, 0x4d,
-	0xc0, 0x7a, 0x89, 0xf4, 0x50, 0x33, 0x85, 0x5c, 0x46, 0xe1, 0xd2, 0xb3, 0xe6, 0x2f, 0x3d, 0x6b,
-	0x17, 0x6a, 0x19, 0xa6, 0x8c, 0xc3, 0xbd, 0xd6, 0x5c, 0xe7, 0x74, 0x89, 0x6f, 0x0f, 0x97, 0x0c,
-	0x98, 0x29, 0xa1, 0x17, 0xd0, 0xc0, 0xcc, 0x03, 0x59, 0xa9, 0x02, 0x97, 0x7a, 0x30, 0x2f, 0xa5,
-	0x86, 0x24, 0xb8, 0x5c, 0xae, 0xce, 0x04, 0xcc, 0xa9, 0xe4, 0x01, 0x82, 0x06, 0x9f, 0x99, 0x91,
-	0x54, 0x7e, 0xcc, 0xc1, 0x4e, 0xd6, 0x31, 0x5d, 0xec, 0x63, 0x8a, 0x9f, 0xe3, 0x31, 0x73, 0xf2,
-	0x3b, 0x73, 0xc9, 0x1d, 0xa8, 0x0d, 0xf1, 0x98, 0x47, 0x93, 0xa9, 0x88, 0x80, 0x56, 0x87, 0x62,
-	0x15, 0xcd, 0x55, 0xbe, 0x83, 0x3b, 0x6f, 0x6f, 0xc2, 0x64, 0xbd, 0xf4, 0x3b, 0xdb, 0xc2, 0x26,
-	0x54, 0x6c, 0xdf, 0x27, 0xce, 0x6c, 0xfd, 0x32, 0x1f, 0x6b, 0xae, 0xf2, 0x6b, 0x0e, 0x9a, 0xd9,
-	0xe5, 0xe7, 0x1e, 0xd9, 0x9b, 0x50, 0x92, 0x39, 0x29, 0xde, 0x58, 0x39, 0x42, 0x8f, 0xfe, 0xb9,
-	0xdb, 0x38, 0x5c, 0x12, 0xfd, 0x06, 0xfa, 0x04, 0x0a, 0x64, 0xec, 0x78, 0xf2, 0x2a, 0xdc, 0xbd,
-	0xac, 0x1c, 0x66, 0x0a, 0x05, 0x9b, 0xc6, 0xe8, 0x07, 0xd5, 0x69, 0xdb, 0xab, 0xc4, 0xb0, 0x76,
-	0x49, 0xf1, 0x44, 0xfb, 0x0b, 0xad, 0xd1, 0xd6, 0x65, 0xd2, 0xff, 0xb5, 0x4d, 0x7a, 0xf4, 0x05,
-	0xd4, 0x32, 0x55, 0x1a, 0xd5, 0xa0, 0x3c, 0x7b, 0xf0, 0x56, 0xa0, 0x92, 0x79, 0xeb, 0x78, 0xff,
-	0xfc, 0x52, 0xeb, 0xa8, 0x56, 0x57, 0x1b, 0x74, 0xfa, 0x2f, 0x55, 0x83, 0xf5, 0xcf, 0x07, 0x3d,
-	0x58, 0x23, 0xd1, 0x90, 0x37, 0x01, 0x0e, 0x89, 0x5c, 0xb9, 0xb9, 0xaf, 0x3f, 0x1d, 0x7a, 0x74,
-	0x94, 0x9c, 0xb2, 0xe2, 0xb6, 0x97, 0x62, 0xf2, 0xaf, 0xe7, 0x49, 0xfa, 0x0f, 0xf4, 0xf1, 0xde,
-	0x90, 0x2c, 0xfe, 0x52, 0xe9, 0x4b, 0x7a, 0x4e, 0x2f, 0x9c, 0x96, 0x38, 0xe7, 0xe9, 0x5f, 0x01,
-	0x00, 0x00, 0xff, 0xff, 0xb3, 0x9d, 0x9e, 0x3e, 0x80, 0x0d, 0x00, 0x00,
-}
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/core.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/voltha/core.pb.go
deleted file mode 100644
index 64d42d0..0000000
--- a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/core.pb.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: voltha_protos/core.proto
-
-package voltha
-
-import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// Transient State for devices
-type DeviceTransientState_Types int32
-
-const (
-	// The transient state of the device is not set
-	DeviceTransientState_NONE DeviceTransientState_Types = 0
-	// The state of the device in core is any state, i.e DELETING, DELETED, DELETE_FAILED, NONE.
-	// This state is only used for transitions.
-	DeviceTransientState_ANY DeviceTransientState_Types = 1
-	// The device is in FORCE_DELETING state
-	DeviceTransientState_FORCE_DELETING DeviceTransientState_Types = 2
-	// The device is getting deleted from adapter state
-	DeviceTransientState_DELETING_FROM_ADAPTER DeviceTransientState_Types = 3
-	// The device is deleted from adapter and is getting deleted in core.
-	DeviceTransientState_DELETING_POST_ADAPTER_RESPONSE DeviceTransientState_Types = 4
-	// State to represent that the device deletion is failed
-	DeviceTransientState_DELETE_FAILED DeviceTransientState_Types = 5
-	// State to represent that reconcile is in progress
-	DeviceTransientState_RECONCILE_IN_PROGRESS DeviceTransientState_Types = 6
-)
-
-var DeviceTransientState_Types_name = map[int32]string{
-	0: "NONE",
-	1: "ANY",
-	2: "FORCE_DELETING",
-	3: "DELETING_FROM_ADAPTER",
-	4: "DELETING_POST_ADAPTER_RESPONSE",
-	5: "DELETE_FAILED",
-	6: "RECONCILE_IN_PROGRESS",
-}
-
-var DeviceTransientState_Types_value = map[string]int32{
-	"NONE":                           0,
-	"ANY":                            1,
-	"FORCE_DELETING":                 2,
-	"DELETING_FROM_ADAPTER":          3,
-	"DELETING_POST_ADAPTER_RESPONSE": 4,
-	"DELETE_FAILED":                  5,
-	"RECONCILE_IN_PROGRESS":          6,
-}
-
-func (x DeviceTransientState_Types) String() string {
-	return proto.EnumName(DeviceTransientState_Types_name, int32(x))
-}
-
-func (DeviceTransientState_Types) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_39634f15fb8a505e, []int{0, 0}
-}
-
-type DeviceTransientState struct {
-	TransientState       DeviceTransientState_Types `protobuf:"varint,1,opt,name=transient_state,json=transientState,proto3,enum=voltha.DeviceTransientState_Types" json:"transient_state,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
-	XXX_unrecognized     []byte                     `json:"-"`
-	XXX_sizecache        int32                      `json:"-"`
-}
-
-func (m *DeviceTransientState) Reset()         { *m = DeviceTransientState{} }
-func (m *DeviceTransientState) String() string { return proto.CompactTextString(m) }
-func (*DeviceTransientState) ProtoMessage()    {}
-func (*DeviceTransientState) Descriptor() ([]byte, []int) {
-	return fileDescriptor_39634f15fb8a505e, []int{0}
-}
-
-func (m *DeviceTransientState) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_DeviceTransientState.Unmarshal(m, b)
-}
-func (m *DeviceTransientState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_DeviceTransientState.Marshal(b, m, deterministic)
-}
-func (m *DeviceTransientState) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DeviceTransientState.Merge(m, src)
-}
-func (m *DeviceTransientState) XXX_Size() int {
-	return xxx_messageInfo_DeviceTransientState.Size(m)
-}
-func (m *DeviceTransientState) XXX_DiscardUnknown() {
-	xxx_messageInfo_DeviceTransientState.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceTransientState proto.InternalMessageInfo
-
-func (m *DeviceTransientState) GetTransientState() DeviceTransientState_Types {
-	if m != nil {
-		return m.TransientState
-	}
-	return DeviceTransientState_NONE
-}
-
-func init() {
-	proto.RegisterEnum("voltha.DeviceTransientState_Types", DeviceTransientState_Types_name, DeviceTransientState_Types_value)
-	proto.RegisterType((*DeviceTransientState)(nil), "voltha.DeviceTransientState")
-}
-
-func init() { proto.RegisterFile("voltha_protos/core.proto", fileDescriptor_39634f15fb8a505e) }
-
-var fileDescriptor_39634f15fb8a505e = []byte{
-	// 284 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xf4, 0x30,
-	0x10, 0xc7, 0x9f, 0xee, 0xdb, 0x23, 0x01, 0xd7, 0x18, 0x15, 0xd6, 0x8b, 0x48, 0x4f, 0x5e, 0x4c,
-	0x41, 0xfd, 0x02, 0x75, 0x3b, 0xbb, 0x14, 0xd7, 0xa4, 0x24, 0xbd, 0xe8, 0x25, 0x74, 0x6b, 0xe8,
-	0x16, 0xb4, 0x29, 0x6d, 0x2c, 0x78, 0xf4, 0x73, 0xf8, 0x65, 0x65, 0xfb, 0x22, 0x08, 0xde, 0x66,
-	0x7e, 0xbf, 0xf9, 0x0f, 0xcc, 0xa0, 0x45, 0x63, 0x5e, 0xed, 0x2e, 0x51, 0x65, 0x65, 0xac, 0xa9,
-	0xbd, 0xd4, 0x54, 0x9a, 0xb6, 0x35, 0x99, 0x75, 0xc6, 0xfd, 0x1c, 0xa1, 0xd3, 0x40, 0x37, 0x79,
-	0xaa, 0xe3, 0x2a, 0x29, 0xea, 0x5c, 0x17, 0x56, 0xda, 0xc4, 0x6a, 0xf2, 0x80, 0x8e, 0xec, 0x40,
-	0x54, 0xbd, 0x47, 0x0b, 0xe7, 0xd2, 0xb9, 0x9a, 0xdf, 0xb8, 0xb4, 0x8b, 0xd2, 0xbf, 0x62, 0x34,
-	0xfe, 0x28, 0x75, 0x2d, 0xe6, 0xf6, 0x17, 0x75, 0xbf, 0x1c, 0x34, 0x6d, 0x0d, 0x39, 0x40, 0x13,
-	0xc6, 0x19, 0xe0, 0x7f, 0xe4, 0x3f, 0x1a, 0xfb, 0xec, 0x09, 0x3b, 0x84, 0xa0, 0xf9, 0x8a, 0x8b,
-	0x25, 0xa8, 0x00, 0x36, 0x10, 0x87, 0x6c, 0x8d, 0x47, 0xe4, 0x1c, 0x9d, 0x0d, 0x9d, 0x5a, 0x09,
-	0xfe, 0xa8, 0xfc, 0xc0, 0x8f, 0x62, 0x10, 0x78, 0x4c, 0x5c, 0x74, 0xf1, 0xa3, 0x22, 0x2e, 0xe3,
-	0x41, 0x29, 0x01, 0x32, 0xe2, 0x4c, 0x02, 0x9e, 0x90, 0x63, 0x74, 0xd8, 0xce, 0x80, 0x5a, 0xf9,
-	0xe1, 0x06, 0x02, 0x3c, 0xdd, 0x6f, 0x14, 0xb0, 0xe4, 0x6c, 0x19, 0x6e, 0x40, 0x85, 0x4c, 0x45,
-	0x82, 0xaf, 0x05, 0x48, 0x89, 0x67, 0xf7, 0x80, 0x4e, 0x4c, 0x95, 0x51, 0x53, 0xea, 0x22, 0x35,
-	0xd5, 0x4b, 0x7f, 0xdf, 0x33, 0xcd, 0x72, 0xbb, 0x7b, 0xdf, 0xd2, 0xd4, 0xbc, 0x79, 0x83, 0xf3,
-	0x3a, 0x77, 0xdd, 0x3f, 0xb4, 0xb9, 0xf3, 0x32, 0xd3, 0xb3, 0xed, 0xac, 0x85, 0xb7, 0xdf, 0x01,
-	0x00, 0x00, 0xff, 0xff, 0x1a, 0x0c, 0x99, 0x87, 0x75, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/ponsim.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/voltha/ponsim.pb.go
deleted file mode 100644
index e2abe7f..0000000
--- a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/ponsim.pb.go
+++ /dev/null
@@ -1,708 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: voltha_protos/ponsim.proto
-
-package voltha
-
-import (
-	context "context"
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	empty "github.com/golang/protobuf/ptypes/empty"
-	openflow_13 "github.com/opencord/voltha-protos/v4/go/openflow_13"
-	grpc "google.golang.org/grpc"
-	codes "google.golang.org/grpc/codes"
-	status "google.golang.org/grpc/status"
-	math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type PonSimOnuDeviceInfo struct {
-	UniPort              int32    `protobuf:"varint,1,opt,name=uni_port,json=uniPort,proto3" json:"uni_port,omitempty"`
-	SerialNumber         string   `protobuf:"bytes,2,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *PonSimOnuDeviceInfo) Reset()         { *m = PonSimOnuDeviceInfo{} }
-func (m *PonSimOnuDeviceInfo) String() string { return proto.CompactTextString(m) }
-func (*PonSimOnuDeviceInfo) ProtoMessage()    {}
-func (*PonSimOnuDeviceInfo) Descriptor() ([]byte, []int) {
-	return fileDescriptor_352253851b8ea7c0, []int{0}
-}
-
-func (m *PonSimOnuDeviceInfo) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PonSimOnuDeviceInfo.Unmarshal(m, b)
-}
-func (m *PonSimOnuDeviceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PonSimOnuDeviceInfo.Marshal(b, m, deterministic)
-}
-func (m *PonSimOnuDeviceInfo) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PonSimOnuDeviceInfo.Merge(m, src)
-}
-func (m *PonSimOnuDeviceInfo) XXX_Size() int {
-	return xxx_messageInfo_PonSimOnuDeviceInfo.Size(m)
-}
-func (m *PonSimOnuDeviceInfo) XXX_DiscardUnknown() {
-	xxx_messageInfo_PonSimOnuDeviceInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PonSimOnuDeviceInfo proto.InternalMessageInfo
-
-func (m *PonSimOnuDeviceInfo) GetUniPort() int32 {
-	if m != nil {
-		return m.UniPort
-	}
-	return 0
-}
-
-func (m *PonSimOnuDeviceInfo) GetSerialNumber() string {
-	if m != nil {
-		return m.SerialNumber
-	}
-	return ""
-}
-
-type PonSimDeviceInfo struct {
-	NniPort              int32                  `protobuf:"varint,1,opt,name=nni_port,json=nniPort,proto3" json:"nni_port,omitempty"`
-	Onus                 []*PonSimOnuDeviceInfo `protobuf:"bytes,2,rep,name=onus,proto3" json:"onus,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
-	XXX_unrecognized     []byte                 `json:"-"`
-	XXX_sizecache        int32                  `json:"-"`
-}
-
-func (m *PonSimDeviceInfo) Reset()         { *m = PonSimDeviceInfo{} }
-func (m *PonSimDeviceInfo) String() string { return proto.CompactTextString(m) }
-func (*PonSimDeviceInfo) ProtoMessage()    {}
-func (*PonSimDeviceInfo) Descriptor() ([]byte, []int) {
-	return fileDescriptor_352253851b8ea7c0, []int{1}
-}
-
-func (m *PonSimDeviceInfo) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PonSimDeviceInfo.Unmarshal(m, b)
-}
-func (m *PonSimDeviceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PonSimDeviceInfo.Marshal(b, m, deterministic)
-}
-func (m *PonSimDeviceInfo) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PonSimDeviceInfo.Merge(m, src)
-}
-func (m *PonSimDeviceInfo) XXX_Size() int {
-	return xxx_messageInfo_PonSimDeviceInfo.Size(m)
-}
-func (m *PonSimDeviceInfo) XXX_DiscardUnknown() {
-	xxx_messageInfo_PonSimDeviceInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PonSimDeviceInfo proto.InternalMessageInfo
-
-func (m *PonSimDeviceInfo) GetNniPort() int32 {
-	if m != nil {
-		return m.NniPort
-	}
-	return 0
-}
-
-func (m *PonSimDeviceInfo) GetOnus() []*PonSimOnuDeviceInfo {
-	if m != nil {
-		return m.Onus
-	}
-	return nil
-}
-
-type FlowTable struct {
-	Port                 int32                       `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
-	Flows                []*openflow_13.OfpFlowStats `protobuf:"bytes,2,rep,name=flows,proto3" json:"flows,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                    `json:"-"`
-	XXX_unrecognized     []byte                      `json:"-"`
-	XXX_sizecache        int32                       `json:"-"`
-}
-
-func (m *FlowTable) Reset()         { *m = FlowTable{} }
-func (m *FlowTable) String() string { return proto.CompactTextString(m) }
-func (*FlowTable) ProtoMessage()    {}
-func (*FlowTable) Descriptor() ([]byte, []int) {
-	return fileDescriptor_352253851b8ea7c0, []int{2}
-}
-
-func (m *FlowTable) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_FlowTable.Unmarshal(m, b)
-}
-func (m *FlowTable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_FlowTable.Marshal(b, m, deterministic)
-}
-func (m *FlowTable) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FlowTable.Merge(m, src)
-}
-func (m *FlowTable) XXX_Size() int {
-	return xxx_messageInfo_FlowTable.Size(m)
-}
-func (m *FlowTable) XXX_DiscardUnknown() {
-	xxx_messageInfo_FlowTable.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FlowTable proto.InternalMessageInfo
-
-func (m *FlowTable) GetPort() int32 {
-	if m != nil {
-		return m.Port
-	}
-	return 0
-}
-
-func (m *FlowTable) GetFlows() []*openflow_13.OfpFlowStats {
-	if m != nil {
-		return m.Flows
-	}
-	return nil
-}
-
-type PonSimFrame struct {
-	Id                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Payload              []byte   `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"`
-	OutPort              int32    `protobuf:"varint,3,opt,name=out_port,json=outPort,proto3" json:"out_port,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *PonSimFrame) Reset()         { *m = PonSimFrame{} }
-func (m *PonSimFrame) String() string { return proto.CompactTextString(m) }
-func (*PonSimFrame) ProtoMessage()    {}
-func (*PonSimFrame) Descriptor() ([]byte, []int) {
-	return fileDescriptor_352253851b8ea7c0, []int{3}
-}
-
-func (m *PonSimFrame) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PonSimFrame.Unmarshal(m, b)
-}
-func (m *PonSimFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PonSimFrame.Marshal(b, m, deterministic)
-}
-func (m *PonSimFrame) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PonSimFrame.Merge(m, src)
-}
-func (m *PonSimFrame) XXX_Size() int {
-	return xxx_messageInfo_PonSimFrame.Size(m)
-}
-func (m *PonSimFrame) XXX_DiscardUnknown() {
-	xxx_messageInfo_PonSimFrame.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PonSimFrame proto.InternalMessageInfo
-
-func (m *PonSimFrame) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func (m *PonSimFrame) GetPayload() []byte {
-	if m != nil {
-		return m.Payload
-	}
-	return nil
-}
-
-func (m *PonSimFrame) GetOutPort() int32 {
-	if m != nil {
-		return m.OutPort
-	}
-	return 0
-}
-
-type PonSimPacketCounter struct {
-	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Value                int64    `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *PonSimPacketCounter) Reset()         { *m = PonSimPacketCounter{} }
-func (m *PonSimPacketCounter) String() string { return proto.CompactTextString(m) }
-func (*PonSimPacketCounter) ProtoMessage()    {}
-func (*PonSimPacketCounter) Descriptor() ([]byte, []int) {
-	return fileDescriptor_352253851b8ea7c0, []int{4}
-}
-
-func (m *PonSimPacketCounter) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PonSimPacketCounter.Unmarshal(m, b)
-}
-func (m *PonSimPacketCounter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PonSimPacketCounter.Marshal(b, m, deterministic)
-}
-func (m *PonSimPacketCounter) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PonSimPacketCounter.Merge(m, src)
-}
-func (m *PonSimPacketCounter) XXX_Size() int {
-	return xxx_messageInfo_PonSimPacketCounter.Size(m)
-}
-func (m *PonSimPacketCounter) XXX_DiscardUnknown() {
-	xxx_messageInfo_PonSimPacketCounter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PonSimPacketCounter proto.InternalMessageInfo
-
-func (m *PonSimPacketCounter) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *PonSimPacketCounter) GetValue() int64 {
-	if m != nil {
-		return m.Value
-	}
-	return 0
-}
-
-type PonSimPortMetrics struct {
-	PortName             string                 `protobuf:"bytes,1,opt,name=port_name,json=portName,proto3" json:"port_name,omitempty"`
-	Packets              []*PonSimPacketCounter `protobuf:"bytes,2,rep,name=packets,proto3" json:"packets,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
-	XXX_unrecognized     []byte                 `json:"-"`
-	XXX_sizecache        int32                  `json:"-"`
-}
-
-func (m *PonSimPortMetrics) Reset()         { *m = PonSimPortMetrics{} }
-func (m *PonSimPortMetrics) String() string { return proto.CompactTextString(m) }
-func (*PonSimPortMetrics) ProtoMessage()    {}
-func (*PonSimPortMetrics) Descriptor() ([]byte, []int) {
-	return fileDescriptor_352253851b8ea7c0, []int{5}
-}
-
-func (m *PonSimPortMetrics) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PonSimPortMetrics.Unmarshal(m, b)
-}
-func (m *PonSimPortMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PonSimPortMetrics.Marshal(b, m, deterministic)
-}
-func (m *PonSimPortMetrics) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PonSimPortMetrics.Merge(m, src)
-}
-func (m *PonSimPortMetrics) XXX_Size() int {
-	return xxx_messageInfo_PonSimPortMetrics.Size(m)
-}
-func (m *PonSimPortMetrics) XXX_DiscardUnknown() {
-	xxx_messageInfo_PonSimPortMetrics.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PonSimPortMetrics proto.InternalMessageInfo
-
-func (m *PonSimPortMetrics) GetPortName() string {
-	if m != nil {
-		return m.PortName
-	}
-	return ""
-}
-
-func (m *PonSimPortMetrics) GetPackets() []*PonSimPacketCounter {
-	if m != nil {
-		return m.Packets
-	}
-	return nil
-}
-
-type PonSimMetrics struct {
-	Device               string               `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
-	Metrics              []*PonSimPortMetrics `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
-	XXX_unrecognized     []byte               `json:"-"`
-	XXX_sizecache        int32                `json:"-"`
-}
-
-func (m *PonSimMetrics) Reset()         { *m = PonSimMetrics{} }
-func (m *PonSimMetrics) String() string { return proto.CompactTextString(m) }
-func (*PonSimMetrics) ProtoMessage()    {}
-func (*PonSimMetrics) Descriptor() ([]byte, []int) {
-	return fileDescriptor_352253851b8ea7c0, []int{6}
-}
-
-func (m *PonSimMetrics) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PonSimMetrics.Unmarshal(m, b)
-}
-func (m *PonSimMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PonSimMetrics.Marshal(b, m, deterministic)
-}
-func (m *PonSimMetrics) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PonSimMetrics.Merge(m, src)
-}
-func (m *PonSimMetrics) XXX_Size() int {
-	return xxx_messageInfo_PonSimMetrics.Size(m)
-}
-func (m *PonSimMetrics) XXX_DiscardUnknown() {
-	xxx_messageInfo_PonSimMetrics.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PonSimMetrics proto.InternalMessageInfo
-
-func (m *PonSimMetrics) GetDevice() string {
-	if m != nil {
-		return m.Device
-	}
-	return ""
-}
-
-func (m *PonSimMetrics) GetMetrics() []*PonSimPortMetrics {
-	if m != nil {
-		return m.Metrics
-	}
-	return nil
-}
-
-type PonSimMetricsRequest struct {
-	Port                 int32    `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *PonSimMetricsRequest) Reset()         { *m = PonSimMetricsRequest{} }
-func (m *PonSimMetricsRequest) String() string { return proto.CompactTextString(m) }
-func (*PonSimMetricsRequest) ProtoMessage()    {}
-func (*PonSimMetricsRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_352253851b8ea7c0, []int{7}
-}
-
-func (m *PonSimMetricsRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PonSimMetricsRequest.Unmarshal(m, b)
-}
-func (m *PonSimMetricsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PonSimMetricsRequest.Marshal(b, m, deterministic)
-}
-func (m *PonSimMetricsRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PonSimMetricsRequest.Merge(m, src)
-}
-func (m *PonSimMetricsRequest) XXX_Size() int {
-	return xxx_messageInfo_PonSimMetricsRequest.Size(m)
-}
-func (m *PonSimMetricsRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_PonSimMetricsRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PonSimMetricsRequest proto.InternalMessageInfo
-
-func (m *PonSimMetricsRequest) GetPort() int32 {
-	if m != nil {
-		return m.Port
-	}
-	return 0
-}
-
-func init() {
-	proto.RegisterType((*PonSimOnuDeviceInfo)(nil), "voltha.PonSimOnuDeviceInfo")
-	proto.RegisterType((*PonSimDeviceInfo)(nil), "voltha.PonSimDeviceInfo")
-	proto.RegisterType((*FlowTable)(nil), "voltha.FlowTable")
-	proto.RegisterType((*PonSimFrame)(nil), "voltha.PonSimFrame")
-	proto.RegisterType((*PonSimPacketCounter)(nil), "voltha.PonSimPacketCounter")
-	proto.RegisterType((*PonSimPortMetrics)(nil), "voltha.PonSimPortMetrics")
-	proto.RegisterType((*PonSimMetrics)(nil), "voltha.PonSimMetrics")
-	proto.RegisterType((*PonSimMetricsRequest)(nil), "voltha.PonSimMetricsRequest")
-}
-
-func init() { proto.RegisterFile("voltha_protos/ponsim.proto", fileDescriptor_352253851b8ea7c0) }
-
-var fileDescriptor_352253851b8ea7c0 = []byte{
-	// 565 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x94, 0x5f, 0x6f, 0xd3, 0x3c,
-	0x14, 0xc6, 0xdb, 0x6e, 0xeb, 0x9f, 0xb3, 0xf5, 0x7d, 0x99, 0x3b, 0xa6, 0xae, 0xbd, 0xa0, 0x32,
-	0x37, 0x15, 0x12, 0x09, 0x5b, 0xe1, 0x06, 0x24, 0x40, 0x8c, 0x6d, 0xe2, 0x82, 0x51, 0xb9, 0xec,
-	0x06, 0x21, 0xa2, 0x34, 0x71, 0xb3, 0x88, 0xc4, 0x27, 0x24, 0x76, 0xa7, 0x7d, 0x43, 0x3e, 0x16,
-	0x8a, 0x9d, 0xd0, 0x66, 0x6a, 0xb9, 0xb3, 0x8f, 0x9f, 0xfe, 0x9e, 0xf3, 0x9c, 0x1c, 0x15, 0x06,
-	0x4b, 0x8c, 0xe4, 0xad, 0xeb, 0x24, 0x29, 0x4a, 0xcc, 0xec, 0x04, 0x45, 0x16, 0xc6, 0x96, 0xbe,
-	0x91, 0xa6, 0x79, 0x1b, 0x0c, 0x03, 0xc4, 0x20, 0xe2, 0xb6, 0xae, 0xce, 0xd5, 0xc2, 0xe6, 0x71,
-	0x22, 0xef, 0x8d, 0x68, 0xf0, 0xa4, 0x0a, 0xc0, 0x84, 0x8b, 0x45, 0x84, 0x77, 0xce, 0xe9, 0xc4,
-	0x08, 0xe8, 0x0d, 0xf4, 0xa6, 0x28, 0x66, 0x61, 0xfc, 0x45, 0xa8, 0x8f, 0x7c, 0x19, 0x7a, 0xfc,
-	0x93, 0x58, 0x20, 0x39, 0x81, 0xb6, 0x12, 0xa1, 0x93, 0x60, 0x2a, 0xfb, 0xf5, 0x51, 0x7d, 0xbc,
-	0xc7, 0x5a, 0x4a, 0x84, 0x53, 0x4c, 0x25, 0x79, 0x0a, 0xdd, 0x8c, 0xa7, 0xa1, 0x1b, 0x39, 0x42,
-	0xc5, 0x73, 0x9e, 0xf6, 0x1b, 0xa3, 0xfa, 0xb8, 0xc3, 0x0e, 0x4c, 0xf1, 0x5a, 0xd7, 0xe8, 0x0f,
-	0x78, 0x64, 0xb0, 0x55, 0xa6, 0x78, 0xc0, 0x14, 0x05, 0xd3, 0x86, 0x5d, 0x14, 0x2a, 0xeb, 0x37,
-	0x46, 0x3b, 0xe3, 0xfd, 0xb3, 0xa1, 0x65, 0xba, 0xb6, 0x36, 0x74, 0xc6, 0xb4, 0x90, 0x32, 0xe8,
-	0x5c, 0x46, 0x78, 0xf7, 0xd5, 0x9d, 0x47, 0x9c, 0x10, 0xd8, 0x5d, 0x83, 0xea, 0x33, 0x39, 0x85,
-	0xbd, 0x3c, 0xe8, 0x0a, 0xb9, 0x1e, 0x1d, 0x17, 0x89, 0xa3, 0xcf, 0x99, 0x74, 0x65, 0xc6, 0x8c,
-	0x92, 0x32, 0xd8, 0x37, 0x86, 0x97, 0xa9, 0x1b, 0x73, 0xf2, 0x1f, 0x34, 0x42, 0x5f, 0x33, 0x3b,
-	0xac, 0x11, 0xfa, 0xa4, 0x0f, 0xad, 0xc4, 0xbd, 0x8f, 0xd0, 0xf5, 0x75, 0xe2, 0x03, 0x56, 0x5e,
-	0xf3, 0x60, 0xa8, 0xa4, 0x09, 0xb6, 0x63, 0x82, 0xa1, 0x92, 0x79, 0x30, 0xfa, 0xae, 0x1c, 0xef,
-	0xd4, 0xf5, 0x7e, 0x72, 0x79, 0x8e, 0x4a, 0x48, 0x9e, 0xe6, 0x1d, 0x0b, 0x37, 0xe6, 0x05, 0x5d,
-	0x9f, 0xc9, 0x11, 0xec, 0x2d, 0xdd, 0x48, 0x71, 0x4d, 0xdf, 0x61, 0xe6, 0x42, 0x03, 0x38, 0x2c,
-	0x00, 0x98, 0xca, 0xcf, 0x5c, 0xa6, 0xa1, 0x97, 0x91, 0x21, 0x74, 0x72, 0x33, 0x67, 0x8d, 0xd1,
-	0xce, 0x0b, 0xd7, 0x39, 0xe7, 0x55, 0xde, 0x67, 0x6e, 0xb6, 0x65, 0x9c, 0x95, 0x4e, 0x58, 0xa9,
-	0xa5, 0xdf, 0xa1, 0x6b, 0xde, 0x4b, 0x93, 0x63, 0x68, 0xfa, 0x7a, 0xec, 0x85, 0x43, 0x71, 0x23,
-	0x13, 0x68, 0xc5, 0x46, 0x52, 0xf0, 0x4f, 0x1e, 0xf0, 0x57, 0x8d, 0xb2, 0x52, 0x49, 0x9f, 0xc1,
-	0x51, 0x85, 0xce, 0xf8, 0x2f, 0xc5, 0x33, 0xb9, 0xe9, 0xd3, 0x9d, 0xfd, 0x6e, 0x40, 0xd3, 0x88,
-	0xc9, 0x6b, 0xe8, 0xcc, 0xb8, 0xf0, 0xcd, 0x07, 0xe9, 0x55, 0x7d, 0x74, 0x71, 0x70, 0x6c, 0x99,
-	0xf5, 0xb7, 0xca, 0xf5, 0xb7, 0x2e, 0xf2, 0xf5, 0xa7, 0x35, 0xf2, 0x1e, 0xba, 0x8c, 0x7b, 0x3c,
-	0x5c, 0x72, 0xad, 0xcc, 0xc8, 0x16, 0xe9, 0x60, 0x13, 0x97, 0xd6, 0x5e, 0xd4, 0xc9, 0x39, 0x74,
-	0xaf, 0xb8, 0x5c, 0xdb, 0xe0, 0x6d, 0x84, 0x7e, 0x95, 0xb0, 0xfa, 0x05, 0xad, 0x91, 0xb7, 0xf0,
-	0xff, 0x4d, 0xe2, 0xbb, 0x92, 0xaf, 0xf6, 0xf5, 0xb0, 0x94, 0xff, 0x2d, 0xfd, 0x23, 0xc6, 0x1b,
-	0x68, 0x5f, 0x71, 0x39, 0xcb, 0x17, 0x75, 0xab, 0xff, 0xe3, 0xaa, 0x7f, 0x31, 0x63, 0x5a, 0xfb,
-	0x70, 0x01, 0x3d, 0x4c, 0x03, 0xbd, 0xfb, 0x1e, 0xa6, 0x7e, 0x21, 0xfb, 0x66, 0x05, 0xa1, 0xbc,
-	0x55, 0x73, 0xcb, 0xc3, 0xd8, 0x2e, 0xdf, 0x6c, 0xf3, 0xf6, 0xbc, 0xf8, 0xa7, 0x58, 0xbe, 0xb4,
-	0x03, 0x2c, 0x6a, 0xf3, 0xa6, 0x2e, 0x4e, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xf9, 0xa0,
-	0x81, 0x8f, 0x04, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// PonSimClient is the client API for PonSim service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type PonSimClient interface {
-	SendFrame(ctx context.Context, in *PonSimFrame, opts ...grpc.CallOption) (*empty.Empty, error)
-	ReceiveFrames(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (PonSim_ReceiveFramesClient, error)
-	GetDeviceInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*PonSimDeviceInfo, error)
-	UpdateFlowTable(ctx context.Context, in *FlowTable, opts ...grpc.CallOption) (*empty.Empty, error)
-	GetStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*PonSimMetrics, error)
-}
-
-type ponSimClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewPonSimClient(cc *grpc.ClientConn) PonSimClient {
-	return &ponSimClient{cc}
-}
-
-func (c *ponSimClient) SendFrame(ctx context.Context, in *PonSimFrame, opts ...grpc.CallOption) (*empty.Empty, error) {
-	out := new(empty.Empty)
-	err := c.cc.Invoke(ctx, "/voltha.PonSim/SendFrame", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *ponSimClient) ReceiveFrames(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (PonSim_ReceiveFramesClient, error) {
-	stream, err := c.cc.NewStream(ctx, &_PonSim_serviceDesc.Streams[0], "/voltha.PonSim/ReceiveFrames", opts...)
-	if err != nil {
-		return nil, err
-	}
-	x := &ponSimReceiveFramesClient{stream}
-	if err := x.ClientStream.SendMsg(in); err != nil {
-		return nil, err
-	}
-	if err := x.ClientStream.CloseSend(); err != nil {
-		return nil, err
-	}
-	return x, nil
-}
-
-type PonSim_ReceiveFramesClient interface {
-	Recv() (*PonSimFrame, error)
-	grpc.ClientStream
-}
-
-type ponSimReceiveFramesClient struct {
-	grpc.ClientStream
-}
-
-func (x *ponSimReceiveFramesClient) Recv() (*PonSimFrame, error) {
-	m := new(PonSimFrame)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-func (c *ponSimClient) GetDeviceInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*PonSimDeviceInfo, error) {
-	out := new(PonSimDeviceInfo)
-	err := c.cc.Invoke(ctx, "/voltha.PonSim/GetDeviceInfo", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *ponSimClient) UpdateFlowTable(ctx context.Context, in *FlowTable, opts ...grpc.CallOption) (*empty.Empty, error) {
-	out := new(empty.Empty)
-	err := c.cc.Invoke(ctx, "/voltha.PonSim/UpdateFlowTable", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *ponSimClient) GetStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*PonSimMetrics, error) {
-	out := new(PonSimMetrics)
-	err := c.cc.Invoke(ctx, "/voltha.PonSim/GetStats", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// PonSimServer is the server API for PonSim service.
-type PonSimServer interface {
-	SendFrame(context.Context, *PonSimFrame) (*empty.Empty, error)
-	ReceiveFrames(*empty.Empty, PonSim_ReceiveFramesServer) error
-	GetDeviceInfo(context.Context, *empty.Empty) (*PonSimDeviceInfo, error)
-	UpdateFlowTable(context.Context, *FlowTable) (*empty.Empty, error)
-	GetStats(context.Context, *empty.Empty) (*PonSimMetrics, error)
-}
-
-// UnimplementedPonSimServer can be embedded to have forward compatible implementations.
-type UnimplementedPonSimServer struct {
-}
-
-func (*UnimplementedPonSimServer) SendFrame(ctx context.Context, req *PonSimFrame) (*empty.Empty, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method SendFrame not implemented")
-}
-func (*UnimplementedPonSimServer) ReceiveFrames(req *empty.Empty, srv PonSim_ReceiveFramesServer) error {
-	return status.Errorf(codes.Unimplemented, "method ReceiveFrames not implemented")
-}
-func (*UnimplementedPonSimServer) GetDeviceInfo(ctx context.Context, req *empty.Empty) (*PonSimDeviceInfo, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method GetDeviceInfo not implemented")
-}
-func (*UnimplementedPonSimServer) UpdateFlowTable(ctx context.Context, req *FlowTable) (*empty.Empty, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UpdateFlowTable not implemented")
-}
-func (*UnimplementedPonSimServer) GetStats(ctx context.Context, req *empty.Empty) (*PonSimMetrics, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method GetStats not implemented")
-}
-
-func RegisterPonSimServer(s *grpc.Server, srv PonSimServer) {
-	s.RegisterService(&_PonSim_serviceDesc, srv)
-}
-
-func _PonSim_SendFrame_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(PonSimFrame)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(PonSimServer).SendFrame(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/voltha.PonSim/SendFrame",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(PonSimServer).SendFrame(ctx, req.(*PonSimFrame))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _PonSim_ReceiveFrames_Handler(srv interface{}, stream grpc.ServerStream) error {
-	m := new(empty.Empty)
-	if err := stream.RecvMsg(m); err != nil {
-		return err
-	}
-	return srv.(PonSimServer).ReceiveFrames(m, &ponSimReceiveFramesServer{stream})
-}
-
-type PonSim_ReceiveFramesServer interface {
-	Send(*PonSimFrame) error
-	grpc.ServerStream
-}
-
-type ponSimReceiveFramesServer struct {
-	grpc.ServerStream
-}
-
-func (x *ponSimReceiveFramesServer) Send(m *PonSimFrame) error {
-	return x.ServerStream.SendMsg(m)
-}
-
-func _PonSim_GetDeviceInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(empty.Empty)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(PonSimServer).GetDeviceInfo(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/voltha.PonSim/GetDeviceInfo",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(PonSimServer).GetDeviceInfo(ctx, req.(*empty.Empty))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _PonSim_UpdateFlowTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(FlowTable)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(PonSimServer).UpdateFlowTable(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/voltha.PonSim/UpdateFlowTable",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(PonSimServer).UpdateFlowTable(ctx, req.(*FlowTable))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _PonSim_GetStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(empty.Empty)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(PonSimServer).GetStats(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/voltha.PonSim/GetStats",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(PonSimServer).GetStats(ctx, req.(*empty.Empty))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _PonSim_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "voltha.PonSim",
-	HandlerType: (*PonSimServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "SendFrame",
-			Handler:    _PonSim_SendFrame_Handler,
-		},
-		{
-			MethodName: "GetDeviceInfo",
-			Handler:    _PonSim_GetDeviceInfo_Handler,
-		},
-		{
-			MethodName: "UpdateFlowTable",
-			Handler:    _PonSim_UpdateFlowTable_Handler,
-		},
-		{
-			MethodName: "GetStats",
-			Handler:    _PonSim_GetStats_Handler,
-		},
-	},
-	Streams: []grpc.StreamDesc{
-		{
-			StreamName:    "ReceiveFrames",
-			Handler:       _PonSim_ReceiveFrames_Handler,
-			ServerStreams: true,
-		},
-	},
-	Metadata: "voltha_protos/ponsim.proto",
-}
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/adapter_services/adapter_services.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/adapter_services/adapter_services.pb.go
new file mode 100644
index 0000000..3f2ca0c
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/adapter_services/adapter_services.pb.go
@@ -0,0 +1,1832 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/adapter_services.proto
+
+package adapter_services
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	empty "github.com/golang/protobuf/ptypes/empty"
+	common "github.com/opencord/voltha-protos/v5/go/common"
+	extension "github.com/opencord/voltha-protos/v5/go/extension"
+	inter_container "github.com/opencord/voltha-protos/v5/go/inter_container"
+	_ "github.com/opencord/voltha-protos/v5/go/openolt"
+	voltha "github.com/opencord/voltha-protos/v5/go/voltha"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+func init() {
+	proto.RegisterFile("voltha_protos/adapter_services.proto", fileDescriptor_5826bd5d7bb77df1)
+}
+
+var fileDescriptor_5826bd5d7bb77df1 = []byte{
+	// 965 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x97, 0x4b, 0x6f, 0xe3, 0x36,
+	0x10, 0xc7, 0x81, 0x16, 0xd8, 0xc3, 0x24, 0x76, 0x12, 0xe5, 0x81, 0x85, 0xb2, 0x2d, 0xfa, 0xba,
+	0xf4, 0xb0, 0x0a, 0x90, 0xc5, 0x3e, 0x5a, 0xa4, 0x0d, 0x1c, 0xdb, 0xeb, 0x75, 0x9b, 0xc0, 0xa9,
+	0x95, 0xec, 0xa1, 0x97, 0x05, 0x2d, 0x8d, 0x65, 0x62, 0x29, 0x52, 0x15, 0x47, 0x79, 0x9c, 0xfb,
+	0x75, 0xfa, 0x11, 0xfa, 0xe1, 0x0a, 0xea, 0x65, 0x29, 0xb1, 0xd7, 0xb1, 0xd3, 0x3d, 0xf2, 0x3f,
+	0x33, 0x3f, 0x52, 0x33, 0x43, 0x8a, 0x84, 0x1f, 0xae, 0x94, 0xa0, 0x09, 0xfb, 0x10, 0xc5, 0x8a,
+	0x94, 0x3e, 0x60, 0x3e, 0x8b, 0x08, 0xe3, 0x0f, 0x1a, 0xe3, 0x2b, 0xee, 0xa1, 0x76, 0x52, 0xdd,
+	0x7a, 0x92, 0x79, 0xd9, 0xfb, 0x81, 0x52, 0x81, 0xc0, 0x83, 0x54, 0x1d, 0x25, 0xe3, 0x03, 0x0c,
+	0x23, 0xba, 0xcd, 0x9c, 0x6c, 0xbb, 0x8e, 0xf2, 0x54, 0x18, 0x2a, 0x99, 0xdb, 0xbe, 0xaf, 0xdb,
+	0xb8, 0x34, 0x93, 0x78, 0x4a, 0x12, 0xe3, 0x12, 0xe3, 0xd9, 0x00, 0x1f, 0xcd, 0x12, 0x72, 0xdb,
+	0xd7, 0x75, 0x1b, 0xde, 0x10, 0x4a, 0xcd, 0x95, 0xcc, 0x57, 0x68, 0xef, 0xd7, 0xed, 0x2a, 0x42,
+	0xa9, 0x04, 0xcd, 0x06, 0x4f, 0x90, 0x09, 0x9a, 0xcc, 0xb6, 0x65, 0xa3, 0xcc, 0x76, 0xf8, 0x8f,
+	0x05, 0xcd, 0x56, 0x96, 0x11, 0x37, 0x4b, 0x88, 0x75, 0x0c, 0x1b, 0x3d, 0xa4, 0x77, 0x29, 0xc1,
+	0x25, 0x46, 0x89, 0xb6, 0xf6, 0x9c, 0x2c, 0x2b, 0x4e, 0x91, 0x15, 0xa7, 0x6b, 0xb2, 0x62, 0xef,
+	0x38, 0x39, 0xac, 0xe6, 0xfd, 0x12, 0xd6, 0x5a, 0xbe, 0x8a, 0xa8, 0x93, 0x7e, 0x9d, 0xd5, 0x2c,
+	0x9c, 0xb2, 0xb1, 0x3d, 0x07, 0x66, 0xfd, 0x04, 0x1b, 0x43, 0xf4, 0x94, 0xf4, 0xb8, 0xc0, 0x25,
+	0x43, 0x5f, 0xc1, 0x7a, 0x07, 0x05, 0xd2, 0xb2, 0x71, 0xaf, 0xa1, 0xd1, 0xe1, 0x9a, 0x8d, 0x96,
+	0x9e, 0xf0, 0x0d, 0x34, 0x87, 0xd8, 0x95, 0x2b, 0x44, 0xbe, 0x82, 0xf5, 0x21, 0x8e, 0x94, 0xa2,
+	0xe5, 0x67, 0x74, 0x51, 0x8c, 0x2f, 0x50, 0x2f, 0x1b, 0x79, 0x04, 0x9b, 0x3d, 0xa4, 0xc1, 0x38,
+	0xca, 0xfc, 0xfa, 0x72, 0xac, 0xee, 0xc5, 0x3e, 0x2d, 0xc6, 0xee, 0x35, 0x27, 0x6f, 0xd2, 0x66,
+	0x11, 0x1b, 0x71, 0xc1, 0xb3, 0xaa, 0xb4, 0x27, 0x5c, 0xf8, 0x99, 0xe3, 0xa9, 0xd2, 0xf4, 0xe0,
+	0x89, 0x0f, 0x01, 0xb2, 0x14, 0x9d, 0xab, 0x98, 0xac, 0xf5, 0x22, 0xca, 0x8c, 0xe6, 0xc6, 0xbc,
+	0x80, 0xb5, 0xbc, 0x22, 0x4b, 0x04, 0x1d, 0xc1, 0xc6, 0x65, 0xe4, 0x33, 0xc2, 0xb7, 0x42, 0x5d,
+	0xeb, 0x93, 0x44, 0x7c, 0xb4, 0xb6, 0x8a, 0x40, 0x33, 0x4a, 0xe5, 0xb9, 0xd1, 0xa7, 0xf0, 0xb4,
+	0x12, 0xdd, 0x97, 0x5e, 0x8c, 0x21, 0x4a, 0x62, 0x42, 0xdc, 0x5a, 0x65, 0x5e, 0x2a, 0xf2, 0xa7,
+	0x69, 0x3f, 0x43, 0xc3, 0x45, 0xe9, 0x9f, 0x33, 0xef, 0x23, 0xd2, 0x20, 0xa1, 0xe9, 0x4a, 0x4a,
+	0x69, 0x6e, 0xec, 0x31, 0x34, 0xb3, 0x95, 0x9c, 0x87, 0x6d, 0x25, 0xc7, 0x3c, 0xb0, 0x76, 0xcb,
+	0xe0, 0x5c, 0xd1, 0xa6, 0x7c, 0x73, 0x01, 0x2e, 0x6c, 0x76, 0xd4, 0xb5, 0x14, 0x8a, 0xf9, 0x03,
+	0x99, 0xf4, 0x43, 0x16, 0xa0, 0xf5, 0x5d, 0xbd, 0x5a, 0xa9, 0x58, 0x38, 0x0d, 0xf1, 0xaf, 0x04,
+	0x35, 0xd9, 0xfb, 0x33, 0x7c, 0x86, 0xa8, 0x23, 0x25, 0x35, 0x5a, 0xa7, 0xb0, 0x65, 0xfa, 0x27,
+	0xe7, 0xe5, 0x7b, 0xdc, 0x9e, 0x19, 0xf1, 0x00, 0xda, 0x00, 0x76, 0x5a, 0x23, 0x15, 0x97, 0xbc,
+	0xcb, 0x28, 0x88, 0x99, 0x8f, 0xab, 0x03, 0x9f, 0xc3, 0x7a, 0x65, 0x79, 0xda, 0x02, 0x27, 0x3f,
+	0x96, 0xfb, 0x1d, 0xbb, 0xcc, 0xfd, 0xd4, 0xfc, 0x3b, 0x6c, 0xb6, 0x3c, 0xe2, 0x57, 0x8c, 0xb0,
+	0x4c, 0xd1, 0xca, 0x73, 0xf7, 0xa1, 0xd9, 0x56, 0x61, 0xc8, 0xe9, 0xf1, 0xa8, 0x0e, 0x34, 0x8a,
+	0xaa, 0x64, 0xa4, 0x67, 0x65, 0xeb, 0x55, 0x2b, 0x76, 0x86, 0x5a, 0xb3, 0x00, 0xed, 0xdd, 0x99,
+	0x56, 0xeb, 0x0c, 0xf6, 0x7a, 0x48, 0x35, 0x2d, 0x2f, 0xd8, 0x4a, 0xb8, 0xdf, 0x60, 0xbb, 0xcd,
+	0xa4, 0x87, 0xa2, 0x2e, 0xaf, 0xca, 0x2a, 0x12, 0x9f, 0x17, 0xde, 0x74, 0xfa, 0x6a, 0xac, 0x77,
+	0xb0, 0x35, 0xc4, 0x2b, 0x8c, 0xe9, 0xd1, 0xa4, 0xd7, 0xd0, 0x70, 0x89, 0xc5, 0x34, 0x08, 0x3d,
+	0x6e, 0xce, 0x56, 0x6b, 0xb3, 0x6c, 0x99, 0xb3, 0x76, 0xdf, 0x28, 0xd3, 0x9f, 0x9c, 0x19, 0x55,
+	0xeb, 0xe5, 0xf2, 0x30, 0x11, 0x8c, 0xb0, 0x25, 0x58, 0x1c, 0x4e, 0xa7, 0xaf, 0xc9, 0xd3, 0xe9,
+	0xf3, 0xae, 0x1c, 0x44, 0x18, 0x33, 0xe2, 0x4a, 0x1a, 0x92, 0x75, 0x04, 0x0d, 0x37, 0x89, 0xa2,
+	0x18, 0xb5, 0xee, 0x5e, 0xa1, 0x24, 0x6b, 0xbb, 0xa0, 0xa4, 0xc3, 0xb7, 0x5c, 0x10, 0xc6, 0x73,
+	0xb7, 0xfb, 0xaf, 0xb0, 0x71, 0x29, 0x1f, 0x11, 0x7f, 0x0c, 0x6b, 0x3d, 0xa4, 0xee, 0x0d, 0xbd,
+	0x67, 0x22, 0xa9, 0xf4, 0x6e, 0x45, 0x2c, 0xd6, 0xbf, 0x53, 0xac, 0x7f, 0x88, 0x94, 0xc4, 0x32,
+	0xb5, 0x69, 0xab, 0x05, 0x6b, 0xee, 0x2c, 0x80, 0x7b, 0x1f, 0x30, 0x6f, 0x0d, 0x97, 0xd0, 0xec,
+	0x21, 0xb9, 0x5c, 0x06, 0x02, 0x33, 0xca, 0x37, 0x4e, 0x79, 0xf5, 0x71, 0x32, 0xbd, 0x87, 0x19,
+	0xab, 0xd8, 0x48, 0xdf, 0x7e, 0xc2, 0x23, 0x2f, 0xcf, 0xa5, 0xf9, 0x5d, 0x2e, 0xc0, 0xba, 0x0b,
+	0xb1, 0xee, 0x1d, 0xec, 0xe1, 0xbf, 0x5f, 0xc2, 0x9e, 0xd9, 0xeb, 0xe6, 0x72, 0xf7, 0x7f, 0x5f,
+	0x9b, 0xba, 0xd0, 0x48, 0xd1, 0x3e, 0xf7, 0xd2, 0x06, 0x99, 0x76, 0x54, 0x4d, 0x5e, 0x94, 0xd0,
+	0x5f, 0xa0, 0x69, 0x9a, 0xb9, 0xc2, 0x29, 0x7b, 0xc2, 0xe8, 0x8b, 0xc2, 0xff, 0x80, 0xed, 0x62,
+	0x73, 0x5c, 0xa0, 0x37, 0x39, 0x8f, 0xd5, 0x98, 0x8b, 0xca, 0x5f, 0xa4, 0x22, 0xde, 0xdd, 0x62,
+	0xf3, 0x90, 0x5d, 0x68, 0x64, 0xb7, 0xb3, 0x1e, 0x86, 0xe9, 0x5f, 0xfd, 0xd9, 0xf4, 0x20, 0xac,
+	0xc8, 0x8b, 0x30, 0x2d, 0x58, 0xcb, 0xfc, 0x2f, 0xda, 0x4a, 0x52, 0xf5, 0xa4, 0x4d, 0x45, 0x73,
+	0xd5, 0x5e, 0x80, 0x38, 0xfc, 0xfb, 0x0b, 0xd8, 0x1b, 0x08, 0xfa, 0x2c, 0xe5, 0x3b, 0x86, 0xcd,
+	0xf3, 0x58, 0xdd, 0xdc, 0x9a, 0x24, 0xe7, 0x1d, 0xb5, 0x5c, 0xe6, 0x83, 0xf4, 0xec, 0xae, 0xe4,
+	0xb7, 0x2f, 0x35, 0x99, 0xd3, 0xd7, 0xfa, 0x71, 0x46, 0xf2, 0x0b, 0x63, 0x3e, 0x55, 0x01, 0x7f,
+	0x40, 0x9d, 0x4e, 0x22, 0xf8, 0x4a, 0xc5, 0x81, 0x63, 0x1e, 0x10, 0x9e, 0x8a, 0x7d, 0xe7, 0xee,
+	0x8b, 0xe8, 0x64, 0xf7, 0x7d, 0xca, 0xa8, 0x67, 0x48, 0xff, 0xf9, 0x26, 0xe0, 0x34, 0x49, 0x46,
+	0xe6, 0x24, 0x38, 0x28, 0x82, 0xf3, 0xd7, 0xc4, 0xf3, 0xe2, 0x6d, 0xf1, 0xf2, 0x20, 0x50, 0xf7,
+	0x9e, 0x58, 0xa3, 0x27, 0xa9, 0xf9, 0xc5, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xe3, 0x4e,
+	0xd4, 0x8b, 0x0d, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// AdapterServiceClient is the client API for AdapterService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type AdapterServiceClient interface {
+	// GetHealthStatus is used by an AdapterService client to verify connectivity
+	// to the gRPC server hosting the AdapterService service
+	GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*voltha.HealthStatus, error)
+	// Device
+	AdoptDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error)
+	ReconcileDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error)
+	DeleteDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error)
+	DisableDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error)
+	ReEnableDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error)
+	RebootDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error)
+	SelfTestDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error)
+	GetOfpDeviceInfo(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*inter_container.SwitchCapability, error)
+	ChildDeviceLost(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Ports
+	EnablePort(ctx context.Context, in *voltha.Port, opts ...grpc.CallOption) (*empty.Empty, error)
+	DisablePort(ctx context.Context, in *voltha.Port, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Flows
+	UpdateFlowsBulk(ctx context.Context, in *inter_container.BulkFlows, opts ...grpc.CallOption) (*empty.Empty, error)
+	UpdateFlowsIncrementally(ctx context.Context, in *inter_container.IncrementalFlows, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Packets
+	SendPacketOut(ctx context.Context, in *inter_container.PacketOut, opts ...grpc.CallOption) (*empty.Empty, error)
+	// PM
+	UpdatePmConfig(ctx context.Context, in *inter_container.PmConfigsInfo, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Image
+	DownloadOnuImage(ctx context.Context, in *voltha.DeviceImageDownloadRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error)
+	GetOnuImageStatus(ctx context.Context, in *voltha.DeviceImageRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error)
+	AbortOnuImageUpgrade(ctx context.Context, in *voltha.DeviceImageRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error)
+	GetOnuImages(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*voltha.OnuImages, error)
+	ActivateOnuImage(ctx context.Context, in *voltha.DeviceImageRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error)
+	CommitOnuImage(ctx context.Context, in *voltha.DeviceImageRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error)
+	// Deprecated Image APIs
+	DownloadImage(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error)
+	GetImageDownloadStatus(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error)
+	CancelImageDownload(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error)
+	ActivateImageUpdate(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error)
+	RevertImageUpdate(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error)
+	// Tests
+	StartOmciTest(ctx context.Context, in *inter_container.OMCITest, opts ...grpc.CallOption) (*voltha.TestResponse, error)
+	SimulateAlarm(ctx context.Context, in *inter_container.SimulateAlarmMessage, opts ...grpc.CallOption) (*common.OperationResp, error)
+	// Events
+	SuppressEvent(ctx context.Context, in *voltha.EventFilter, opts ...grpc.CallOption) (*empty.Empty, error)
+	UnSuppressEvent(ctx context.Context, in *voltha.EventFilter, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Get/Set
+	GetExtValue(ctx context.Context, in *inter_container.GetExtValueMessage, opts ...grpc.CallOption) (*common.ReturnValues, error)
+	SetExtValue(ctx context.Context, in *inter_container.SetExtValueMessage, opts ...grpc.CallOption) (*empty.Empty, error)
+	GetSingleValue(ctx context.Context, in *extension.SingleGetValueRequest, opts ...grpc.CallOption) (*extension.SingleGetValueResponse, error)
+	SetSingleValue(ctx context.Context, in *extension.SingleSetValueRequest, opts ...grpc.CallOption) (*extension.SingleSetValueResponse, error)
+}
+
+type adapterServiceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewAdapterServiceClient(cc *grpc.ClientConn) AdapterServiceClient {
+	return &adapterServiceClient{cc}
+}
+
+func (c *adapterServiceClient) GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*voltha.HealthStatus, error) {
+	out := new(voltha.HealthStatus)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/GetHealthStatus", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) AdoptDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/AdoptDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) ReconcileDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/ReconcileDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) DeleteDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/DeleteDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) DisableDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/DisableDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) ReEnableDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/ReEnableDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) RebootDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/RebootDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) SelfTestDevice(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/SelfTestDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) GetOfpDeviceInfo(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*inter_container.SwitchCapability, error) {
+	out := new(inter_container.SwitchCapability)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/GetOfpDeviceInfo", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) ChildDeviceLost(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/ChildDeviceLost", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) EnablePort(ctx context.Context, in *voltha.Port, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/EnablePort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) DisablePort(ctx context.Context, in *voltha.Port, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/DisablePort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) UpdateFlowsBulk(ctx context.Context, in *inter_container.BulkFlows, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/UpdateFlowsBulk", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) UpdateFlowsIncrementally(ctx context.Context, in *inter_container.IncrementalFlows, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/UpdateFlowsIncrementally", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) SendPacketOut(ctx context.Context, in *inter_container.PacketOut, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/SendPacketOut", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) UpdatePmConfig(ctx context.Context, in *inter_container.PmConfigsInfo, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/UpdatePmConfig", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) DownloadOnuImage(ctx context.Context, in *voltha.DeviceImageDownloadRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error) {
+	out := new(voltha.DeviceImageResponse)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/DownloadOnuImage", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) GetOnuImageStatus(ctx context.Context, in *voltha.DeviceImageRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error) {
+	out := new(voltha.DeviceImageResponse)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/GetOnuImageStatus", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) AbortOnuImageUpgrade(ctx context.Context, in *voltha.DeviceImageRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error) {
+	out := new(voltha.DeviceImageResponse)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/AbortOnuImageUpgrade", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) GetOnuImages(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*voltha.OnuImages, error) {
+	out := new(voltha.OnuImages)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/GetOnuImages", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) ActivateOnuImage(ctx context.Context, in *voltha.DeviceImageRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error) {
+	out := new(voltha.DeviceImageResponse)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/ActivateOnuImage", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) CommitOnuImage(ctx context.Context, in *voltha.DeviceImageRequest, opts ...grpc.CallOption) (*voltha.DeviceImageResponse, error) {
+	out := new(voltha.DeviceImageResponse)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/CommitOnuImage", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) DownloadImage(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error) {
+	out := new(voltha.ImageDownload)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/DownloadImage", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) GetImageDownloadStatus(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error) {
+	out := new(voltha.ImageDownload)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/GetImageDownloadStatus", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) CancelImageDownload(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error) {
+	out := new(voltha.ImageDownload)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/CancelImageDownload", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) ActivateImageUpdate(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error) {
+	out := new(voltha.ImageDownload)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/ActivateImageUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) RevertImageUpdate(ctx context.Context, in *inter_container.ImageDownloadMessage, opts ...grpc.CallOption) (*voltha.ImageDownload, error) {
+	out := new(voltha.ImageDownload)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/RevertImageUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) StartOmciTest(ctx context.Context, in *inter_container.OMCITest, opts ...grpc.CallOption) (*voltha.TestResponse, error) {
+	out := new(voltha.TestResponse)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/StartOmciTest", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) SimulateAlarm(ctx context.Context, in *inter_container.SimulateAlarmMessage, opts ...grpc.CallOption) (*common.OperationResp, error) {
+	out := new(common.OperationResp)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/SimulateAlarm", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) SuppressEvent(ctx context.Context, in *voltha.EventFilter, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/SuppressEvent", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) UnSuppressEvent(ctx context.Context, in *voltha.EventFilter, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/UnSuppressEvent", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) GetExtValue(ctx context.Context, in *inter_container.GetExtValueMessage, opts ...grpc.CallOption) (*common.ReturnValues, error) {
+	out := new(common.ReturnValues)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/GetExtValue", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) SetExtValue(ctx context.Context, in *inter_container.SetExtValueMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/SetExtValue", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) GetSingleValue(ctx context.Context, in *extension.SingleGetValueRequest, opts ...grpc.CallOption) (*extension.SingleGetValueResponse, error) {
+	out := new(extension.SingleGetValueResponse)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/GetSingleValue", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *adapterServiceClient) SetSingleValue(ctx context.Context, in *extension.SingleSetValueRequest, opts ...grpc.CallOption) (*extension.SingleSetValueResponse, error) {
+	out := new(extension.SingleSetValueResponse)
+	err := c.cc.Invoke(ctx, "/voltha.AdapterService/SetSingleValue", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// AdapterServiceServer is the server API for AdapterService service.
+type AdapterServiceServer interface {
+	// GetHealthStatus is used by an AdapterService client to verify connectivity
+	// to the gRPC server hosting the AdapterService service
+	GetHealthStatus(context.Context, *empty.Empty) (*voltha.HealthStatus, error)
+	// Device
+	AdoptDevice(context.Context, *voltha.Device) (*empty.Empty, error)
+	ReconcileDevice(context.Context, *voltha.Device) (*empty.Empty, error)
+	DeleteDevice(context.Context, *voltha.Device) (*empty.Empty, error)
+	DisableDevice(context.Context, *voltha.Device) (*empty.Empty, error)
+	ReEnableDevice(context.Context, *voltha.Device) (*empty.Empty, error)
+	RebootDevice(context.Context, *voltha.Device) (*empty.Empty, error)
+	SelfTestDevice(context.Context, *voltha.Device) (*empty.Empty, error)
+	GetOfpDeviceInfo(context.Context, *voltha.Device) (*inter_container.SwitchCapability, error)
+	ChildDeviceLost(context.Context, *voltha.Device) (*empty.Empty, error)
+	// Ports
+	EnablePort(context.Context, *voltha.Port) (*empty.Empty, error)
+	DisablePort(context.Context, *voltha.Port) (*empty.Empty, error)
+	// Flows
+	UpdateFlowsBulk(context.Context, *inter_container.BulkFlows) (*empty.Empty, error)
+	UpdateFlowsIncrementally(context.Context, *inter_container.IncrementalFlows) (*empty.Empty, error)
+	// Packets
+	SendPacketOut(context.Context, *inter_container.PacketOut) (*empty.Empty, error)
+	// PM
+	UpdatePmConfig(context.Context, *inter_container.PmConfigsInfo) (*empty.Empty, error)
+	// Image
+	DownloadOnuImage(context.Context, *voltha.DeviceImageDownloadRequest) (*voltha.DeviceImageResponse, error)
+	GetOnuImageStatus(context.Context, *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
+	AbortOnuImageUpgrade(context.Context, *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
+	GetOnuImages(context.Context, *common.ID) (*voltha.OnuImages, error)
+	ActivateOnuImage(context.Context, *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
+	CommitOnuImage(context.Context, *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error)
+	// Deprecated Image APIs
+	DownloadImage(context.Context, *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error)
+	GetImageDownloadStatus(context.Context, *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error)
+	CancelImageDownload(context.Context, *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error)
+	ActivateImageUpdate(context.Context, *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error)
+	RevertImageUpdate(context.Context, *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error)
+	// Tests
+	StartOmciTest(context.Context, *inter_container.OMCITest) (*voltha.TestResponse, error)
+	SimulateAlarm(context.Context, *inter_container.SimulateAlarmMessage) (*common.OperationResp, error)
+	// Events
+	SuppressEvent(context.Context, *voltha.EventFilter) (*empty.Empty, error)
+	UnSuppressEvent(context.Context, *voltha.EventFilter) (*empty.Empty, error)
+	// Get/Set
+	GetExtValue(context.Context, *inter_container.GetExtValueMessage) (*common.ReturnValues, error)
+	SetExtValue(context.Context, *inter_container.SetExtValueMessage) (*empty.Empty, error)
+	GetSingleValue(context.Context, *extension.SingleGetValueRequest) (*extension.SingleGetValueResponse, error)
+	SetSingleValue(context.Context, *extension.SingleSetValueRequest) (*extension.SingleSetValueResponse, error)
+}
+
+// UnimplementedAdapterServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedAdapterServiceServer struct {
+}
+
+func (*UnimplementedAdapterServiceServer) GetHealthStatus(ctx context.Context, req *empty.Empty) (*voltha.HealthStatus, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetHealthStatus not implemented")
+}
+func (*UnimplementedAdapterServiceServer) AdoptDevice(ctx context.Context, req *voltha.Device) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method AdoptDevice not implemented")
+}
+func (*UnimplementedAdapterServiceServer) ReconcileDevice(ctx context.Context, req *voltha.Device) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ReconcileDevice not implemented")
+}
+func (*UnimplementedAdapterServiceServer) DeleteDevice(ctx context.Context, req *voltha.Device) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteDevice not implemented")
+}
+func (*UnimplementedAdapterServiceServer) DisableDevice(ctx context.Context, req *voltha.Device) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DisableDevice not implemented")
+}
+func (*UnimplementedAdapterServiceServer) ReEnableDevice(ctx context.Context, req *voltha.Device) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ReEnableDevice not implemented")
+}
+func (*UnimplementedAdapterServiceServer) RebootDevice(ctx context.Context, req *voltha.Device) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RebootDevice not implemented")
+}
+func (*UnimplementedAdapterServiceServer) SelfTestDevice(ctx context.Context, req *voltha.Device) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SelfTestDevice not implemented")
+}
+func (*UnimplementedAdapterServiceServer) GetOfpDeviceInfo(ctx context.Context, req *voltha.Device) (*inter_container.SwitchCapability, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetOfpDeviceInfo not implemented")
+}
+func (*UnimplementedAdapterServiceServer) ChildDeviceLost(ctx context.Context, req *voltha.Device) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ChildDeviceLost not implemented")
+}
+func (*UnimplementedAdapterServiceServer) EnablePort(ctx context.Context, req *voltha.Port) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method EnablePort not implemented")
+}
+func (*UnimplementedAdapterServiceServer) DisablePort(ctx context.Context, req *voltha.Port) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DisablePort not implemented")
+}
+func (*UnimplementedAdapterServiceServer) UpdateFlowsBulk(ctx context.Context, req *inter_container.BulkFlows) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UpdateFlowsBulk not implemented")
+}
+func (*UnimplementedAdapterServiceServer) UpdateFlowsIncrementally(ctx context.Context, req *inter_container.IncrementalFlows) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UpdateFlowsIncrementally not implemented")
+}
+func (*UnimplementedAdapterServiceServer) SendPacketOut(ctx context.Context, req *inter_container.PacketOut) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SendPacketOut not implemented")
+}
+func (*UnimplementedAdapterServiceServer) UpdatePmConfig(ctx context.Context, req *inter_container.PmConfigsInfo) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UpdatePmConfig not implemented")
+}
+func (*UnimplementedAdapterServiceServer) DownloadOnuImage(ctx context.Context, req *voltha.DeviceImageDownloadRequest) (*voltha.DeviceImageResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DownloadOnuImage not implemented")
+}
+func (*UnimplementedAdapterServiceServer) GetOnuImageStatus(ctx context.Context, req *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetOnuImageStatus not implemented")
+}
+func (*UnimplementedAdapterServiceServer) AbortOnuImageUpgrade(ctx context.Context, req *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method AbortOnuImageUpgrade not implemented")
+}
+func (*UnimplementedAdapterServiceServer) GetOnuImages(ctx context.Context, req *common.ID) (*voltha.OnuImages, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetOnuImages not implemented")
+}
+func (*UnimplementedAdapterServiceServer) ActivateOnuImage(ctx context.Context, req *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ActivateOnuImage not implemented")
+}
+func (*UnimplementedAdapterServiceServer) CommitOnuImage(ctx context.Context, req *voltha.DeviceImageRequest) (*voltha.DeviceImageResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CommitOnuImage not implemented")
+}
+func (*UnimplementedAdapterServiceServer) DownloadImage(ctx context.Context, req *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DownloadImage not implemented")
+}
+func (*UnimplementedAdapterServiceServer) GetImageDownloadStatus(ctx context.Context, req *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetImageDownloadStatus not implemented")
+}
+func (*UnimplementedAdapterServiceServer) CancelImageDownload(ctx context.Context, req *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CancelImageDownload not implemented")
+}
+func (*UnimplementedAdapterServiceServer) ActivateImageUpdate(ctx context.Context, req *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ActivateImageUpdate not implemented")
+}
+func (*UnimplementedAdapterServiceServer) RevertImageUpdate(ctx context.Context, req *inter_container.ImageDownloadMessage) (*voltha.ImageDownload, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RevertImageUpdate not implemented")
+}
+func (*UnimplementedAdapterServiceServer) StartOmciTest(ctx context.Context, req *inter_container.OMCITest) (*voltha.TestResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method StartOmciTest not implemented")
+}
+func (*UnimplementedAdapterServiceServer) SimulateAlarm(ctx context.Context, req *inter_container.SimulateAlarmMessage) (*common.OperationResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SimulateAlarm not implemented")
+}
+func (*UnimplementedAdapterServiceServer) SuppressEvent(ctx context.Context, req *voltha.EventFilter) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SuppressEvent not implemented")
+}
+func (*UnimplementedAdapterServiceServer) UnSuppressEvent(ctx context.Context, req *voltha.EventFilter) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UnSuppressEvent not implemented")
+}
+func (*UnimplementedAdapterServiceServer) GetExtValue(ctx context.Context, req *inter_container.GetExtValueMessage) (*common.ReturnValues, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetExtValue not implemented")
+}
+func (*UnimplementedAdapterServiceServer) SetExtValue(ctx context.Context, req *inter_container.SetExtValueMessage) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SetExtValue not implemented")
+}
+func (*UnimplementedAdapterServiceServer) GetSingleValue(ctx context.Context, req *extension.SingleGetValueRequest) (*extension.SingleGetValueResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetSingleValue not implemented")
+}
+func (*UnimplementedAdapterServiceServer) SetSingleValue(ctx context.Context, req *extension.SingleSetValueRequest) (*extension.SingleSetValueResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SetSingleValue not implemented")
+}
+
+func RegisterAdapterServiceServer(s *grpc.Server, srv AdapterServiceServer) {
+	s.RegisterService(&_AdapterService_serviceDesc, srv)
+}
+
+func _AdapterService_GetHealthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).GetHealthStatus(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/GetHealthStatus",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).GetHealthStatus(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_AdoptDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.Device)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).AdoptDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/AdoptDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).AdoptDevice(ctx, req.(*voltha.Device))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_ReconcileDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.Device)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).ReconcileDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/ReconcileDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).ReconcileDevice(ctx, req.(*voltha.Device))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_DeleteDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.Device)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).DeleteDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/DeleteDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).DeleteDevice(ctx, req.(*voltha.Device))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_DisableDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.Device)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).DisableDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/DisableDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).DisableDevice(ctx, req.(*voltha.Device))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_ReEnableDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.Device)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).ReEnableDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/ReEnableDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).ReEnableDevice(ctx, req.(*voltha.Device))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_RebootDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.Device)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).RebootDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/RebootDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).RebootDevice(ctx, req.(*voltha.Device))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_SelfTestDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.Device)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).SelfTestDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/SelfTestDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).SelfTestDevice(ctx, req.(*voltha.Device))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_GetOfpDeviceInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.Device)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).GetOfpDeviceInfo(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/GetOfpDeviceInfo",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).GetOfpDeviceInfo(ctx, req.(*voltha.Device))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_ChildDeviceLost_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.Device)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).ChildDeviceLost(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/ChildDeviceLost",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).ChildDeviceLost(ctx, req.(*voltha.Device))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_EnablePort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.Port)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).EnablePort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/EnablePort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).EnablePort(ctx, req.(*voltha.Port))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_DisablePort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.Port)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).DisablePort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/DisablePort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).DisablePort(ctx, req.(*voltha.Port))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_UpdateFlowsBulk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.BulkFlows)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).UpdateFlowsBulk(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/UpdateFlowsBulk",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).UpdateFlowsBulk(ctx, req.(*inter_container.BulkFlows))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_UpdateFlowsIncrementally_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.IncrementalFlows)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).UpdateFlowsIncrementally(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/UpdateFlowsIncrementally",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).UpdateFlowsIncrementally(ctx, req.(*inter_container.IncrementalFlows))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_SendPacketOut_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.PacketOut)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).SendPacketOut(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/SendPacketOut",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).SendPacketOut(ctx, req.(*inter_container.PacketOut))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_UpdatePmConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.PmConfigsInfo)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).UpdatePmConfig(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/UpdatePmConfig",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).UpdatePmConfig(ctx, req.(*inter_container.PmConfigsInfo))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_DownloadOnuImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.DeviceImageDownloadRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).DownloadOnuImage(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/DownloadOnuImage",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).DownloadOnuImage(ctx, req.(*voltha.DeviceImageDownloadRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_GetOnuImageStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.DeviceImageRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).GetOnuImageStatus(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/GetOnuImageStatus",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).GetOnuImageStatus(ctx, req.(*voltha.DeviceImageRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_AbortOnuImageUpgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.DeviceImageRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).AbortOnuImageUpgrade(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/AbortOnuImageUpgrade",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).AbortOnuImageUpgrade(ctx, req.(*voltha.DeviceImageRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_GetOnuImages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).GetOnuImages(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/GetOnuImages",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).GetOnuImages(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_ActivateOnuImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.DeviceImageRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).ActivateOnuImage(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/ActivateOnuImage",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).ActivateOnuImage(ctx, req.(*voltha.DeviceImageRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_CommitOnuImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.DeviceImageRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).CommitOnuImage(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/CommitOnuImage",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).CommitOnuImage(ctx, req.(*voltha.DeviceImageRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_DownloadImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.ImageDownloadMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).DownloadImage(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/DownloadImage",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).DownloadImage(ctx, req.(*inter_container.ImageDownloadMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_GetImageDownloadStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.ImageDownloadMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).GetImageDownloadStatus(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/GetImageDownloadStatus",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).GetImageDownloadStatus(ctx, req.(*inter_container.ImageDownloadMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_CancelImageDownload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.ImageDownloadMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).CancelImageDownload(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/CancelImageDownload",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).CancelImageDownload(ctx, req.(*inter_container.ImageDownloadMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_ActivateImageUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.ImageDownloadMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).ActivateImageUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/ActivateImageUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).ActivateImageUpdate(ctx, req.(*inter_container.ImageDownloadMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_RevertImageUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.ImageDownloadMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).RevertImageUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/RevertImageUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).RevertImageUpdate(ctx, req.(*inter_container.ImageDownloadMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_StartOmciTest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.OMCITest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).StartOmciTest(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/StartOmciTest",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).StartOmciTest(ctx, req.(*inter_container.OMCITest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_SimulateAlarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.SimulateAlarmMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).SimulateAlarm(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/SimulateAlarm",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).SimulateAlarm(ctx, req.(*inter_container.SimulateAlarmMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_SuppressEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.EventFilter)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).SuppressEvent(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/SuppressEvent",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).SuppressEvent(ctx, req.(*voltha.EventFilter))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_UnSuppressEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.EventFilter)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).UnSuppressEvent(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/UnSuppressEvent",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).UnSuppressEvent(ctx, req.(*voltha.EventFilter))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_GetExtValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.GetExtValueMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).GetExtValue(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/GetExtValue",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).GetExtValue(ctx, req.(*inter_container.GetExtValueMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_SetExtValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.SetExtValueMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).SetExtValue(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/SetExtValue",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).SetExtValue(ctx, req.(*inter_container.SetExtValueMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_GetSingleValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(extension.SingleGetValueRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).GetSingleValue(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/GetSingleValue",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).GetSingleValue(ctx, req.(*extension.SingleGetValueRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _AdapterService_SetSingleValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(extension.SingleSetValueRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AdapterServiceServer).SetSingleValue(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.AdapterService/SetSingleValue",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AdapterServiceServer).SetSingleValue(ctx, req.(*extension.SingleSetValueRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _AdapterService_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "voltha.AdapterService",
+	HandlerType: (*AdapterServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GetHealthStatus",
+			Handler:    _AdapterService_GetHealthStatus_Handler,
+		},
+		{
+			MethodName: "AdoptDevice",
+			Handler:    _AdapterService_AdoptDevice_Handler,
+		},
+		{
+			MethodName: "ReconcileDevice",
+			Handler:    _AdapterService_ReconcileDevice_Handler,
+		},
+		{
+			MethodName: "DeleteDevice",
+			Handler:    _AdapterService_DeleteDevice_Handler,
+		},
+		{
+			MethodName: "DisableDevice",
+			Handler:    _AdapterService_DisableDevice_Handler,
+		},
+		{
+			MethodName: "ReEnableDevice",
+			Handler:    _AdapterService_ReEnableDevice_Handler,
+		},
+		{
+			MethodName: "RebootDevice",
+			Handler:    _AdapterService_RebootDevice_Handler,
+		},
+		{
+			MethodName: "SelfTestDevice",
+			Handler:    _AdapterService_SelfTestDevice_Handler,
+		},
+		{
+			MethodName: "GetOfpDeviceInfo",
+			Handler:    _AdapterService_GetOfpDeviceInfo_Handler,
+		},
+		{
+			MethodName: "ChildDeviceLost",
+			Handler:    _AdapterService_ChildDeviceLost_Handler,
+		},
+		{
+			MethodName: "EnablePort",
+			Handler:    _AdapterService_EnablePort_Handler,
+		},
+		{
+			MethodName: "DisablePort",
+			Handler:    _AdapterService_DisablePort_Handler,
+		},
+		{
+			MethodName: "UpdateFlowsBulk",
+			Handler:    _AdapterService_UpdateFlowsBulk_Handler,
+		},
+		{
+			MethodName: "UpdateFlowsIncrementally",
+			Handler:    _AdapterService_UpdateFlowsIncrementally_Handler,
+		},
+		{
+			MethodName: "SendPacketOut",
+			Handler:    _AdapterService_SendPacketOut_Handler,
+		},
+		{
+			MethodName: "UpdatePmConfig",
+			Handler:    _AdapterService_UpdatePmConfig_Handler,
+		},
+		{
+			MethodName: "DownloadOnuImage",
+			Handler:    _AdapterService_DownloadOnuImage_Handler,
+		},
+		{
+			MethodName: "GetOnuImageStatus",
+			Handler:    _AdapterService_GetOnuImageStatus_Handler,
+		},
+		{
+			MethodName: "AbortOnuImageUpgrade",
+			Handler:    _AdapterService_AbortOnuImageUpgrade_Handler,
+		},
+		{
+			MethodName: "GetOnuImages",
+			Handler:    _AdapterService_GetOnuImages_Handler,
+		},
+		{
+			MethodName: "ActivateOnuImage",
+			Handler:    _AdapterService_ActivateOnuImage_Handler,
+		},
+		{
+			MethodName: "CommitOnuImage",
+			Handler:    _AdapterService_CommitOnuImage_Handler,
+		},
+		{
+			MethodName: "DownloadImage",
+			Handler:    _AdapterService_DownloadImage_Handler,
+		},
+		{
+			MethodName: "GetImageDownloadStatus",
+			Handler:    _AdapterService_GetImageDownloadStatus_Handler,
+		},
+		{
+			MethodName: "CancelImageDownload",
+			Handler:    _AdapterService_CancelImageDownload_Handler,
+		},
+		{
+			MethodName: "ActivateImageUpdate",
+			Handler:    _AdapterService_ActivateImageUpdate_Handler,
+		},
+		{
+			MethodName: "RevertImageUpdate",
+			Handler:    _AdapterService_RevertImageUpdate_Handler,
+		},
+		{
+			MethodName: "StartOmciTest",
+			Handler:    _AdapterService_StartOmciTest_Handler,
+		},
+		{
+			MethodName: "SimulateAlarm",
+			Handler:    _AdapterService_SimulateAlarm_Handler,
+		},
+		{
+			MethodName: "SuppressEvent",
+			Handler:    _AdapterService_SuppressEvent_Handler,
+		},
+		{
+			MethodName: "UnSuppressEvent",
+			Handler:    _AdapterService_UnSuppressEvent_Handler,
+		},
+		{
+			MethodName: "GetExtValue",
+			Handler:    _AdapterService_GetExtValue_Handler,
+		},
+		{
+			MethodName: "SetExtValue",
+			Handler:    _AdapterService_SetExtValue_Handler,
+		},
+		{
+			MethodName: "GetSingleValue",
+			Handler:    _AdapterService_GetSingleValue_Handler,
+		},
+		{
+			MethodName: "SetSingleValue",
+			Handler:    _AdapterService_SetSingleValue_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "voltha_protos/adapter_services.proto",
+}
+
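A brief orientation on how the generated pieces above fit together: an adapter implements `AdapterServiceServer`, registers it against a `grpc.Server`, and any unary interceptor supplied to that server is exactly what the `_AdapterService_*_Handler` functions above invoke when non-nil. The sketch below is illustrative only; the import alias `as` for this generated package, the listen address, and the `RegisterAdapterServiceServer`/`UnimplementedAdapterServiceServer` helpers (expected earlier in this file, following the pattern visible for the other services) are assumptions, not guarantees.

```go
package main

import (
	"context"
	"log"
	"net"

	"github.com/golang/protobuf/ptypes/empty"
	// Assumed import path for the package generated from voltha_protos/adapter_services.proto.
	as "github.com/opencord/voltha-protos/v5/go/adapter_services"
	"github.com/opencord/voltha-protos/v5/go/voltha"
	"google.golang.org/grpc"
)

// oltAdapter embeds the (assumed) UnimplementedAdapterServiceServer so any RPC it does
// not override returns codes.Unimplemented instead of breaking compilation.
type oltAdapter struct {
	as.UnimplementedAdapterServiceServer
}

// GetHealthStatus overrides the stub so the core can probe adapter liveness.
func (o *oltAdapter) GetHealthStatus(ctx context.Context, _ *empty.Empty) (*voltha.HealthStatus, error) {
	return &voltha.HealthStatus{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":50060") // hypothetical adapter endpoint
	if err != nil {
		log.Fatal(err)
	}
	// The interceptor passed here is the one each generated handler calls when non-nil.
	srv := grpc.NewServer(grpc.UnaryInterceptor(
		func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
			log.Printf("serving %s", info.FullMethod) // e.g. "/voltha.AdapterService/DownloadOnuImage"
			return handler(ctx, req)
		}))
	as.RegisterAdapterServiceServer(srv, &oltAdapter{})
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}
```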
+// OnuInterAdapterServiceClient is the client API for OnuInterAdapterService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type OnuInterAdapterServiceClient interface {
+	// GetHealthStatus is used by an OnuInterAdapterService client to verify connectivity
+	// to the gRPC server hosting the OnuInterAdapterService service
+	GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*voltha.HealthStatus, error)
+	OnuIndication(ctx context.Context, in *inter_container.OnuIndicationMessage, opts ...grpc.CallOption) (*empty.Empty, error)
+	OmciIndication(ctx context.Context, in *inter_container.OmciMessage, opts ...grpc.CallOption) (*empty.Empty, error)
+	DownloadTechProfile(ctx context.Context, in *inter_container.TechProfileDownloadMessage, opts ...grpc.CallOption) (*empty.Empty, error)
+	DeleteGemPort(ctx context.Context, in *inter_container.DeleteGemPortMessage, opts ...grpc.CallOption) (*empty.Empty, error)
+	DeleteTCont(ctx context.Context, in *inter_container.DeleteTcontMessage, opts ...grpc.CallOption) (*empty.Empty, error)
+}
+
+type onuInterAdapterServiceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewOnuInterAdapterServiceClient(cc *grpc.ClientConn) OnuInterAdapterServiceClient {
+	return &onuInterAdapterServiceClient{cc}
+}
+
+func (c *onuInterAdapterServiceClient) GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*voltha.HealthStatus, error) {
+	out := new(voltha.HealthStatus)
+	err := c.cc.Invoke(ctx, "/voltha.OnuInterAdapterService/GetHealthStatus", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *onuInterAdapterServiceClient) OnuIndication(ctx context.Context, in *inter_container.OnuIndicationMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.OnuInterAdapterService/OnuIndication", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *onuInterAdapterServiceClient) OmciIndication(ctx context.Context, in *inter_container.OmciMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.OnuInterAdapterService/OmciIndication", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *onuInterAdapterServiceClient) DownloadTechProfile(ctx context.Context, in *inter_container.TechProfileDownloadMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.OnuInterAdapterService/DownloadTechProfile", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *onuInterAdapterServiceClient) DeleteGemPort(ctx context.Context, in *inter_container.DeleteGemPortMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.OnuInterAdapterService/DeleteGemPort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *onuInterAdapterServiceClient) DeleteTCont(ctx context.Context, in *inter_container.DeleteTcontMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.OnuInterAdapterService/DeleteTCont", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// OnuInterAdapterServiceServer is the server API for OnuInterAdapterService service.
+type OnuInterAdapterServiceServer interface {
+	// GetHealthStatus is used by an OnuInterAdapterService client to verify connectivity
+	// to the gRPC server hosting the OnuInterAdapterService service
+	GetHealthStatus(context.Context, *empty.Empty) (*voltha.HealthStatus, error)
+	OnuIndication(context.Context, *inter_container.OnuIndicationMessage) (*empty.Empty, error)
+	OmciIndication(context.Context, *inter_container.OmciMessage) (*empty.Empty, error)
+	DownloadTechProfile(context.Context, *inter_container.TechProfileDownloadMessage) (*empty.Empty, error)
+	DeleteGemPort(context.Context, *inter_container.DeleteGemPortMessage) (*empty.Empty, error)
+	DeleteTCont(context.Context, *inter_container.DeleteTcontMessage) (*empty.Empty, error)
+}
+
+// UnimplementedOnuInterAdapterServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedOnuInterAdapterServiceServer struct {
+}
+
+func (*UnimplementedOnuInterAdapterServiceServer) GetHealthStatus(ctx context.Context, req *empty.Empty) (*voltha.HealthStatus, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetHealthStatus not implemented")
+}
+func (*UnimplementedOnuInterAdapterServiceServer) OnuIndication(ctx context.Context, req *inter_container.OnuIndicationMessage) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method OnuIndication not implemented")
+}
+func (*UnimplementedOnuInterAdapterServiceServer) OmciIndication(ctx context.Context, req *inter_container.OmciMessage) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method OmciIndication not implemented")
+}
+func (*UnimplementedOnuInterAdapterServiceServer) DownloadTechProfile(ctx context.Context, req *inter_container.TechProfileDownloadMessage) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DownloadTechProfile not implemented")
+}
+func (*UnimplementedOnuInterAdapterServiceServer) DeleteGemPort(ctx context.Context, req *inter_container.DeleteGemPortMessage) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteGemPort not implemented")
+}
+func (*UnimplementedOnuInterAdapterServiceServer) DeleteTCont(ctx context.Context, req *inter_container.DeleteTcontMessage) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteTCont not implemented")
+}
+
+func RegisterOnuInterAdapterServiceServer(s *grpc.Server, srv OnuInterAdapterServiceServer) {
+	s.RegisterService(&_OnuInterAdapterService_serviceDesc, srv)
+}
+
+func _OnuInterAdapterService_GetHealthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OnuInterAdapterServiceServer).GetHealthStatus(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.OnuInterAdapterService/GetHealthStatus",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OnuInterAdapterServiceServer).GetHealthStatus(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _OnuInterAdapterService_OnuIndication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.OnuIndicationMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OnuInterAdapterServiceServer).OnuIndication(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.OnuInterAdapterService/OnuIndication",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OnuInterAdapterServiceServer).OnuIndication(ctx, req.(*inter_container.OnuIndicationMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _OnuInterAdapterService_OmciIndication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.OmciMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OnuInterAdapterServiceServer).OmciIndication(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.OnuInterAdapterService/OmciIndication",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OnuInterAdapterServiceServer).OmciIndication(ctx, req.(*inter_container.OmciMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _OnuInterAdapterService_DownloadTechProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.TechProfileDownloadMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OnuInterAdapterServiceServer).DownloadTechProfile(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.OnuInterAdapterService/DownloadTechProfile",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OnuInterAdapterServiceServer).DownloadTechProfile(ctx, req.(*inter_container.TechProfileDownloadMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _OnuInterAdapterService_DeleteGemPort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.DeleteGemPortMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OnuInterAdapterServiceServer).DeleteGemPort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.OnuInterAdapterService/DeleteGemPort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OnuInterAdapterServiceServer).DeleteGemPort(ctx, req.(*inter_container.DeleteGemPortMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _OnuInterAdapterService_DeleteTCont_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.DeleteTcontMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OnuInterAdapterServiceServer).DeleteTCont(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.OnuInterAdapterService/DeleteTCont",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OnuInterAdapterServiceServer).DeleteTCont(ctx, req.(*inter_container.DeleteTcontMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _OnuInterAdapterService_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "voltha.OnuInterAdapterService",
+	HandlerType: (*OnuInterAdapterServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GetHealthStatus",
+			Handler:    _OnuInterAdapterService_GetHealthStatus_Handler,
+		},
+		{
+			MethodName: "OnuIndication",
+			Handler:    _OnuInterAdapterService_OnuIndication_Handler,
+		},
+		{
+			MethodName: "OmciIndication",
+			Handler:    _OnuInterAdapterService_OmciIndication_Handler,
+		},
+		{
+			MethodName: "DownloadTechProfile",
+			Handler:    _OnuInterAdapterService_DownloadTechProfile_Handler,
+		},
+		{
+			MethodName: "DeleteGemPort",
+			Handler:    _OnuInterAdapterService_DeleteGemPort_Handler,
+		},
+		{
+			MethodName: "DeleteTCont",
+			Handler:    _OnuInterAdapterService_DeleteTCont_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "voltha_protos/adapter_services.proto",
+}
+
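The OLT adapter is the natural caller of this service: when it receives an ONU discovery or OMCI response it forwards it to the ONU adapter through the generated client above. A minimal sketch follows; the import alias `as` for this generated package, the target address, and the empty request payload are assumptions used only to keep the example short.

```go
package main

import (
	"context"
	"log"
	"time"

	// Assumed import path for the package generated from voltha_protos/adapter_services.proto.
	as "github.com/opencord/voltha-protos/v5/go/adapter_services"
	ic "github.com/opencord/voltha-protos/v5/go/inter_container"
	"google.golang.org/grpc"
)

func main() {
	// Insecure dial keeps the sketch short; a real deployment would add transport credentials.
	conn, err := grpc.Dial("onu-adapter:50060", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := as.NewOnuInterAdapterServiceClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// OnuIndicationMessage fields are left empty here; an OLT adapter would populate
	// them from the indication received on its own southbound channel.
	if _, err := client.OnuIndication(ctx, &ic.OnuIndicationMessage{}); err != nil {
		log.Printf("OnuIndication failed: %v", err)
	}
}
```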
+// OltInterAdapterServiceClient is the client API for OltInterAdapterService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type OltInterAdapterServiceClient interface {
+	// GetHealthStatus is used by an OltInterAdapterService client to verify connectivity
+	// to the gRPC server hosting the OltInterAdapterService service
+	GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*voltha.HealthStatus, error)
+	ProxyOmciRequest(ctx context.Context, in *inter_container.OmciMessage, opts ...grpc.CallOption) (*empty.Empty, error)
+	GetTechProfileInstance(ctx context.Context, in *inter_container.TechProfileInstanceRequestMessage, opts ...grpc.CallOption) (*inter_container.TechProfileDownloadMessage, error)
+}
+
+type oltInterAdapterServiceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewOltInterAdapterServiceClient(cc *grpc.ClientConn) OltInterAdapterServiceClient {
+	return &oltInterAdapterServiceClient{cc}
+}
+
+func (c *oltInterAdapterServiceClient) GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*voltha.HealthStatus, error) {
+	out := new(voltha.HealthStatus)
+	err := c.cc.Invoke(ctx, "/voltha.OltInterAdapterService/GetHealthStatus", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *oltInterAdapterServiceClient) ProxyOmciRequest(ctx context.Context, in *inter_container.OmciMessage, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.OltInterAdapterService/ProxyOmciRequest", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *oltInterAdapterServiceClient) GetTechProfileInstance(ctx context.Context, in *inter_container.TechProfileInstanceRequestMessage, opts ...grpc.CallOption) (*inter_container.TechProfileDownloadMessage, error) {
+	out := new(inter_container.TechProfileDownloadMessage)
+	err := c.cc.Invoke(ctx, "/voltha.OltInterAdapterService/GetTechProfileInstance", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// OltInterAdapterServiceServer is the server API for OltInterAdapterService service.
+type OltInterAdapterServiceServer interface {
+	// GetHealthStatus is used by an OltInterAdapterService client to verify connectivity
+	// to the gRPC server hosting the OltInterAdapterService service
+	GetHealthStatus(context.Context, *empty.Empty) (*voltha.HealthStatus, error)
+	ProxyOmciRequest(context.Context, *inter_container.OmciMessage) (*empty.Empty, error)
+	GetTechProfileInstance(context.Context, *inter_container.TechProfileInstanceRequestMessage) (*inter_container.TechProfileDownloadMessage, error)
+}
+
+// UnimplementedOltInterAdapterServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedOltInterAdapterServiceServer struct {
+}
+
+func (*UnimplementedOltInterAdapterServiceServer) GetHealthStatus(ctx context.Context, req *empty.Empty) (*voltha.HealthStatus, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetHealthStatus not implemented")
+}
+func (*UnimplementedOltInterAdapterServiceServer) ProxyOmciRequest(ctx context.Context, req *inter_container.OmciMessage) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ProxyOmciRequest not implemented")
+}
+func (*UnimplementedOltInterAdapterServiceServer) GetTechProfileInstance(ctx context.Context, req *inter_container.TechProfileInstanceRequestMessage) (*inter_container.TechProfileDownloadMessage, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetTechProfileInstance not implemented")
+}
+
+func RegisterOltInterAdapterServiceServer(s *grpc.Server, srv OltInterAdapterServiceServer) {
+	s.RegisterService(&_OltInterAdapterService_serviceDesc, srv)
+}
+
+func _OltInterAdapterService_GetHealthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OltInterAdapterServiceServer).GetHealthStatus(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.OltInterAdapterService/GetHealthStatus",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OltInterAdapterServiceServer).GetHealthStatus(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _OltInterAdapterService_ProxyOmciRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.OmciMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OltInterAdapterServiceServer).ProxyOmciRequest(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.OltInterAdapterService/ProxyOmciRequest",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OltInterAdapterServiceServer).ProxyOmciRequest(ctx, req.(*inter_container.OmciMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _OltInterAdapterService_GetTechProfileInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.TechProfileInstanceRequestMessage)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OltInterAdapterServiceServer).GetTechProfileInstance(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.OltInterAdapterService/GetTechProfileInstance",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OltInterAdapterServiceServer).GetTechProfileInstance(ctx, req.(*inter_container.TechProfileInstanceRequestMessage))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _OltInterAdapterService_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "voltha.OltInterAdapterService",
+	HandlerType: (*OltInterAdapterServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GetHealthStatus",
+			Handler:    _OltInterAdapterService_GetHealthStatus_Handler,
+		},
+		{
+			MethodName: "ProxyOmciRequest",
+			Handler:    _OltInterAdapterService_ProxyOmciRequest_Handler,
+		},
+		{
+			MethodName: "GetTechProfileInstance",
+			Handler:    _OltInterAdapterService_GetTechProfileInstance_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "voltha_protos/adapter_services.proto",
+}
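In the opposite direction, an ONU adapter uses the OltInterAdapterService client above, for example to proxy OMCI requests or to ask the OLT adapter for a tech profile instance. The sketch below is hedged the same way: the import alias `as`, the endpoint address, and the empty request fields are assumptions, not part of the generated API.

```go
package main

import (
	"context"
	"log"
	"time"

	// Assumed import path for the package generated from voltha_protos/adapter_services.proto.
	as "github.com/opencord/voltha-protos/v5/go/adapter_services"
	ic "github.com/opencord/voltha-protos/v5/go/inter_container"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("olt-adapter:50060", grpc.WithInsecure()) // hypothetical endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := as.NewOltInterAdapterServiceClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Request fields are omitted; an ONU adapter would identify the parent device and
	// the tech profile path it needs the instance for.
	tp, err := client.GetTechProfileInstance(ctx, &ic.TechProfileInstanceRequestMessage{})
	if err != nil {
		log.Fatalf("GetTechProfileInstance failed: %v", err)
	}
	log.Printf("got TechProfileDownloadMessage: %v", tp)
}
```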
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/common/common.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/common/common.pb.go
similarity index 82%
rename from vendor/github.com/opencord/voltha-protos/v4/go/common/common.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/common/common.pb.go
index b57e775..a397778 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/common/common.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/common/common.pb.go
@@ -607,45 +607,45 @@
 func init() { proto.RegisterFile("voltha_protos/common.proto", fileDescriptor_c2e3fd231961e826) }
 
 var fileDescriptor_c2e3fd231961e826 = []byte{
-	// 634 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x4f, 0xe3, 0x3a,
-	0x10, 0x6d, 0xfa, 0x05, 0x9d, 0xd2, 0x90, 0x6b, 0x3e, 0xd4, 0x8b, 0xae, 0x74, 0xab, 0xbc, 0xc0,
-	0xbd, 0x62, 0x5b, 0x89, 0xe5, 0x75, 0x1f, 0x42, 0xe2, 0xed, 0x5a, 0x80, 0x53, 0x39, 0x49, 0xd1,
-	0xf2, 0x12, 0x85, 0xc6, 0x40, 0x24, 0x1a, 0x47, 0x89, 0x8b, 0xc4, 0xeb, 0xfe, 0x83, 0xfd, 0xab,
-	0xfb, 0x0b, 0x56, 0x76, 0xca, 0xd7, 0x8a, 0x97, 0xd6, 0x67, 0xce, 0xc9, 0x8c, 0xcf, 0x8c, 0x07,
-	0x0e, 0x1e, 0xc5, 0x83, 0xbc, 0x4f, 0xe2, 0xa2, 0x14, 0x52, 0x54, 0x93, 0x85, 0x58, 0x2e, 0x45,
-	0x3e, 0xd6, 0x08, 0x75, 0x6b, 0x64, 0xef, 0x42, 0x93, 0x78, 0xc8, 0x84, 0x66, 0x96, 0x0e, 0x8d,
-	0x91, 0x71, 0xd4, 0x63, 0xcd, 0x2c, 0xb5, 0x0f, 0xa1, 0x45, 0xbc, 0x0a, 0x8d, 0xa0, 0x93, 0x49,
-	0xbe, 0xac, 0x86, 0xc6, 0xa8, 0x75, 0xd4, 0x3f, 0x81, 0xf1, 0x3a, 0x05, 0xf1, 0x58, 0x4d, 0xd8,
-	0xf7, 0x00, 0x4e, 0xba, 0xcc, 0xf2, 0x40, 0x26, 0x92, 0xdb, 0xd7, 0xd0, 0x09, 0x9f, 0x0a, 0x5e,
-	0xa1, 0x3e, 0x6c, 0x44, 0xf4, 0x9c, 0xfa, 0x57, 0xd4, 0x6a, 0x20, 0x04, 0xe6, 0x8c, 0xe1, 0x19,
-	0xf3, 0xe7, 0x24, 0x20, 0x3e, 0xc5, 0x9e, 0x65, 0x28, 0x01, 0xa6, 0xce, 0xd9, 0x05, 0xf6, 0xac,
-	0x26, 0xda, 0x82, 0x4d, 0x8f, 0x04, 0x35, 0x6a, 0xa1, 0x3d, 0xf8, 0xcb, 0xf3, 0xaf, 0xe8, 0x85,
-	0xef, 0x78, 0x84, 0x4e, 0x63, 0x72, 0xe9, 0x4c, 0xb1, 0xd5, 0xb6, 0x7f, 0x1a, 0x00, 0x7e, 0xc1,
-	0x4b, 0x55, 0x69, 0x55, 0xd9, 0x3f, 0x8c, 0x0f, 0x6b, 0x99, 0x00, 0x1e, 0x09, 0x5c, 0x7f, 0x8e,
-	0x99, 0xae, 0x63, 0x02, 0x38, 0x6e, 0x48, 0xe6, 0x4e, 0x48, 0xe8, 0xd4, 0x6a, 0x2a, 0x71, 0x88,
-	0x03, 0x0d, 0x5a, 0x08, 0xa0, 0xab, 0x49, 0x6c, 0xb5, 0xd5, 0xf9, 0xab, 0x43, 0xd4, 0x0d, 0x3a,
-	0x68, 0x1b, 0xfa, 0x0c, 0xbb, 0x3e, 0x75, 0xc9, 0x85, 0x12, 0x76, 0xd1, 0x3e, 0xa0, 0x37, 0x81,
-	0x78, 0x2d, 0xdc, 0xb0, 0x31, 0x0c, 0x5c, 0x91, 0xe7, 0x7c, 0x21, 0xd7, 0xb7, 0x3a, 0xfd, 0xf0,
-	0x52, 0xdb, 0xd0, 0x8f, 0x28, 0xc3, 0x8e, 0xfb, 0x4d, 0x79, 0xb4, 0x0c, 0x34, 0x80, 0xde, 0x2b,
-	0x6c, 0xda, 0xbf, 0x0c, 0x18, 0x28, 0x6b, 0x89, 0xcc, 0x44, 0xce, 0x78, 0x55, 0xa0, 0x2f, 0xd0,
-	0x5e, 0x88, 0x94, 0xeb, 0x89, 0x98, 0x27, 0xff, 0x3d, 0xf7, 0xfd, 0x9d, 0xe8, 0x2d, 0x92, 0xab,
-	0x32, 0x77, 0x45, 0xca, 0x99, 0xfe, 0x0c, 0x1d, 0xc2, 0x76, 0x92, 0xa6, 0x99, 0xe2, 0x92, 0x87,
-	0x38, 0xcb, 0x6f, 0xc5, 0xb0, 0xa9, 0x67, 0x6b, 0xbe, 0x86, 0x49, 0x7e, 0x2b, 0xec, 0x27, 0xd8,
-	0xf9, 0x20, 0x8b, 0x1a, 0x81, 0x3f, 0xc3, 0xcc, 0x09, 0x89, 0x4f, 0xe3, 0x20, 0x72, 0x5d, 0x1c,
-	0x04, 0x56, 0xe3, 0x7d, 0x58, 0x35, 0x21, 0x62, 0xca, 0xcd, 0xdf, 0xb0, 0xf7, 0x1a, 0x8e, 0x68,
-	0x10, 0xcd, 0x66, 0x3e, 0x0b, 0xf5, 0x64, 0xdf, 0x51, 0x84, 0xc6, 0x33, 0xe6, 0x4f, 0x99, 0x4a,
-	0xd6, 0xb2, 0x8f, 0xa1, 0x37, 0x4f, 0x1e, 0x56, 0x5c, 0xf5, 0xcb, 0xfe, 0x17, 0xda, 0xea, 0x1f,
-	0xf5, 0xa0, 0x83, 0x2f, 0x67, 0xe1, 0x77, 0xab, 0xb1, 0x7e, 0x14, 0xa1, 0x43, 0x5d, 0x6c, 0x19,
-	0x36, 0x05, 0x53, 0xab, 0x83, 0x82, 0x2f, 0xb2, 0xdb, 0x8c, 0x97, 0x7f, 0x3e, 0x59, 0x74, 0x0c,
-	0x9d, 0x47, 0xa5, 0xd0, 0x4e, 0xcd, 0x93, 0xfd, 0xe7, 0x9e, 0xbd, 0x14, 0x19, 0xab, 0x1f, 0x56,
-	0x8b, 0x6c, 0x09, 0x5b, 0xb5, 0x5f, 0x4d, 0x57, 0xc8, 0x82, 0x56, 0xc0, 0xa5, 0x4e, 0x37, 0x60,
-	0xea, 0x88, 0x46, 0xd0, 0x8f, 0xf2, 0x6a, 0x55, 0x14, 0xa2, 0x94, 0x3c, 0xd5, 0x59, 0x07, 0xec,
-	0x6d, 0x08, 0xed, 0x42, 0x07, 0x97, 0xa5, 0x28, 0x87, 0x2d, 0xcd, 0xd5, 0x00, 0x1d, 0xc0, 0xa6,
-	0x97, 0x55, 0x32, 0xc9, 0x17, 0x7c, 0xd8, 0xd6, 0xc4, 0x0b, 0xfe, 0xff, 0x1f, 0xd8, 0x0a, 0x79,
-	0x25, 0x2f, 0x45, 0xca, 0xcf, 0xf9, 0x53, 0xa5, 0x3c, 0x26, 0x45, 0x16, 0x4b, 0x5e, 0x49, 0xab,
-	0x71, 0x86, 0x61, 0x47, 0x94, 0x77, 0x63, 0x51, 0xf0, 0x7c, 0x21, 0xca, 0x74, 0x5c, 0x6f, 0xef,
-	0xf5, 0xf8, 0x2e, 0x93, 0xf7, 0xab, 0x1b, 0xe5, 0x67, 0xf2, 0xcc, 0x4d, 0x6a, 0xee, 0xd3, 0x7a,
-	0xb3, 0x1f, 0x4f, 0x27, 0x77, 0x62, 0xbd, 0xdf, 0x37, 0x5d, 0x1d, 0xfc, 0xfc, 0x3b, 0x00, 0x00,
-	0xff, 0xff, 0x29, 0xd3, 0x39, 0x3c, 0xfe, 0x03, 0x00, 0x00,
+	// 635 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x4f, 0xdb, 0x4a,
+	0x10, 0x8d, 0xf3, 0x05, 0x99, 0x10, 0xe3, 0xbb, 0x7c, 0x28, 0x17, 0x5d, 0xe9, 0x46, 0x7e, 0x81,
+	0x7b, 0x45, 0x13, 0x89, 0xb6, 0x8f, 0x7d, 0x30, 0xf6, 0x36, 0x5d, 0x01, 0xeb, 0x68, 0x6d, 0x07,
+	0x95, 0x17, 0xcb, 0xc4, 0x0b, 0x58, 0x22, 0x5e, 0xcb, 0xde, 0x20, 0xf1, 0xda, 0x7f, 0xd0, 0xbf,
+	0xda, 0x5f, 0x50, 0xed, 0x3a, 0x7c, 0x55, 0xbc, 0x24, 0x7b, 0xe6, 0x1c, 0xcf, 0xec, 0x99, 0xd9,
+	0x81, 0x83, 0x07, 0x71, 0x2f, 0xef, 0x92, 0xb8, 0x28, 0x85, 0x14, 0xd5, 0x64, 0x21, 0x96, 0x4b,
+	0x91, 0x8f, 0x35, 0x42, 0xdd, 0x1a, 0xd9, 0xbb, 0xd0, 0x24, 0x1e, 0x32, 0xa1, 0x99, 0xa5, 0x43,
+	0x63, 0x64, 0x1c, 0xf5, 0x58, 0x33, 0x4b, 0xed, 0x43, 0x68, 0x11, 0xaf, 0x42, 0x23, 0xe8, 0x64,
+	0x92, 0x2f, 0xab, 0xa1, 0x31, 0x6a, 0x1d, 0xf5, 0x4f, 0x60, 0xbc, 0x4e, 0x41, 0x3c, 0x56, 0x13,
+	0xf6, 0x1d, 0x80, 0x93, 0x2e, 0xb3, 0x3c, 0x90, 0x89, 0xe4, 0xf6, 0x15, 0x74, 0xc2, 0xc7, 0x82,
+	0x57, 0xa8, 0x0f, 0x1b, 0x11, 0x3d, 0xa3, 0xfe, 0x25, 0xb5, 0x1a, 0x08, 0x81, 0x39, 0x63, 0x78,
+	0xc6, 0xfc, 0x39, 0x09, 0x88, 0x4f, 0xb1, 0x67, 0x19, 0x4a, 0x80, 0xa9, 0x73, 0x7a, 0x8e, 0x3d,
+	0xab, 0x89, 0xb6, 0x60, 0xd3, 0x23, 0x41, 0x8d, 0x5a, 0x68, 0x0f, 0xfe, 0xf2, 0xfc, 0x4b, 0x7a,
+	0xee, 0x3b, 0x1e, 0xa1, 0xd3, 0x98, 0x5c, 0x38, 0x53, 0x6c, 0xb5, 0xed, 0x9f, 0x06, 0x80, 0x5f,
+	0xf0, 0x52, 0x55, 0x5a, 0x55, 0xf6, 0x0f, 0xe3, 0xdd, 0x5a, 0x26, 0x80, 0x47, 0x02, 0xd7, 0x9f,
+	0x63, 0xa6, 0xeb, 0x98, 0x00, 0x8e, 0x1b, 0x92, 0xb9, 0x13, 0x12, 0x3a, 0xb5, 0x9a, 0x4a, 0x1c,
+	0xe2, 0x40, 0x83, 0x16, 0x02, 0xe8, 0x6a, 0x12, 0x5b, 0x6d, 0x75, 0xfe, 0xea, 0x10, 0x75, 0x83,
+	0x0e, 0xda, 0x86, 0x3e, 0xc3, 0xae, 0x4f, 0x5d, 0x72, 0xae, 0x84, 0x5d, 0xb4, 0x0f, 0xe8, 0x55,
+	0x20, 0x5e, 0x0b, 0x37, 0x6c, 0x0c, 0x03, 0x57, 0xe4, 0x39, 0x5f, 0xc8, 0xf5, 0xad, 0x3e, 0xbd,
+	0x7b, 0xa9, 0x6d, 0xe8, 0x47, 0x94, 0x61, 0xc7, 0xfd, 0xa6, 0x3c, 0x5a, 0x06, 0x1a, 0x40, 0xef,
+	0x05, 0x36, 0xed, 0x5f, 0x06, 0x0c, 0x94, 0xb5, 0x44, 0x66, 0x22, 0x67, 0xbc, 0x2a, 0xd0, 0x17,
+	0x68, 0x2f, 0x44, 0xca, 0xf5, 0x44, 0xcc, 0x93, 0xff, 0x9e, 0xfa, 0xfe, 0x46, 0xf4, 0x1a, 0xc9,
+	0x55, 0x99, 0xbb, 0x22, 0xe5, 0x4c, 0x7f, 0x86, 0x0e, 0x61, 0x3b, 0x49, 0xd3, 0x4c, 0x71, 0xc9,
+	0x7d, 0x9c, 0xe5, 0x37, 0x62, 0xd8, 0xd4, 0xb3, 0x35, 0x5f, 0xc2, 0x24, 0xbf, 0x11, 0xf6, 0x23,
+	0xec, 0xbc, 0x93, 0x45, 0x8d, 0xc0, 0x9f, 0x61, 0xe6, 0x84, 0xc4, 0xa7, 0x71, 0x10, 0xb9, 0x2e,
+	0x0e, 0x02, 0xab, 0xf1, 0x36, 0xac, 0x9a, 0x10, 0x31, 0xe5, 0xe6, 0x6f, 0xd8, 0x7b, 0x09, 0x47,
+	0x34, 0x88, 0x66, 0x33, 0x9f, 0x85, 0x7a, 0xb2, 0x6f, 0x28, 0x42, 0xe3, 0x19, 0xf3, 0xa7, 0x4c,
+	0x25, 0x6b, 0xd9, 0xc7, 0xd0, 0x9b, 0x27, 0xf7, 0x2b, 0xae, 0xfa, 0x65, 0xff, 0x0b, 0x6d, 0xf5,
+	0x8f, 0x7a, 0xd0, 0xc1, 0x17, 0xb3, 0xf0, 0xbb, 0xd5, 0x58, 0x3f, 0x8a, 0xd0, 0xa1, 0x2e, 0xb6,
+	0x0c, 0x9b, 0x82, 0xa9, 0xd5, 0x41, 0xc1, 0x17, 0xd9, 0x4d, 0xc6, 0xcb, 0x3f, 0x9f, 0x2c, 0x3a,
+	0x86, 0xce, 0x83, 0x52, 0x68, 0xa7, 0xe6, 0xc9, 0xfe, 0x53, 0xcf, 0x9e, 0x8b, 0x8c, 0xd5, 0x0f,
+	0xab, 0x45, 0xb6, 0x84, 0xad, 0xda, 0xaf, 0xa6, 0x2b, 0x64, 0x41, 0x2b, 0xe0, 0x52, 0xa7, 0x1b,
+	0x30, 0x75, 0x44, 0x23, 0xe8, 0x47, 0x79, 0xb5, 0x2a, 0x0a, 0x51, 0x4a, 0x9e, 0xea, 0xac, 0x03,
+	0xf6, 0x3a, 0x84, 0x76, 0xa1, 0x83, 0xcb, 0x52, 0x94, 0xc3, 0x96, 0xe6, 0x6a, 0x80, 0x0e, 0x60,
+	0xd3, 0xcb, 0x2a, 0x99, 0xe4, 0x0b, 0x3e, 0x6c, 0x6b, 0xe2, 0x19, 0xff, 0xff, 0x0f, 0x6c, 0x85,
+	0xbc, 0x92, 0x17, 0x22, 0xe5, 0x67, 0xfc, 0xb1, 0x52, 0x1e, 0x93, 0x22, 0x8b, 0x25, 0xaf, 0xa4,
+	0xd5, 0x38, 0xc5, 0xb0, 0x23, 0xca, 0xdb, 0xb1, 0x28, 0x78, 0xbe, 0x10, 0x65, 0x3a, 0xae, 0xb7,
+	0xf7, 0x6a, 0x7c, 0x9b, 0xc9, 0xbb, 0xd5, 0xb5, 0xf2, 0x33, 0x79, 0xe2, 0x26, 0x35, 0xf7, 0x61,
+	0xbd, 0xd9, 0x0f, 0x9f, 0x27, 0xb7, 0x62, 0xbd, 0xdf, 0xd7, 0x5d, 0x1d, 0xfc, 0xf8, 0x3b, 0x00,
+	0x00, 0xff, 0xff, 0x71, 0x53, 0xdb, 0xeb, 0xfe, 0x03, 0x00, 0x00,
 }
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/common/meta.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/common/meta.pb.go
similarity index 70%
rename from vendor/github.com/opencord/voltha-protos/v4/go/common/meta.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/common/meta.pb.go
index 0af225d..26dfeef 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/common/meta.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/common/meta.pb.go
@@ -120,23 +120,23 @@
 func init() { proto.RegisterFile("voltha_protos/meta.proto", fileDescriptor_96b320e8a67781f3) }
 
 var fileDescriptor_96b320e8a67781f3 = []byte{
-	// 281 bytes of a gzipped FileDescriptorProto
+	// 282 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0x41, 0x4b, 0x84, 0x40,
 	0x14, 0xc7, 0xb3, 0x05, 0xc1, 0x17, 0x2d, 0xe6, 0x69, 0x59, 0x58, 0x90, 0x4e, 0x4b, 0xd0, 0x0c,
-	0x58, 0xa7, 0xbd, 0x6d, 0xdb, 0x6e, 0x2d, 0x98, 0x82, 0x74, 0xa9, 0x8b, 0xe8, 0x38, 0xa9, 0xa4,
-	0x3e, 0x71, 0x66, 0x17, 0xfa, 0xa8, 0x5d, 0xfa, 0x04, 0xf5, 0x1d, 0x42, 0xc7, 0xe9, 0xda, 0xed,
-	0xff, 0xde, 0xfc, 0xe7, 0xc7, 0x8f, 0x07, 0xb3, 0x23, 0x56, 0xb2, 0x48, 0xe2, 0xb6, 0x43, 0x89,
-	0x82, 0xd6, 0x5c, 0x26, 0x64, 0xc8, 0x8e, 0xa9, 0x5e, 0xe6, 0x6e, 0x8e, 0x98, 0x57, 0x9c, 0x0e,
-	0xdb, 0xf4, 0xf0, 0x46, 0x33, 0x2e, 0x58, 0x57, 0xb6, 0x12, 0x3b, 0xd5, 0xbc, 0x5c, 0x80, 0xb5,
-	0x29, 0xca, 0x2a, 0x0b, 0x30, 0xe3, 0x8e, 0x0d, 0x93, 0x77, 0xfe, 0x31, 0x33, 0x5c, 0x63, 0x69,
-	0x45, 0x7d, 0xbc, 0xf2, 0xc0, 0x5c, 0x33, 0xc6, 0x85, 0x70, 0x00, 0xcc, 0x4d, 0x18, 0xec, 0xf6,
-	0x0f, 0xf6, 0x89, 0x73, 0x0e, 0x56, 0xb4, 0x5d, 0xdf, 0xc7, 0x61, 0xe0, 0xbf, 0xd8, 0xc6, 0x38,
-	0xfa, 0xf1, 0xf3, 0xfe, 0x69, 0x6b, 0x9f, 0xae, 0x22, 0x00, 0xd6, 0x23, 0xe3, 0xa6, 0x67, 0x2e,
-	0x88, 0x72, 0x20, 0xda, 0x81, 0xec, 0x4a, 0x5e, 0x65, 0x61, 0x2b, 0x4b, 0x6c, 0xc4, 0xec, 0xfb,
-	0xeb, 0x73, 0xe2, 0x1a, 0xcb, 0x33, 0xef, 0x82, 0x28, 0x67, 0xf2, 0xa7, 0x13, 0x59, 0x4c, 0xc7,
-	0xd5, 0x23, 0x98, 0x89, 0xf2, 0xf8, 0x87, 0xf7, 0xa3, 0x78, 0x53, 0x6f, 0xaa, 0x79, 0xca, 0x3f,
-	0x1a, 0xff, 0xdf, 0xf9, 0x30, 0xc7, 0x2e, 0x27, 0xd8, 0xf2, 0x86, 0x61, 0x97, 0xe9, 0x16, 0xc3,
-	0xba, 0xc6, 0xe6, 0x95, 0xe4, 0xa5, 0x2c, 0x0e, 0x69, 0x3f, 0x52, 0x5d, 0xa1, 0xaa, 0x72, 0x3d,
-	0x9e, 0xf9, 0x78, 0x4b, 0x73, 0xa4, 0xaa, 0x9f, 0x9a, 0xc3, 0xf2, 0xe6, 0x37, 0x00, 0x00, 0xff,
-	0xff, 0x0e, 0xea, 0x69, 0xf5, 0x8b, 0x01, 0x00, 0x00,
+	0x18, 0x5d, 0xf6, 0xb6, 0x6d, 0xbb, 0xb5, 0x60, 0x0a, 0xd2, 0xa5, 0x2e, 0xa2, 0xe3, 0xa4, 0x92,
+	0xfa, 0xc4, 0x99, 0x5d, 0xe8, 0xa3, 0x76, 0xe9, 0x13, 0xd4, 0x77, 0x08, 0x1d, 0xa7, 0x6b, 0xb7,
+	0xff, 0x7b, 0xf3, 0x9f, 0x1f, 0x3f, 0x1e, 0xcc, 0x8e, 0x58, 0xc9, 0x22, 0x89, 0xdb, 0x0e, 0x25,
+	0x0a, 0x5a, 0x73, 0x99, 0x90, 0x21, 0x3b, 0xa6, 0x7a, 0x99, 0xbb, 0x39, 0x62, 0x5e, 0x71, 0x3a,
+	0x6c, 0xd3, 0xc3, 0x1b, 0xcd, 0xb8, 0x60, 0x5d, 0xd9, 0x4a, 0xec, 0x54, 0xf3, 0x72, 0x01, 0xd6,
+	0xa6, 0x28, 0xab, 0x2c, 0xc0, 0x8c, 0x3b, 0x36, 0x4c, 0xde, 0xf9, 0xc7, 0xcc, 0x70, 0x8d, 0xa5,
+	0x15, 0xf5, 0xf1, 0xca, 0x03, 0x73, 0xcd, 0x18, 0x17, 0xc2, 0x01, 0x30, 0x37, 0x61, 0xb0, 0xdb,
+	0x3f, 0xd8, 0x27, 0xce, 0x39, 0x58, 0xd1, 0x76, 0x7d, 0x1f, 0x87, 0x81, 0xff, 0x62, 0x1b, 0xe3,
+	0xe8, 0xc7, 0xcf, 0xfb, 0xa7, 0xad, 0x7d, 0xba, 0x8a, 0x00, 0x58, 0x8f, 0x8c, 0x9b, 0x9e, 0xb9,
+	0x20, 0xca, 0x81, 0x68, 0x07, 0xb2, 0x2b, 0x79, 0x95, 0x85, 0xad, 0x2c, 0xb1, 0x11, 0xb3, 0xef,
+	0xaf, 0xcf, 0x89, 0x6b, 0x2c, 0xcf, 0xbc, 0x0b, 0xa2, 0x9c, 0xc9, 0x9f, 0x4e, 0x64, 0x31, 0x1d,
+	0x57, 0x8f, 0x60, 0x26, 0xca, 0xe3, 0x1f, 0xde, 0x8f, 0xe2, 0x4d, 0xbd, 0xa9, 0xe6, 0x29, 0xff,
+	0x68, 0xfc, 0x7f, 0xe7, 0xc3, 0x1c, 0xbb, 0x9c, 0x60, 0xcb, 0x1b, 0x86, 0x5d, 0xa6, 0x5b, 0x0c,
+	0xeb, 0x1a, 0x9b, 0x57, 0x92, 0x97, 0xb2, 0x38, 0xa4, 0xfd, 0x48, 0x75, 0x85, 0xaa, 0xca, 0xf5,
+	0x78, 0xe6, 0xe3, 0x2d, 0xcd, 0x91, 0xaa, 0x7e, 0x6a, 0x0e, 0xcb, 0x9b, 0xdf, 0x00, 0x00, 0x00,
+	0xff, 0xff, 0x56, 0x6a, 0x8b, 0x22, 0x8b, 0x01, 0x00, 0x00,
 }
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/core/core.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/core/core.pb.go
new file mode 100644
index 0000000..f586730
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/core/core.pb.go
@@ -0,0 +1,1089 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/core.proto
+
+package core
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	empty "github.com/golang/protobuf/ptypes/empty"
+	common "github.com/opencord/voltha-protos/v5/go/common"
+	inter_container "github.com/opencord/voltha-protos/v5/go/inter_container"
+	voltha "github.com/opencord/voltha-protos/v5/go/voltha"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Transient State for devices
+type DeviceTransientState_Types int32
+
+const (
+	// The transient state of the device is not set
+	DeviceTransientState_NONE DeviceTransientState_Types = 0
+	// The state of the device in the core can be any state, i.e. DELETING, DELETED, DELETE_FAILED, NONE.
+	// This state is only used for transitions.
+	DeviceTransientState_ANY DeviceTransientState_Types = 1
+	// The device is in FORCE_DELETING state
+	DeviceTransientState_FORCE_DELETING DeviceTransientState_Types = 2
+	// The device is getting deleted from adapter state
+	DeviceTransientState_DELETING_FROM_ADAPTER DeviceTransientState_Types = 3
+	// The device is deleted from adapter and is getting deleted in core.
+	DeviceTransientState_DELETING_POST_ADAPTER_RESPONSE DeviceTransientState_Types = 4
+	// State to represent that the device deletion has failed
+	DeviceTransientState_DELETE_FAILED DeviceTransientState_Types = 5
+	// State to represent that reconcile is in progress
+	DeviceTransientState_RECONCILE_IN_PROGRESS DeviceTransientState_Types = 6
+)
+
+var DeviceTransientState_Types_name = map[int32]string{
+	0: "NONE",
+	1: "ANY",
+	2: "FORCE_DELETING",
+	3: "DELETING_FROM_ADAPTER",
+	4: "DELETING_POST_ADAPTER_RESPONSE",
+	5: "DELETE_FAILED",
+	6: "RECONCILE_IN_PROGRESS",
+}
+
+var DeviceTransientState_Types_value = map[string]int32{
+	"NONE":                           0,
+	"ANY":                            1,
+	"FORCE_DELETING":                 2,
+	"DELETING_FROM_ADAPTER":          3,
+	"DELETING_POST_ADAPTER_RESPONSE": 4,
+	"DELETE_FAILED":                  5,
+	"RECONCILE_IN_PROGRESS":          6,
+}
+
+func (x DeviceTransientState_Types) String() string {
+	return proto.EnumName(DeviceTransientState_Types_name, int32(x))
+}
+
+func (DeviceTransientState_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_39634f15fb8a505e, []int{0, 0}
+}
+
+type DeviceTransientState struct {
+	TransientState       DeviceTransientState_Types `protobuf:"varint,1,opt,name=transient_state,json=transientState,proto3,enum=voltha.DeviceTransientState_Types" json:"transient_state,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
+	XXX_unrecognized     []byte                     `json:"-"`
+	XXX_sizecache        int32                      `json:"-"`
+}
+
+func (m *DeviceTransientState) Reset()         { *m = DeviceTransientState{} }
+func (m *DeviceTransientState) String() string { return proto.CompactTextString(m) }
+func (*DeviceTransientState) ProtoMessage()    {}
+func (*DeviceTransientState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39634f15fb8a505e, []int{0}
+}
+
+func (m *DeviceTransientState) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceTransientState.Unmarshal(m, b)
+}
+func (m *DeviceTransientState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceTransientState.Marshal(b, m, deterministic)
+}
+func (m *DeviceTransientState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceTransientState.Merge(m, src)
+}
+func (m *DeviceTransientState) XXX_Size() int {
+	return xxx_messageInfo_DeviceTransientState.Size(m)
+}
+func (m *DeviceTransientState) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceTransientState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceTransientState proto.InternalMessageInfo
+
+func (m *DeviceTransientState) GetTransientState() DeviceTransientState_Types {
+	if m != nil {
+		return m.TransientState
+	}
+	return DeviceTransientState_NONE
+}
+
+func init() {
+	proto.RegisterEnum("voltha.DeviceTransientState_Types", DeviceTransientState_Types_name, DeviceTransientState_Types_value)
+	proto.RegisterType((*DeviceTransientState)(nil), "voltha.DeviceTransientState")
+}
+
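A small illustrative snippet of how the enum plumbing above is consumed: the symbolic name of a transient state is recovered through the generated String() method, which looks it up in DeviceTransientState_Types_name. The import path matches this file's location in the vendored tree; only the chosen state value is arbitrary.

```go
package main

import (
	"fmt"

	core "github.com/opencord/voltha-protos/v5/go/core"
)

func main() {
	// Wrap a transient state and print its symbolic name via the generated String().
	st := &core.DeviceTransientState{
		TransientState: core.DeviceTransientState_DELETING_FROM_ADAPTER,
	}
	fmt.Println(st.GetTransientState().String()) // "DELETING_FROM_ADAPTER"
}
```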
+func init() { proto.RegisterFile("voltha_protos/core.proto", fileDescriptor_39634f15fb8a505e) }
+
+var fileDescriptor_39634f15fb8a505e = []byte{
+	// 771 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x5d, 0x6f, 0xeb, 0x44,
+	0x10, 0x25, 0xf7, 0xa3, 0x5c, 0xa6, 0x6d, 0xe2, 0xee, 0x4d, 0xb9, 0x6d, 0x2a, 0x55, 0x28, 0x3c,
+	0xc0, 0x43, 0xeb, 0x88, 0x16, 0x2a, 0x44, 0x11, 0xc8, 0xb5, 0x9d, 0x34, 0x22, 0x4d, 0x2c, 0x3b,
+	0x08, 0xc1, 0x8b, 0xe5, 0xda, 0x53, 0xc7, 0xc2, 0xf1, 0x46, 0xbb, 0xdb, 0x40, 0x1e, 0xf9, 0x1d,
+	0xfc, 0x14, 0xfe, 0x1c, 0xb2, 0xd7, 0x4e, 0xed, 0x34, 0x96, 0x2a, 0xdd, 0xb7, 0x64, 0xce, 0x9c,
+	0x33, 0x3b, 0x73, 0x76, 0xbc, 0x70, 0xb4, 0xa4, 0xb1, 0x98, 0x79, 0xee, 0x82, 0x51, 0x41, 0x79,
+	0xcf, 0xa7, 0x0c, 0xd5, 0xec, 0x37, 0xd9, 0x91, 0x48, 0xe7, 0x24, 0xa4, 0x34, 0x8c, 0xb1, 0x97,
+	0x45, 0xef, 0x1f, 0x1f, 0x7a, 0x38, 0x5f, 0x88, 0x95, 0x4c, 0xea, 0x7c, 0x59, 0xa5, 0x47, 0x89,
+	0x40, 0xe6, 0xfa, 0x34, 0x11, 0x5e, 0x94, 0x20, 0xcb, 0x93, 0x3a, 0x9b, 0x35, 0xe6, 0x73, 0x9a,
+	0x6c, 0xc7, 0x02, 0x5c, 0x46, 0x3e, 0x6e, 0xc7, 0x66, 0xe8, 0xc5, 0x62, 0x26, 0xb1, 0xee, 0x3f,
+	0xaf, 0xa0, 0x6d, 0x64, 0xc9, 0x53, 0xe6, 0x25, 0x3c, 0xc2, 0x44, 0x38, 0xc2, 0x13, 0x48, 0x7e,
+	0x81, 0x96, 0x28, 0x22, 0x2e, 0x4f, 0x43, 0x47, 0x8d, 0x2f, 0x1a, 0x5f, 0x37, 0x2f, 0xba, 0xaa,
+	0x94, 0x53, 0xb7, 0xd1, 0xd4, 0xe9, 0x6a, 0x81, 0xdc, 0x6e, 0x8a, 0x4a, 0xb4, 0xfb, 0x6f, 0x03,
+	0xde, 0x66, 0x08, 0x79, 0x07, 0x6f, 0xc6, 0x93, 0xb1, 0xa9, 0x7c, 0x42, 0x3e, 0x85, 0xd7, 0xda,
+	0xf8, 0x77, 0xa5, 0x41, 0x08, 0x34, 0xfb, 0x13, 0x5b, 0x37, 0x5d, 0xc3, 0x1c, 0x99, 0xd3, 0xe1,
+	0x78, 0xa0, 0xbc, 0x22, 0xc7, 0x70, 0x58, 0xfc, 0x73, 0xfb, 0xf6, 0xe4, 0xce, 0xd5, 0x0c, 0xcd,
+	0x9a, 0x9a, 0xb6, 0xf2, 0x9a, 0x74, 0xe1, 0x74, 0x0d, 0x59, 0x13, 0x67, 0x5a, 0x40, 0xae, 0x6d,
+	0x3a, 0xd6, 0x64, 0xec, 0x98, 0xca, 0x1b, 0x72, 0x00, 0xfb, 0x59, 0x8e, 0xe9, 0xf6, 0xb5, 0xe1,
+	0xc8, 0x34, 0x94, 0xb7, 0xa9, 0xa2, 0x6d, 0xea, 0x93, 0xb1, 0x3e, 0x1c, 0x99, 0xee, 0x70, 0xec,
+	0x5a, 0xf6, 0x64, 0x60, 0x9b, 0x8e, 0xa3, 0xec, 0x5c, 0xfc, 0xb7, 0x0b, 0xbb, 0x3a, 0x65, 0xe8,
+	0x20, 0x4b, 0x3b, 0x22, 0x3f, 0x43, 0x6b, 0x80, 0xe2, 0x36, 0x1b, 0x53, 0x7a, 0xfe, 0x47, 0x4e,
+	0x3e, 0x57, 0xa5, 0x7b, 0x6a, 0xe1, 0x9e, 0x6a, 0xa6, 0xee, 0x75, 0xda, 0xc5, 0x30, 0x2a, 0xd9,
+	0x7d, 0x68, 0xd9, 0x18, 0x46, 0x5c, 0x20, 0xd3, 0x02, 0x6f, 0x21, 0x90, 0x91, 0x93, 0x22, 0x31,
+	0x0f, 0x48, 0x9c, 0x79, 0x22, 0xa2, 0x49, 0xa7, 0x46, 0x9d, 0x5c, 0xc1, 0x9e, 0x1c, 0xf2, 0xaf,
+	0x8b, 0x20, 0xf5, 0xa4, 0x59, 0x1d, 0x7d, 0x2d, 0xef, 0x12, 0x76, 0x2d, 0xca, 0x84, 0xce, 0xd0,
+	0x13, 0x18, 0x90, 0xbd, 0x82, 0x96, 0x06, 0x6b, 0x49, 0x3a, 0x28, 0x29, 0xce, 0x33, 0xc7, 0xf2,
+	0x82, 0x1f, 0xca, 0xcc, 0x0c, 0xe8, 0x47, 0xb1, 0x40, 0x56, 0x2b, 0xf2, 0x2d, 0x34, 0x0d, 0x8c,
+	0x51, 0xa0, 0x16, 0xc7, 0x99, 0x1a, 0x01, 0x35, 0xbf, 0xa7, 0x43, 0xa3, 0x96, 0xf5, 0x0d, 0xec,
+	0x0f, 0x50, 0xc8, 0xa6, 0x52, 0x16, 0x21, 0xe5, 0xba, 0x79, 0xc9, 0x4a, 0x17, 0xe4, 0x0c, 0x5a,
+	0xa3, 0x88, 0x97, 0x38, 0xd5, 0x4a, 0xfb, 0xe5, 0xe4, 0xd4, 0x90, 0x03, 0x99, 0x59, 0x6e, 0xee,
+	0xb8, 0x3a, 0xcd, 0x97, 0xb4, 0xa7, 0x15, 0xcb, 0x62, 0xdd, 0xe9, 0x34, 0x79, 0x88, 0xc2, 0x5c,
+	0xea, 0x60, 0x5d, 0x6e, 0x2e, 0xe3, 0xbc, 0x56, 0xe2, 0x27, 0x78, 0xaf, 0xcf, 0xa2, 0x38, 0x90,
+	0x3a, 0x06, 0x0a, 0xf4, 0x53, 0x8f, 0x3e, 0x54, 0x0f, 0x63, 0x44, 0xdc, 0xa7, 0x4b, 0x64, 0xab,
+	0xce, 0x86, 0xe7, 0xe4, 0x0a, 0x94, 0x12, 0x9f, 0x8f, 0x28, 0x17, 0x2f, 0x9a, 0xf1, 0x0f, 0xd0,
+	0x2e, 0xf3, 0xd6, 0x85, 0x5f, 0xc2, 0xfd, 0x0a, 0x3e, 0x5b, 0xfb, 0x53, 0x21, 0x6c, 0x1e, 0xee,
+	0x1a, 0x9a, 0x03, 0x14, 0xa5, 0x3a, 0x4f, 0x43, 0x2e, 0x05, 0xf3, 0x21, 0x6f, 0x92, 0xd5, 0x6c,
+	0xed, 0xca, 0x87, 0xac, 0xd4, 0x6a, 0x55, 0xd3, 0x39, 0xf9, 0x1e, 0xf6, 0x1c, 0x4c, 0x02, 0xcb,
+	0xf3, 0xff, 0x44, 0x31, 0x4c, 0x88, 0xb2, 0x36, 0x21, 0x8f, 0xd4, 0xf6, 0x73, 0x03, 0x44, 0x8a,
+	0xd8, 0xe8, 0x71, 0x9a, 0xe4, 0x26, 0xb6, 0xab, 0x05, 0x24, 0x56, 0xab, 0xf1, 0x23, 0xb4, 0xd6,
+	0x4b, 0xf1, 0xec, 0x16, 0x14, 0x40, 0x2d, 0xfb, 0x1a, 0x0e, 0x6d, 0xf4, 0x69, 0xe2, 0x47, 0x31,
+	0xd6, 0x76, 0x5c, 0x47, 0xbe, 0x83, 0xd3, 0xea, 0xa0, 0x7e, 0x8b, 0xc4, 0xcc, 0x62, 0xf4, 0xef,
+	0x95, 0x16, 0x04, 0x0c, 0x39, 0x7f, 0xfa, 0xda, 0x48, 0x5c, 0x2d, 0x83, 0xcf, 0xe6, 0x7e, 0x0e,
+	0xef, 0x06, 0x28, 0xe4, 0xa2, 0x6c, 0x5b, 0xbc, 0x8d, 0x5d, 0xba, 0xcd, 0x2f, 0x30, 0xc3, 0xe4,
+	0x23, 0xb7, 0xc9, 0x80, 0xf7, 0x92, 0x3c, 0x9c, 0x7b, 0x21, 0x1a, 0xf4, 0xaf, 0x24, 0xa6, 0x5e,
+	0x40, 0x0e, 0x0b, 0xa5, 0x4a, 0xb8, 0x4e, 0xe5, 0xe6, 0x16, 0x8e, 0x28, 0x0b, 0x55, 0xba, 0xc0,
+	0xc4, 0xa7, 0x2c, 0x28, 0xc8, 0xe9, 0x0b, 0xfc, 0xc7, 0x59, 0x18, 0x89, 0xd9, 0xe3, 0x7d, 0x3a,
+	0xd3, 0x5e, 0x91, 0xd0, 0x93, 0x09, 0xe7, 0xf9, 0x6b, 0xb8, 0xfc, 0xae, 0x17, 0xd2, 0xec, 0xbd,
+	0xbe, 0xdf, 0xc9, 0x42, 0x97, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x60, 0x7e, 0xd5, 0xfd, 0xcc,
+	0x07, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// CoreServiceClient is the client API for CoreService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type CoreServiceClient interface {
+	// RPCs below mirror the coreProxy interface used by adapters
+	GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*voltha.HealthStatus, error)
+	RegisterAdapter(ctx context.Context, in *inter_container.AdapterRegistration, opts ...grpc.CallOption) (*empty.Empty, error)
+	DeviceUpdate(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error)
+	PortCreated(ctx context.Context, in *voltha.Port, opts ...grpc.CallOption) (*empty.Empty, error)
+	PortsStateUpdate(ctx context.Context, in *inter_container.PortStateFilter, opts ...grpc.CallOption) (*empty.Empty, error)
+	DeleteAllPorts(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error)
+	GetDevicePort(ctx context.Context, in *inter_container.PortFilter, opts ...grpc.CallOption) (*voltha.Port, error)
+	ListDevicePorts(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*voltha.Ports, error)
+	DeviceStateUpdate(ctx context.Context, in *inter_container.DeviceStateFilter, opts ...grpc.CallOption) (*empty.Empty, error)
+	DevicePMConfigUpdate(ctx context.Context, in *voltha.PmConfigs, opts ...grpc.CallOption) (*empty.Empty, error)
+	ChildDeviceDetected(ctx context.Context, in *inter_container.DeviceDiscovery, opts ...grpc.CallOption) (*voltha.Device, error)
+	ChildDevicesLost(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error)
+	ChildDevicesDetected(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error)
+	GetDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*voltha.Device, error)
+	GetChildDevice(ctx context.Context, in *inter_container.ChildDeviceFilter, opts ...grpc.CallOption) (*voltha.Device, error)
+	GetChildDevices(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*voltha.Devices, error)
+	SendPacketIn(ctx context.Context, in *inter_container.PacketIn, opts ...grpc.CallOption) (*empty.Empty, error)
+	DeviceReasonUpdate(ctx context.Context, in *inter_container.DeviceReason, opts ...grpc.CallOption) (*empty.Empty, error)
+	PortStateUpdate(ctx context.Context, in *inter_container.PortState, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Additional APIs found in the Core; possibly unused
+	ReconcileChildDevices(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error)
+	GetChildDeviceWithProxyAddress(ctx context.Context, in *voltha.Device_ProxyAddress, opts ...grpc.CallOption) (*voltha.Device, error)
+	GetPorts(ctx context.Context, in *inter_container.PortFilter, opts ...grpc.CallOption) (*voltha.Ports, error)
+	ChildrenStateUpdate(ctx context.Context, in *inter_container.DeviceStateFilter, opts ...grpc.CallOption) (*empty.Empty, error)
+	UpdateImageDownload(ctx context.Context, in *voltha.ImageDownload, opts ...grpc.CallOption) (*empty.Empty, error)
+}
+
+type coreServiceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewCoreServiceClient(cc *grpc.ClientConn) CoreServiceClient {
+	return &coreServiceClient{cc}
+}
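
For orientation, here is a minimal client-side sketch of how the generated constructor above might be used from an adapter. The import path of the generated package, the dial target, the timeout, and the insecure transport option are illustrative assumptions, not values taken from this change.

```go
// Hypothetical usage sketch of the generated CoreService client.
package main

import (
	"context"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes/empty"
	"google.golang.org/grpc"

	core "github.com/opencord/voltha-protos/v5/go/core_service" // illustrative import path
)

func main() {
	// Dial the Core's gRPC endpoint; the address is a placeholder.
	conn, err := grpc.Dial("voltha-core:55555", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("failed to dial core: %v", err)
	}
	defer conn.Close()

	client := core.NewCoreServiceClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Simple unary call against the generated client.
	health, err := client.GetHealthStatus(ctx, &empty.Empty{})
	if err != nil {
		log.Fatalf("GetHealthStatus failed: %v", err)
	}
	log.Printf("core health: %v", health)
}
```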
+
+func (c *coreServiceClient) GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*voltha.HealthStatus, error) {
+	out := new(voltha.HealthStatus)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/GetHealthStatus", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) RegisterAdapter(ctx context.Context, in *inter_container.AdapterRegistration, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/RegisterAdapter", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) DeviceUpdate(ctx context.Context, in *voltha.Device, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/DeviceUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) PortCreated(ctx context.Context, in *voltha.Port, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/PortCreated", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) PortsStateUpdate(ctx context.Context, in *inter_container.PortStateFilter, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/PortsStateUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) DeleteAllPorts(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/DeleteAllPorts", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) GetDevicePort(ctx context.Context, in *inter_container.PortFilter, opts ...grpc.CallOption) (*voltha.Port, error) {
+	out := new(voltha.Port)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/GetDevicePort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) ListDevicePorts(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*voltha.Ports, error) {
+	out := new(voltha.Ports)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/ListDevicePorts", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) DeviceStateUpdate(ctx context.Context, in *inter_container.DeviceStateFilter, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/DeviceStateUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) DevicePMConfigUpdate(ctx context.Context, in *voltha.PmConfigs, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/DevicePMConfigUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) ChildDeviceDetected(ctx context.Context, in *inter_container.DeviceDiscovery, opts ...grpc.CallOption) (*voltha.Device, error) {
+	out := new(voltha.Device)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/ChildDeviceDetected", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) ChildDevicesLost(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/ChildDevicesLost", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) ChildDevicesDetected(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/ChildDevicesDetected", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) GetDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*voltha.Device, error) {
+	out := new(voltha.Device)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/GetDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) GetChildDevice(ctx context.Context, in *inter_container.ChildDeviceFilter, opts ...grpc.CallOption) (*voltha.Device, error) {
+	out := new(voltha.Device)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/GetChildDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) GetChildDevices(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*voltha.Devices, error) {
+	out := new(voltha.Devices)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/GetChildDevices", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) SendPacketIn(ctx context.Context, in *inter_container.PacketIn, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/SendPacketIn", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) DeviceReasonUpdate(ctx context.Context, in *inter_container.DeviceReason, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/DeviceReasonUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) PortStateUpdate(ctx context.Context, in *inter_container.PortState, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/PortStateUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) ReconcileChildDevices(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/ReconcileChildDevices", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) GetChildDeviceWithProxyAddress(ctx context.Context, in *voltha.Device_ProxyAddress, opts ...grpc.CallOption) (*voltha.Device, error) {
+	out := new(voltha.Device)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/GetChildDeviceWithProxyAddress", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) GetPorts(ctx context.Context, in *inter_container.PortFilter, opts ...grpc.CallOption) (*voltha.Ports, error) {
+	out := new(voltha.Ports)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/GetPorts", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) ChildrenStateUpdate(ctx context.Context, in *inter_container.DeviceStateFilter, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/ChildrenStateUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *coreServiceClient) UpdateImageDownload(ctx context.Context, in *voltha.ImageDownload, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.CoreService/UpdateImageDownload", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// CoreServiceServer is the server API for CoreService service.
+type CoreServiceServer interface {
+	// Methods present in the coreProxy interface
+	GetHealthStatus(context.Context, *empty.Empty) (*voltha.HealthStatus, error)
+	RegisterAdapter(context.Context, *inter_container.AdapterRegistration) (*empty.Empty, error)
+	DeviceUpdate(context.Context, *voltha.Device) (*empty.Empty, error)
+	PortCreated(context.Context, *voltha.Port) (*empty.Empty, error)
+	PortsStateUpdate(context.Context, *inter_container.PortStateFilter) (*empty.Empty, error)
+	DeleteAllPorts(context.Context, *common.ID) (*empty.Empty, error)
+	GetDevicePort(context.Context, *inter_container.PortFilter) (*voltha.Port, error)
+	ListDevicePorts(context.Context, *common.ID) (*voltha.Ports, error)
+	DeviceStateUpdate(context.Context, *inter_container.DeviceStateFilter) (*empty.Empty, error)
+	DevicePMConfigUpdate(context.Context, *voltha.PmConfigs) (*empty.Empty, error)
+	ChildDeviceDetected(context.Context, *inter_container.DeviceDiscovery) (*voltha.Device, error)
+	ChildDevicesLost(context.Context, *common.ID) (*empty.Empty, error)
+	ChildDevicesDetected(context.Context, *common.ID) (*empty.Empty, error)
+	GetDevice(context.Context, *common.ID) (*voltha.Device, error)
+	GetChildDevice(context.Context, *inter_container.ChildDeviceFilter) (*voltha.Device, error)
+	GetChildDevices(context.Context, *common.ID) (*voltha.Devices, error)
+	SendPacketIn(context.Context, *inter_container.PacketIn) (*empty.Empty, error)
+	DeviceReasonUpdate(context.Context, *inter_container.DeviceReason) (*empty.Empty, error)
+	PortStateUpdate(context.Context, *inter_container.PortState) (*empty.Empty, error)
+	// Additional APIs found in the Core; possibly unused
+	ReconcileChildDevices(context.Context, *common.ID) (*empty.Empty, error)
+	GetChildDeviceWithProxyAddress(context.Context, *voltha.Device_ProxyAddress) (*voltha.Device, error)
+	GetPorts(context.Context, *inter_container.PortFilter) (*voltha.Ports, error)
+	ChildrenStateUpdate(context.Context, *inter_container.DeviceStateFilter) (*empty.Empty, error)
+	UpdateImageDownload(context.Context, *voltha.ImageDownload) (*empty.Empty, error)
+}
+
+// UnimplementedCoreServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedCoreServiceServer struct {
+}
+
+func (*UnimplementedCoreServiceServer) GetHealthStatus(ctx context.Context, req *empty.Empty) (*voltha.HealthStatus, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetHealthStatus not implemented")
+}
+func (*UnimplementedCoreServiceServer) RegisterAdapter(ctx context.Context, req *inter_container.AdapterRegistration) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RegisterAdapter not implemented")
+}
+func (*UnimplementedCoreServiceServer) DeviceUpdate(ctx context.Context, req *voltha.Device) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeviceUpdate not implemented")
+}
+func (*UnimplementedCoreServiceServer) PortCreated(ctx context.Context, req *voltha.Port) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method PortCreated not implemented")
+}
+func (*UnimplementedCoreServiceServer) PortsStateUpdate(ctx context.Context, req *inter_container.PortStateFilter) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method PortsStateUpdate not implemented")
+}
+func (*UnimplementedCoreServiceServer) DeleteAllPorts(ctx context.Context, req *common.ID) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteAllPorts not implemented")
+}
+func (*UnimplementedCoreServiceServer) GetDevicePort(ctx context.Context, req *inter_container.PortFilter) (*voltha.Port, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetDevicePort not implemented")
+}
+func (*UnimplementedCoreServiceServer) ListDevicePorts(ctx context.Context, req *common.ID) (*voltha.Ports, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListDevicePorts not implemented")
+}
+func (*UnimplementedCoreServiceServer) DeviceStateUpdate(ctx context.Context, req *inter_container.DeviceStateFilter) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeviceStateUpdate not implemented")
+}
+func (*UnimplementedCoreServiceServer) DevicePMConfigUpdate(ctx context.Context, req *voltha.PmConfigs) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DevicePMConfigUpdate not implemented")
+}
+func (*UnimplementedCoreServiceServer) ChildDeviceDetected(ctx context.Context, req *inter_container.DeviceDiscovery) (*voltha.Device, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ChildDeviceDetected not implemented")
+}
+func (*UnimplementedCoreServiceServer) ChildDevicesLost(ctx context.Context, req *common.ID) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ChildDevicesLost not implemented")
+}
+func (*UnimplementedCoreServiceServer) ChildDevicesDetected(ctx context.Context, req *common.ID) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ChildDevicesDetected not implemented")
+}
+func (*UnimplementedCoreServiceServer) GetDevice(ctx context.Context, req *common.ID) (*voltha.Device, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetDevice not implemented")
+}
+func (*UnimplementedCoreServiceServer) GetChildDevice(ctx context.Context, req *inter_container.ChildDeviceFilter) (*voltha.Device, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetChildDevice not implemented")
+}
+func (*UnimplementedCoreServiceServer) GetChildDevices(ctx context.Context, req *common.ID) (*voltha.Devices, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetChildDevices not implemented")
+}
+func (*UnimplementedCoreServiceServer) SendPacketIn(ctx context.Context, req *inter_container.PacketIn) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SendPacketIn not implemented")
+}
+func (*UnimplementedCoreServiceServer) DeviceReasonUpdate(ctx context.Context, req *inter_container.DeviceReason) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeviceReasonUpdate not implemented")
+}
+func (*UnimplementedCoreServiceServer) PortStateUpdate(ctx context.Context, req *inter_container.PortState) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method PortStateUpdate not implemented")
+}
+func (*UnimplementedCoreServiceServer) ReconcileChildDevices(ctx context.Context, req *common.ID) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ReconcileChildDevices not implemented")
+}
+func (*UnimplementedCoreServiceServer) GetChildDeviceWithProxyAddress(ctx context.Context, req *voltha.Device_ProxyAddress) (*voltha.Device, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetChildDeviceWithProxyAddress not implemented")
+}
+func (*UnimplementedCoreServiceServer) GetPorts(ctx context.Context, req *inter_container.PortFilter) (*voltha.Ports, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetPorts not implemented")
+}
+func (*UnimplementedCoreServiceServer) ChildrenStateUpdate(ctx context.Context, req *inter_container.DeviceStateFilter) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ChildrenStateUpdate not implemented")
+}
+func (*UnimplementedCoreServiceServer) UpdateImageDownload(ctx context.Context, req *voltha.ImageDownload) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UpdateImageDownload not implemented")
+}
+
+func RegisterCoreServiceServer(s *grpc.Server, srv CoreServiceServer) {
+	s.RegisterService(&_CoreService_serviceDesc, srv)
+}
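
A hedged server-side sketch of the pattern the generated comments describe: embed UnimplementedCoreServiceServer for forward compatibility, override only the RPCs you need, and wire the implementation in with RegisterCoreServiceServer. The import path, listen address, and the single overridden method are assumptions for illustration.

```go
// Hypothetical CoreService server sketch; unimplemented RPCs fall back to
// the embedded UnimplementedCoreServiceServer and return codes.Unimplemented.
package main

import (
	"context"
	"log"
	"net"

	"github.com/golang/protobuf/ptypes/empty"
	"google.golang.org/grpc"

	core "github.com/opencord/voltha-protos/v5/go/core_service" // illustrative import path
	"github.com/opencord/voltha-protos/v5/go/voltha"
)

type coreServer struct {
	core.UnimplementedCoreServiceServer
}

// GetHealthStatus is the only RPC overridden in this sketch.
func (s *coreServer) GetHealthStatus(ctx context.Context, _ *empty.Empty) (*voltha.HealthStatus, error) {
	return &voltha.HealthStatus{}, nil // zero value for illustration; populate as needed
}

func main() {
	lis, err := net.Listen("tcp", ":55555") // placeholder port
	if err != nil {
		log.Fatalf("listen failed: %v", err)
	}
	s := grpc.NewServer()
	core.RegisterCoreServiceServer(s, &coreServer{})
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve failed: %v", err)
	}
}
```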
+
+func _CoreService_GetHealthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).GetHealthStatus(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/GetHealthStatus",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).GetHealthStatus(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_RegisterAdapter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.AdapterRegistration)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).RegisterAdapter(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/RegisterAdapter",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).RegisterAdapter(ctx, req.(*inter_container.AdapterRegistration))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_DeviceUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.Device)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).DeviceUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/DeviceUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).DeviceUpdate(ctx, req.(*voltha.Device))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_PortCreated_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.Port)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).PortCreated(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/PortCreated",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).PortCreated(ctx, req.(*voltha.Port))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_PortsStateUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.PortStateFilter)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).PortsStateUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/PortsStateUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).PortsStateUpdate(ctx, req.(*inter_container.PortStateFilter))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_DeleteAllPorts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).DeleteAllPorts(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/DeleteAllPorts",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).DeleteAllPorts(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_GetDevicePort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.PortFilter)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).GetDevicePort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/GetDevicePort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).GetDevicePort(ctx, req.(*inter_container.PortFilter))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_ListDevicePorts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).ListDevicePorts(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/ListDevicePorts",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).ListDevicePorts(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_DeviceStateUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.DeviceStateFilter)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).DeviceStateUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/DeviceStateUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).DeviceStateUpdate(ctx, req.(*inter_container.DeviceStateFilter))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_DevicePMConfigUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.PmConfigs)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).DevicePMConfigUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/DevicePMConfigUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).DevicePMConfigUpdate(ctx, req.(*voltha.PmConfigs))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_ChildDeviceDetected_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.DeviceDiscovery)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).ChildDeviceDetected(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/ChildDeviceDetected",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).ChildDeviceDetected(ctx, req.(*inter_container.DeviceDiscovery))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_ChildDevicesLost_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).ChildDevicesLost(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/ChildDevicesLost",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).ChildDevicesLost(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_ChildDevicesDetected_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).ChildDevicesDetected(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/ChildDevicesDetected",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).ChildDevicesDetected(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_GetDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).GetDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/GetDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).GetDevice(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_GetChildDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.ChildDeviceFilter)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).GetChildDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/GetChildDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).GetChildDevice(ctx, req.(*inter_container.ChildDeviceFilter))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_GetChildDevices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).GetChildDevices(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/GetChildDevices",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).GetChildDevices(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_SendPacketIn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.PacketIn)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).SendPacketIn(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/SendPacketIn",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).SendPacketIn(ctx, req.(*inter_container.PacketIn))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_DeviceReasonUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.DeviceReason)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).DeviceReasonUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/DeviceReasonUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).DeviceReasonUpdate(ctx, req.(*inter_container.DeviceReason))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_PortStateUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.PortState)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).PortStateUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/PortStateUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).PortStateUpdate(ctx, req.(*inter_container.PortState))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_ReconcileChildDevices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).ReconcileChildDevices(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/ReconcileChildDevices",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).ReconcileChildDevices(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_GetChildDeviceWithProxyAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.Device_ProxyAddress)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).GetChildDeviceWithProxyAddress(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/GetChildDeviceWithProxyAddress",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).GetChildDeviceWithProxyAddress(ctx, req.(*voltha.Device_ProxyAddress))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_GetPorts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.PortFilter)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).GetPorts(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/GetPorts",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).GetPorts(ctx, req.(*inter_container.PortFilter))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_ChildrenStateUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(inter_container.DeviceStateFilter)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).ChildrenStateUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/ChildrenStateUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).ChildrenStateUpdate(ctx, req.(*inter_container.DeviceStateFilter))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CoreService_UpdateImageDownload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(voltha.ImageDownload)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CoreServiceServer).UpdateImageDownload(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.CoreService/UpdateImageDownload",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CoreServiceServer).UpdateImageDownload(ctx, req.(*voltha.ImageDownload))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _CoreService_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "voltha.CoreService",
+	HandlerType: (*CoreServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GetHealthStatus",
+			Handler:    _CoreService_GetHealthStatus_Handler,
+		},
+		{
+			MethodName: "RegisterAdapter",
+			Handler:    _CoreService_RegisterAdapter_Handler,
+		},
+		{
+			MethodName: "DeviceUpdate",
+			Handler:    _CoreService_DeviceUpdate_Handler,
+		},
+		{
+			MethodName: "PortCreated",
+			Handler:    _CoreService_PortCreated_Handler,
+		},
+		{
+			MethodName: "PortsStateUpdate",
+			Handler:    _CoreService_PortsStateUpdate_Handler,
+		},
+		{
+			MethodName: "DeleteAllPorts",
+			Handler:    _CoreService_DeleteAllPorts_Handler,
+		},
+		{
+			MethodName: "GetDevicePort",
+			Handler:    _CoreService_GetDevicePort_Handler,
+		},
+		{
+			MethodName: "ListDevicePorts",
+			Handler:    _CoreService_ListDevicePorts_Handler,
+		},
+		{
+			MethodName: "DeviceStateUpdate",
+			Handler:    _CoreService_DeviceStateUpdate_Handler,
+		},
+		{
+			MethodName: "DevicePMConfigUpdate",
+			Handler:    _CoreService_DevicePMConfigUpdate_Handler,
+		},
+		{
+			MethodName: "ChildDeviceDetected",
+			Handler:    _CoreService_ChildDeviceDetected_Handler,
+		},
+		{
+			MethodName: "ChildDevicesLost",
+			Handler:    _CoreService_ChildDevicesLost_Handler,
+		},
+		{
+			MethodName: "ChildDevicesDetected",
+			Handler:    _CoreService_ChildDevicesDetected_Handler,
+		},
+		{
+			MethodName: "GetDevice",
+			Handler:    _CoreService_GetDevice_Handler,
+		},
+		{
+			MethodName: "GetChildDevice",
+			Handler:    _CoreService_GetChildDevice_Handler,
+		},
+		{
+			MethodName: "GetChildDevices",
+			Handler:    _CoreService_GetChildDevices_Handler,
+		},
+		{
+			MethodName: "SendPacketIn",
+			Handler:    _CoreService_SendPacketIn_Handler,
+		},
+		{
+			MethodName: "DeviceReasonUpdate",
+			Handler:    _CoreService_DeviceReasonUpdate_Handler,
+		},
+		{
+			MethodName: "PortStateUpdate",
+			Handler:    _CoreService_PortStateUpdate_Handler,
+		},
+		{
+			MethodName: "ReconcileChildDevices",
+			Handler:    _CoreService_ReconcileChildDevices_Handler,
+		},
+		{
+			MethodName: "GetChildDeviceWithProxyAddress",
+			Handler:    _CoreService_GetChildDeviceWithProxyAddress_Handler,
+		},
+		{
+			MethodName: "GetPorts",
+			Handler:    _CoreService_GetPorts_Handler,
+		},
+		{
+			MethodName: "ChildrenStateUpdate",
+			Handler:    _CoreService_ChildrenStateUpdate_Handler,
+		},
+		{
+			MethodName: "UpdateImageDownload",
+			Handler:    _CoreService_UpdateImageDownload_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "voltha_protos/core.proto",
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/ext/config/ext_config.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/ext/config/ext_config.pb.go
similarity index 82%
rename from vendor/github.com/opencord/voltha-protos/v4/go/ext/config/ext_config.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/ext/config/ext_config.pb.go
index b68a7e4..2db2ed1 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/ext/config/ext_config.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/ext/config/ext_config.pb.go
@@ -480,44 +480,44 @@
 func init() { proto.RegisterFile("voltha_protos/ext_config.proto", fileDescriptor_fb43b44b7fa3aba9) }
 
 var fileDescriptor_fb43b44b7fa3aba9 = []byte{
-	// 609 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xd1, 0x4e, 0x1a, 0x41,
-	0x14, 0x05, 0xac, 0x80, 0xd7, 0x14, 0xe8, 0x80, 0x8a, 0x3e, 0x58, 0xe3, 0x83, 0x35, 0x6d, 0xba,
-	0x24, 0xd4, 0x97, 0x26, 0x7d, 0x41, 0xa5, 0x85, 0x84, 0xa8, 0x19, 0xd1, 0x87, 0xa6, 0xc9, 0x76,
-	0x64, 0xc7, 0x65, 0xea, 0x32, 0xb3, 0x19, 0x66, 0x69, 0x5f, 0xfa, 0x35, 0xfd, 0x8e, 0xf6, 0xdb,
-	0x9a, 0x99, 0xd9, 0x05, 0x81, 0xc5, 0xa4, 0xe9, 0x0b, 0xc9, 0x9e, 0x7b, 0xcf, 0xb9, 0x87, 0x73,
-	0x67, 0x06, 0xf6, 0x27, 0x22, 0x50, 0x43, 0xe2, 0x86, 0x52, 0x28, 0x31, 0x6e, 0xd0, 0x1f, 0xca,
-	0x1d, 0x08, 0x7e, 0xcf, 0x7c, 0xc7, 0x20, 0x28, 0x6f, 0xbf, 0x0e, 0x19, 0x6c, 0xb6, 0x02, 0x22,
-	0x47, 0x67, 0xe6, 0x13, 0x5d, 0x41, 0x5d, 0xf0, 0xc8, 0x65, 0x2a, 0x72, 0x43, 0xc1, 0x5d, 0xa2,
-	0x4b, 0x31, 0xb1, 0x9e, 0x3d, 0xc8, 0x1e, 0x6f, 0x36, 0xb7, 0x9d, 0x58, 0xe7, 0x92, 0x47, 0x5d,
-	0x15, 0x5d, 0x09, 0x6e, 0xf8, 0x9d, 0x0c, 0xae, 0x89, 0x39, 0xc4, 0x2a, 0x9e, 0x16, 0x21, 0x19,
-	0xf5, 0x67, 0x03, 0x4a, 0xf3, 0x24, 0xb4, 0x05, 0x79, 0x3d, 0x86, 0x33, 0x23, 0x5e, 0xc0, 0xeb,
-	0xa1, 0xe0, 0x17, 0x4c, 0xc3, 0xc6, 0x85, 0x57, 0xcf, 0x59, 0x58, 0x2b, 0x7b, 0xe8, 0x3d, 0x14,
-	0xad, 0x21, 0xe6, 0xd5, 0xd7, 0x0e, 0xb2, 0xc7, 0xa5, 0xe6, 0x7e, 0xba, 0x19, 0xc7, 0xfc, 0x76,
-	0xcf, 0x71, 0xc1, 0xf4, 0x77, 0x3d, 0xf4, 0x0d, 0x76, 0x2d, 0x55, 0xd2, 0x50, 0x48, 0xc5, 0xb8,
-	0xaf, 0xff, 0x95, 0xc7, 0x14, 0x13, 0xbc, 0xfe, 0xcc, 0x68, 0x39, 0x4f, 0x69, 0xe1, 0x84, 0x76,
-	0x96, 0xb0, 0xf0, 0x0e, 0x49, 0x2f, 0xa0, 0xaf, 0xb0, 0x25, 0x89, 0xa2, 0xae, 0x1a, 0x4a, 0x3a,
-	0x1e, 0x8a, 0xc0, 0x4b, 0x02, 0x5c, 0x37, 0x01, 0xbe, 0x5e, 0x31, 0x07, 0x13, 0x45, 0xfb, 0x09,
-	0xc5, 0x86, 0xd7, 0xc9, 0xe0, 0xaa, 0x5c, 0x86, 0x51, 0x1f, 0x5e, 0x98, 0x09, 0x92, 0x70, 0x9f,
-	0x26, 0xea, 0x79, 0xa3, 0x7e, 0xf4, 0x84, 0x3a, 0xd6, 0xed, 0x53, 0xe5, 0xb2, 0x9c, 0x87, 0xd0,
-	0x00, 0xb6, 0x27, 0x24, 0x88, 0x52, 0x8c, 0x17, 0x8c, 0xf4, 0x9b, 0x15, 0xd2, 0xb7, 0x9a, 0xb4,
-	0xec, 0xbc, 0x36, 0x49, 0xc1, 0xf7, 0xbe, 0x40, 0xf1, 0x5a, 0x90, 0x87, 0x3e, 0x1b, 0x51, 0x74,
-	0x0c, 0x15, 0x32, 0x50, 0x6c, 0x42, 0xdd, 0xb1, 0x20, 0x0f, 0xae, 0x62, 0x23, 0x1a, 0x9f, 0x83,
-	0x92, 0xc5, 0xa7, 0x9d, 0x47, 0x50, 0x1e, 0x04, 0x94, 0xc8, 0x47, 0x8d, 0xf6, 0x64, 0x3c, 0x37,
-	0x70, 0xd2, 0xb7, 0xf7, 0x3b, 0x0b, 0xd5, 0x94, 0x1c, 0x51, 0x73, 0x69, 0x25, 0x92, 0x8d, 0x19,
-	0xb7, 0x67, 0x3a, 0xbf, 0x10, 0x32, 0x36, 0x25, 0x74, 0x02, 0xdb, 0x0b, 0x9c, 0x7b, 0x12, 0x04,
-	0x9a, 0x94, 0x33, 0xa4, 0xda, 0x1c, 0xe9, 0xa3, 0xad, 0xa1, 0x0f, 0xb0, 0x31, 0xf3, 0xb8, 0x66,
-	0x72, 0x7b, 0xb9, 0x22, 0xb7, 0xc4, 0x35, 0x2e, 0x8e, 0x13, 0xff, 0xbf, 0xb2, 0x50, 0x5e, 0xd8,
-	0x94, 0x4e, 0xe9, 0xd1, 0xb2, 0x03, 0xf1, 0x9d, 0xca, 0xd8, 0x76, 0x69, 0xba, 0xc1, 0x9e, 0x46,
-	0x17, 0x3a, 0xa3, 0x30, 0xa4, 0x32, 0xf6, 0x3a, 0xeb, 0xbc, 0xd1, 0xe8, 0x7f, 0xba, 0xfc, 0x09,
-	0xb5, 0xb4, 0x9d, 0xa3, 0x57, 0x50, 0x9e, 0x85, 0x15, 0xb0, 0x11, 0x53, 0x89, 0xd1, 0x29, 0xdc,
-	0xd3, 0xe8, 0xfc, 0xf8, 0xdc, 0x3f, 0x8e, 0x3f, 0xdc, 0x85, 0x42, 0x7c, 0xbf, 0x51, 0x09, 0x00,
-	0x9f, 0x77, 0xdd, 0x36, 0xc6, 0x97, 0xf8, 0xba, 0x92, 0x39, 0xc4, 0xb0, 0xb3, 0xe2, 0xba, 0x22,
-	0x04, 0x25, 0xdc, 0xea, 0xb7, 0xdd, 0x7e, 0x07, 0xb7, 0xaf, 0x3b, 0x97, 0xbd, 0xf3, 0x4a, 0xc6,
-	0xd0, 0x35, 0x86, 0x5b, 0x17, 0x9f, 0xda, 0x95, 0x2c, 0xaa, 0x42, 0xf9, 0xb6, 0xd5, 0xbb, 0x79,
-	0xdc, 0x94, 0x9b, 0x3d, 0x60, 0xa7, 0x5d, 0xa8, 0x0a, 0xe9, 0x3b, 0x22, 0xa4, 0x7c, 0x20, 0xa4,
-	0xe7, 0xd8, 0x27, 0xf6, 0x73, 0xd3, 0x67, 0x6a, 0x18, 0xdd, 0x39, 0x03, 0x31, 0x6a, 0x24, 0xb5,
-	0x86, 0xad, 0xbd, 0x8d, 0x9f, 0xdf, 0xc9, 0x49, 0xc3, 0x17, 0xfa, 0x11, 0x6e, 0x58, 0xa9, 0xbb,
-	0xbc, 0x29, 0xbc, 0xfb, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xe2, 0x77, 0x17, 0x08, 0xa7, 0x05, 0x00,
-	0x00,
+	// 610 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xd1, 0x6e, 0x12, 0x41,
+	0x14, 0x05, 0x6a, 0x81, 0xde, 0x46, 0xc0, 0x81, 0xb6, 0xb4, 0x0f, 0xb5, 0xe9, 0x43, 0x6d, 0x34,
+	0x2e, 0x09, 0xea, 0x83, 0x89, 0x2f, 0xb4, 0x45, 0x21, 0x21, 0x6d, 0x33, 0xa5, 0x7d, 0x30, 0x26,
+	0xeb, 0x94, 0x9d, 0x2e, 0x63, 0x97, 0x99, 0xcd, 0x30, 0x8b, 0xbe, 0xf8, 0x35, 0x7e, 0x87, 0x7e,
+	0x9b, 0x99, 0x99, 0x5d, 0x28, 0xb0, 0x34, 0x31, 0xbe, 0x90, 0xec, 0xb9, 0xf7, 0x9c, 0x7b, 0x38,
+	0x77, 0x66, 0x60, 0x7f, 0x22, 0x02, 0x35, 0x24, 0x6e, 0x28, 0x85, 0x12, 0xe3, 0x06, 0xfd, 0xa1,
+	0xdc, 0x81, 0xe0, 0x77, 0xcc, 0x77, 0x0c, 0x82, 0xf2, 0xf6, 0xeb, 0x90, 0xc1, 0x66, 0x2b, 0x20,
+	0x72, 0x74, 0x6a, 0x3e, 0xd1, 0x25, 0xd4, 0x05, 0x8f, 0x5c, 0xa6, 0x22, 0x37, 0x14, 0xdc, 0x25,
+	0xba, 0x14, 0x13, 0xeb, 0xd9, 0x83, 0xec, 0xf1, 0x66, 0x73, 0xdb, 0x89, 0x75, 0x2e, 0x78, 0xd4,
+	0x55, 0xd1, 0xa5, 0xe0, 0x86, 0xdf, 0xc9, 0xe0, 0x9a, 0x98, 0x43, 0xac, 0xe2, 0x49, 0x11, 0x92,
+	0x51, 0x7f, 0x36, 0xa0, 0x34, 0x4f, 0x42, 0x5b, 0x90, 0xd7, 0x63, 0x38, 0x33, 0xe2, 0x05, 0xbc,
+	0x1e, 0x0a, 0x7e, 0xce, 0x34, 0x6c, 0x5c, 0x78, 0xf5, 0x9c, 0x85, 0xb5, 0xb2, 0x87, 0xde, 0x43,
+	0xd1, 0x1a, 0x62, 0x5e, 0x7d, 0xed, 0x20, 0x7b, 0x5c, 0x6a, 0xee, 0xa7, 0x9b, 0x71, 0xcc, 0x6f,
+	0xf7, 0x0c, 0x17, 0x4c, 0x7f, 0xd7, 0x43, 0xdf, 0x60, 0xd7, 0x52, 0x25, 0x0d, 0x85, 0x54, 0x8c,
+	0xfb, 0xfa, 0x5f, 0x79, 0x4c, 0x31, 0xc1, 0xeb, 0x4f, 0x8c, 0x96, 0xf3, 0x98, 0x16, 0x4e, 0x68,
+	0xa7, 0x09, 0x0b, 0xef, 0x90, 0xf4, 0x02, 0xfa, 0x0a, 0x5b, 0x92, 0x28, 0xea, 0xaa, 0xa1, 0xa4,
+	0xe3, 0xa1, 0x08, 0xbc, 0x24, 0xc0, 0x75, 0x13, 0xe0, 0xcb, 0x15, 0x73, 0x30, 0x51, 0xb4, 0x9f,
+	0x50, 0x6c, 0x78, 0x9d, 0x0c, 0xae, 0xca, 0x65, 0x18, 0xf5, 0xe1, 0x99, 0x99, 0x20, 0x09, 0xf7,
+	0x69, 0xa2, 0x9e, 0x37, 0xea, 0x47, 0x8f, 0xa8, 0x63, 0xdd, 0x3e, 0x55, 0x2e, 0xcb, 0x79, 0x08,
+	0x0d, 0x60, 0x7b, 0x42, 0x82, 0x28, 0xc5, 0x78, 0xc1, 0x48, 0xbf, 0x5a, 0x21, 0x7d, 0xa3, 0x49,
+	0xcb, 0xce, 0x6b, 0x93, 0x14, 0x7c, 0xef, 0x0b, 0x14, 0xaf, 0x04, 0xb9, 0xef, 0xb3, 0x11, 0x45,
+	0xc7, 0x50, 0x21, 0x03, 0xc5, 0x26, 0xd4, 0x1d, 0x0b, 0x72, 0xef, 0x2a, 0x36, 0xa2, 0xf1, 0x39,
+	0x28, 0x59, 0x7c, 0xda, 0x79, 0x04, 0xe5, 0x41, 0x40, 0x89, 0x7c, 0xd0, 0x68, 0x4f, 0xc6, 0x53,
+	0x03, 0x27, 0x7d, 0x7b, 0xbf, 0xb3, 0x50, 0x4d, 0xc9, 0x11, 0x35, 0x97, 0x56, 0x22, 0xd9, 0x98,
+	0x71, 0x7b, 0xa6, 0xf3, 0x0b, 0x21, 0x63, 0x53, 0x42, 0x6f, 0x61, 0x7b, 0x81, 0x73, 0x47, 0x82,
+	0x40, 0x93, 0x72, 0x86, 0x54, 0x9b, 0x23, 0x7d, 0xb4, 0x35, 0xf4, 0x01, 0x36, 0x66, 0x1e, 0xd7,
+	0x4c, 0x6e, 0xcf, 0x57, 0xe4, 0x96, 0xb8, 0xc6, 0xc5, 0x71, 0xe2, 0xff, 0x57, 0x16, 0xca, 0x0b,
+	0x9b, 0xd2, 0x29, 0x3d, 0x58, 0x76, 0x20, 0xbe, 0x53, 0x19, 0xdb, 0x2e, 0x4d, 0x37, 0xd8, 0xd3,
+	0xe8, 0x42, 0x67, 0x14, 0x86, 0x54, 0xc6, 0x5e, 0x67, 0x9d, 0xd7, 0x1a, 0xfd, 0x4f, 0x97, 0x3f,
+	0xa1, 0x96, 0xb6, 0x73, 0xf4, 0x02, 0xca, 0xb3, 0xb0, 0x02, 0x36, 0x62, 0x2a, 0x31, 0x3a, 0x85,
+	0x7b, 0x1a, 0x9d, 0x1f, 0x9f, 0xfb, 0xc7, 0xf1, 0x87, 0xbb, 0x50, 0x88, 0xef, 0x37, 0x2a, 0x01,
+	0xe0, 0xb3, 0xae, 0xdb, 0xc6, 0xf8, 0x02, 0x5f, 0x55, 0x32, 0x87, 0x18, 0x76, 0x56, 0x5c, 0x57,
+	0x84, 0xa0, 0x84, 0x5b, 0xfd, 0xb6, 0xdb, 0xef, 0xe0, 0xf6, 0x55, 0xe7, 0xa2, 0x77, 0x56, 0xc9,
+	0x18, 0xba, 0xc6, 0x70, 0xeb, 0xfc, 0x53, 0xbb, 0x92, 0x45, 0x55, 0x28, 0xdf, 0xb4, 0x7a, 0xd7,
+	0x0f, 0x9b, 0x72, 0xb3, 0x07, 0xec, 0xa4, 0x0b, 0x55, 0x21, 0x7d, 0x47, 0x84, 0x94, 0x0f, 0x84,
+	0xf4, 0x1c, 0xfb, 0xc4, 0x7e, 0x6e, 0xfa, 0x4c, 0x0d, 0xa3, 0x5b, 0x67, 0x20, 0x46, 0x8d, 0xa4,
+	0xd6, 0xb0, 0xb5, 0xd7, 0xf1, 0xf3, 0x3b, 0x79, 0xd7, 0xf0, 0x85, 0x7e, 0x84, 0x1b, 0x56, 0xea,
+	0x36, 0x6f, 0x0a, 0x6f, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x36, 0x9d, 0x64, 0x93, 0xa7, 0x05,
+	0x00, 0x00,
 }
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/extension/extensions.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/extension/extensions.pb.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-protos/v4/go/extension/extensions.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/extension/extensions.pb.go
index 92bc924..16fe305 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/extension/extensions.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/extension/extensions.pb.go
@@ -8,7 +8,7 @@
 	fmt "fmt"
 	proto "github.com/golang/protobuf/proto"
 	empty "github.com/golang/protobuf/ptypes/empty"
-	config "github.com/opencord/voltha-protos/v4/go/ext/config"
+	config "github.com/opencord/voltha-protos/v5/go/ext/config"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
 	status "google.golang.org/grpc/status"
@@ -3187,8 +3187,8 @@
 	0xdd, 0x80, 0x90, 0x53, 0xc3, 0x50, 0xe8, 0xdd, 0xa8, 0xd9, 0x7b, 0xa7, 0x66, 0x7a, 0xfe, 0x1a,
 	0x86, 0xbb, 0xae, 0x77, 0x55, 0x71, 0x27, 0x84, 0x7d, 0x7a, 0x5b, 0x15, 0xf1, 0x2f, 0x10, 0x11,
 	0xef, 0x17, 0xd5, 0x2b, 0x9b, 0xbe, 0x9e, 0x5e, 0x54, 0x86, 0xee, 0xf8, 0x49, 0x80, 0x7a, 0x22,
-	0x50, 0x9f, 0xcb, 0x7f, 0x94, 0x78, 0x73, 0xf8, 0xe4, 0xca, 0x8d, 0xfe, 0xb3, 0xa2, 0xbb, 0x74,
-	0xa1, 0x70, 0xcf, 0x17, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x71, 0x00, 0x5d, 0x55, 0x7d, 0x21,
+	0x50, 0x9f, 0xcb, 0x7f, 0x94, 0x78, 0x53, 0x7f, 0x72, 0xe5, 0x46, 0xff, 0x59, 0xd1, 0x5d, 0xba,
+	0x50, 0xb8, 0xe7, 0x8b, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x56, 0x65, 0x78, 0xd4, 0x7d, 0x21,
 	0x00, 0x00,
 }
 
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/inter_container/inter_container.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/inter_container/inter_container.pb.go
new file mode 100644
index 0000000..fbffef2
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/inter_container/inter_container.pb.go
@@ -0,0 +1,1637 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/inter_container.proto
+
+package inter_container
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	common "github.com/opencord/voltha-protos/v5/go/common"
+	openflow_13 "github.com/opencord/voltha-protos/v5/go/openflow_13"
+	openolt "github.com/opencord/voltha-protos/v5/go/openolt"
+	tech_profile "github.com/opencord/voltha-protos/v5/go/tech_profile"
+	voltha "github.com/opencord/voltha-protos/v5/go/voltha"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+//
+// Core-Adapter messages
+//
+type AdapterRegistration struct {
+	Adapter              *voltha.Adapter     `protobuf:"bytes,1,opt,name=adapter,proto3" json:"adapter,omitempty"`
+	DTypes               *voltha.DeviceTypes `protobuf:"bytes,2,opt,name=dTypes,proto3" json:"dTypes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *AdapterRegistration) Reset()         { *m = AdapterRegistration{} }
+func (m *AdapterRegistration) String() string { return proto.CompactTextString(m) }
+func (*AdapterRegistration) ProtoMessage()    {}
+func (*AdapterRegistration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{0}
+}
+
+func (m *AdapterRegistration) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AdapterRegistration.Unmarshal(m, b)
+}
+func (m *AdapterRegistration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AdapterRegistration.Marshal(b, m, deterministic)
+}
+func (m *AdapterRegistration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AdapterRegistration.Merge(m, src)
+}
+func (m *AdapterRegistration) XXX_Size() int {
+	return xxx_messageInfo_AdapterRegistration.Size(m)
+}
+func (m *AdapterRegistration) XXX_DiscardUnknown() {
+	xxx_messageInfo_AdapterRegistration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdapterRegistration proto.InternalMessageInfo
+
+func (m *AdapterRegistration) GetAdapter() *voltha.Adapter {
+	if m != nil {
+		return m.Adapter
+	}
+	return nil
+}
+
+func (m *AdapterRegistration) GetDTypes() *voltha.DeviceTypes {
+	if m != nil {
+		return m.DTypes
+	}
+	return nil
+}
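
As a rough illustration of how this message might be populated, an adapter could describe itself and its supported device types, then hand the result to CoreServiceClient.RegisterAdapter. All identifiers and version strings below are placeholders, as is the import path of the generated service package.

```go
// Hypothetical registration helper; ids, vendor, version, and import paths are placeholders.
package main

import (
	"context"
	"log"

	core "github.com/opencord/voltha-protos/v5/go/core_service" // illustrative import path
	"github.com/opencord/voltha-protos/v5/go/inter_container"
	"github.com/opencord/voltha-protos/v5/go/voltha"
)

// registerWithCore sends the adapter's registration to the Core over gRPC.
// coreClient is a CoreServiceClient created elsewhere (see the client sketch above).
func registerWithCore(ctx context.Context, coreClient core.CoreServiceClient) error {
	reg := &inter_container.AdapterRegistration{
		Adapter: &voltha.Adapter{
			Id:      "openolt_1",      // placeholder adapter id
			Vendor:  "VOLTHA OpenOLT", // placeholder vendor string
			Version: "4.0.0",          // placeholder version
		},
		DTypes: &voltha.DeviceTypes{
			Items: []*voltha.DeviceType{{Id: "openolt"}},
		},
	}
	if _, err := coreClient.RegisterAdapter(ctx, reg); err != nil {
		log.Printf("adapter registration failed: %v", err)
		return err
	}
	return nil
}
```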
+
+type ChildDeviceFilter struct {
+	ParentId             string   `protobuf:"bytes,1,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
+	SerialNumber         string   `protobuf:"bytes,2,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
+	OnuId                uint32   `protobuf:"varint,3,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	ParentPortNo         uint32   `protobuf:"varint,4,opt,name=parent_port_no,json=parentPortNo,proto3" json:"parent_port_no,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ChildDeviceFilter) Reset()         { *m = ChildDeviceFilter{} }
+func (m *ChildDeviceFilter) String() string { return proto.CompactTextString(m) }
+func (*ChildDeviceFilter) ProtoMessage()    {}
+func (*ChildDeviceFilter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{1}
+}
+
+func (m *ChildDeviceFilter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ChildDeviceFilter.Unmarshal(m, b)
+}
+func (m *ChildDeviceFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ChildDeviceFilter.Marshal(b, m, deterministic)
+}
+func (m *ChildDeviceFilter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ChildDeviceFilter.Merge(m, src)
+}
+func (m *ChildDeviceFilter) XXX_Size() int {
+	return xxx_messageInfo_ChildDeviceFilter.Size(m)
+}
+func (m *ChildDeviceFilter) XXX_DiscardUnknown() {
+	xxx_messageInfo_ChildDeviceFilter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ChildDeviceFilter proto.InternalMessageInfo
+
+func (m *ChildDeviceFilter) GetParentId() string {
+	if m != nil {
+		return m.ParentId
+	}
+	return ""
+}
+
+func (m *ChildDeviceFilter) GetSerialNumber() string {
+	if m != nil {
+		return m.SerialNumber
+	}
+	return ""
+}
+
+func (m *ChildDeviceFilter) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *ChildDeviceFilter) GetParentPortNo() uint32 {
+	if m != nil {
+		return m.ParentPortNo
+	}
+	return 0
+}
+
+type PortFilter struct {
+	DeviceId             string               `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	Port                 uint32               `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
+	PortType             voltha.Port_PortType `protobuf:"varint,3,opt,name=port_type,json=portType,proto3,enum=voltha.Port_PortType" json:"port_type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *PortFilter) Reset()         { *m = PortFilter{} }
+func (m *PortFilter) String() string { return proto.CompactTextString(m) }
+func (*PortFilter) ProtoMessage()    {}
+func (*PortFilter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{2}
+}
+
+func (m *PortFilter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PortFilter.Unmarshal(m, b)
+}
+func (m *PortFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PortFilter.Marshal(b, m, deterministic)
+}
+func (m *PortFilter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PortFilter.Merge(m, src)
+}
+func (m *PortFilter) XXX_Size() int {
+	return xxx_messageInfo_PortFilter.Size(m)
+}
+func (m *PortFilter) XXX_DiscardUnknown() {
+	xxx_messageInfo_PortFilter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PortFilter proto.InternalMessageInfo
+
+func (m *PortFilter) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *PortFilter) GetPort() uint32 {
+	if m != nil {
+		return m.Port
+	}
+	return 0
+}
+
+func (m *PortFilter) GetPortType() voltha.Port_PortType {
+	if m != nil {
+		return m.PortType
+	}
+	return voltha.Port_UNKNOWN
+}
+
+type DeviceDiscovery struct {
+	ParentId             string   `protobuf:"bytes,1,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
+	ParentPortNo         uint32   `protobuf:"varint,2,opt,name=parent_port_no,json=parentPortNo,proto3" json:"parent_port_no,omitempty"`
+	ChildDeviceType      string   `protobuf:"bytes,3,opt,name=child_device_type,json=childDeviceType,proto3" json:"child_device_type,omitempty"`
+	ChannelId            uint32   `protobuf:"varint,4,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+	VendorId             string   `protobuf:"bytes,5,opt,name=vendor_id,json=vendorId,proto3" json:"vendor_id,omitempty"`
+	SerialNumber         string   `protobuf:"bytes,6,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
+	OnuId                uint32   `protobuf:"varint,7,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DeviceDiscovery) Reset()         { *m = DeviceDiscovery{} }
+func (m *DeviceDiscovery) String() string { return proto.CompactTextString(m) }
+func (*DeviceDiscovery) ProtoMessage()    {}
+func (*DeviceDiscovery) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{3}
+}
+
+func (m *DeviceDiscovery) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceDiscovery.Unmarshal(m, b)
+}
+func (m *DeviceDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceDiscovery.Marshal(b, m, deterministic)
+}
+func (m *DeviceDiscovery) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceDiscovery.Merge(m, src)
+}
+func (m *DeviceDiscovery) XXX_Size() int {
+	return xxx_messageInfo_DeviceDiscovery.Size(m)
+}
+func (m *DeviceDiscovery) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceDiscovery.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceDiscovery proto.InternalMessageInfo
+
+func (m *DeviceDiscovery) GetParentId() string {
+	if m != nil {
+		return m.ParentId
+	}
+	return ""
+}
+
+func (m *DeviceDiscovery) GetParentPortNo() uint32 {
+	if m != nil {
+		return m.ParentPortNo
+	}
+	return 0
+}
+
+func (m *DeviceDiscovery) GetChildDeviceType() string {
+	if m != nil {
+		return m.ChildDeviceType
+	}
+	return ""
+}
+
+func (m *DeviceDiscovery) GetChannelId() uint32 {
+	if m != nil {
+		return m.ChannelId
+	}
+	return 0
+}
+
+func (m *DeviceDiscovery) GetVendorId() string {
+	if m != nil {
+		return m.VendorId
+	}
+	return ""
+}
+
+func (m *DeviceDiscovery) GetSerialNumber() string {
+	if m != nil {
+		return m.SerialNumber
+	}
+	return ""
+}
+
+func (m *DeviceDiscovery) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
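
A DeviceDiscovery carries the identity of a newly seen child device (for example, an ONU) from the adapter to the Core, which answers with the corresponding child Device. The following is a hedged sketch, assuming a hypothetical import path for the generated service package and placeholder identifiers.

```go
// Hypothetical child-device announcement; all ids, serials, and port numbers are placeholders.
package main

import (
	"context"
	"log"

	core "github.com/opencord/voltha-protos/v5/go/core_service" // illustrative import path
	"github.com/opencord/voltha-protos/v5/go/inter_container"
)

// announceOnu reports a newly detected ONU; the Core replies with the child Device it created.
func announceOnu(ctx context.Context, coreClient core.CoreServiceClient) {
	discovery := &inter_container.DeviceDiscovery{
		ParentId:        "olt-0001",          // OLT device id known to the Core (placeholder)
		ParentPortNo:    536870912,           // PON port number (placeholder)
		ChildDeviceType: "brcm_openomci_onu", // ONU adapter type (placeholder)
		ChannelId:       1,
		VendorId:        "BBSM",
		SerialNumber:    "BBSM00000001",
		OnuId:           1,
	}
	childDevice, err := coreClient.ChildDeviceDetected(ctx, discovery)
	if err != nil {
		log.Printf("child device detection failed: %v", err)
		return
	}
	log.Printf("core created child device %s", childDevice.Id)
}
```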
+
+type DeviceStateFilter struct {
+	DeviceId             string                     `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	ParentDeviceId       string                     `protobuf:"bytes,2,opt,name=parent_device_id,json=parentDeviceId,proto3" json:"parent_device_id,omitempty"`
+	OperStatus           common.OperStatus_Types    `protobuf:"varint,3,opt,name=oper_status,json=operStatus,proto3,enum=common.OperStatus_Types" json:"oper_status,omitempty"`
+	ConnStatus           common.ConnectStatus_Types `protobuf:"varint,4,opt,name=conn_status,json=connStatus,proto3,enum=common.ConnectStatus_Types" json:"conn_status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
+	XXX_unrecognized     []byte                     `json:"-"`
+	XXX_sizecache        int32                      `json:"-"`
+}
+
+func (m *DeviceStateFilter) Reset()         { *m = DeviceStateFilter{} }
+func (m *DeviceStateFilter) String() string { return proto.CompactTextString(m) }
+func (*DeviceStateFilter) ProtoMessage()    {}
+func (*DeviceStateFilter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{4}
+}
+
+func (m *DeviceStateFilter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceStateFilter.Unmarshal(m, b)
+}
+func (m *DeviceStateFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceStateFilter.Marshal(b, m, deterministic)
+}
+func (m *DeviceStateFilter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceStateFilter.Merge(m, src)
+}
+func (m *DeviceStateFilter) XXX_Size() int {
+	return xxx_messageInfo_DeviceStateFilter.Size(m)
+}
+func (m *DeviceStateFilter) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceStateFilter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceStateFilter proto.InternalMessageInfo
+
+func (m *DeviceStateFilter) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *DeviceStateFilter) GetParentDeviceId() string {
+	if m != nil {
+		return m.ParentDeviceId
+	}
+	return ""
+}
+
+func (m *DeviceStateFilter) GetOperStatus() common.OperStatus_Types {
+	if m != nil {
+		return m.OperStatus
+	}
+	return common.OperStatus_UNKNOWN
+}
+
+func (m *DeviceStateFilter) GetConnStatus() common.ConnectStatus_Types {
+	if m != nil {
+		return m.ConnStatus
+	}
+	return common.ConnectStatus_UNKNOWN
+}
+
+type PortState struct {
+	DeviceId             string                  `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	PortType             voltha.Port_PortType    `protobuf:"varint,2,opt,name=port_type,json=portType,proto3,enum=voltha.Port_PortType" json:"port_type,omitempty"`
+	PortNo               uint32                  `protobuf:"varint,3,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	OperStatus           common.OperStatus_Types `protobuf:"varint,4,opt,name=oper_status,json=operStatus,proto3,enum=common.OperStatus_Types" json:"oper_status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                `json:"-"`
+	XXX_unrecognized     []byte                  `json:"-"`
+	XXX_sizecache        int32                   `json:"-"`
+}
+
+func (m *PortState) Reset()         { *m = PortState{} }
+func (m *PortState) String() string { return proto.CompactTextString(m) }
+func (*PortState) ProtoMessage()    {}
+func (*PortState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{5}
+}
+
+func (m *PortState) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PortState.Unmarshal(m, b)
+}
+func (m *PortState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PortState.Marshal(b, m, deterministic)
+}
+func (m *PortState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PortState.Merge(m, src)
+}
+func (m *PortState) XXX_Size() int {
+	return xxx_messageInfo_PortState.Size(m)
+}
+func (m *PortState) XXX_DiscardUnknown() {
+	xxx_messageInfo_PortState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PortState proto.InternalMessageInfo
+
+func (m *PortState) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *PortState) GetPortType() voltha.Port_PortType {
+	if m != nil {
+		return m.PortType
+	}
+	return voltha.Port_UNKNOWN
+}
+
+func (m *PortState) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *PortState) GetOperStatus() common.OperStatus_Types {
+	if m != nil {
+		return m.OperStatus
+	}
+	return common.OperStatus_UNKNOWN
+}
+
+type PortStateFilter struct {
+	DeviceId             string                  `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	PortTypeFilter       uint32                  `protobuf:"varint,2,opt,name=port_type_filter,json=portTypeFilter,proto3" json:"port_type_filter,omitempty"`
+	OperStatus           common.OperStatus_Types `protobuf:"varint,4,opt,name=oper_status,json=operStatus,proto3,enum=common.OperStatus_Types" json:"oper_status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                `json:"-"`
+	XXX_unrecognized     []byte                  `json:"-"`
+	XXX_sizecache        int32                   `json:"-"`
+}
+
+func (m *PortStateFilter) Reset()         { *m = PortStateFilter{} }
+func (m *PortStateFilter) String() string { return proto.CompactTextString(m) }
+func (*PortStateFilter) ProtoMessage()    {}
+func (*PortStateFilter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{6}
+}
+
+func (m *PortStateFilter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PortStateFilter.Unmarshal(m, b)
+}
+func (m *PortStateFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PortStateFilter.Marshal(b, m, deterministic)
+}
+func (m *PortStateFilter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PortStateFilter.Merge(m, src)
+}
+func (m *PortStateFilter) XXX_Size() int {
+	return xxx_messageInfo_PortStateFilter.Size(m)
+}
+func (m *PortStateFilter) XXX_DiscardUnknown() {
+	xxx_messageInfo_PortStateFilter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PortStateFilter proto.InternalMessageInfo
+
+func (m *PortStateFilter) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *PortStateFilter) GetPortTypeFilter() uint32 {
+	if m != nil {
+		return m.PortTypeFilter
+	}
+	return 0
+}
+
+func (m *PortStateFilter) GetOperStatus() common.OperStatus_Types {
+	if m != nil {
+		return m.OperStatus
+	}
+	return common.OperStatus_UNKNOWN
+}
+
+type PacketIn struct {
+	DeviceId             string   `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	Port                 uint32   `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
+	Packet               []byte   `protobuf:"bytes,3,opt,name=packet,proto3" json:"packet,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *PacketIn) Reset()         { *m = PacketIn{} }
+func (m *PacketIn) String() string { return proto.CompactTextString(m) }
+func (*PacketIn) ProtoMessage()    {}
+func (*PacketIn) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{7}
+}
+
+func (m *PacketIn) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PacketIn.Unmarshal(m, b)
+}
+func (m *PacketIn) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PacketIn.Marshal(b, m, deterministic)
+}
+func (m *PacketIn) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PacketIn.Merge(m, src)
+}
+func (m *PacketIn) XXX_Size() int {
+	return xxx_messageInfo_PacketIn.Size(m)
+}
+func (m *PacketIn) XXX_DiscardUnknown() {
+	xxx_messageInfo_PacketIn.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketIn proto.InternalMessageInfo
+
+func (m *PacketIn) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *PacketIn) GetPort() uint32 {
+	if m != nil {
+		return m.Port
+	}
+	return 0
+}
+
+func (m *PacketIn) GetPacket() []byte {
+	if m != nil {
+		return m.Packet
+	}
+	return nil
+}
+
+type PacketOut struct {
+	DeviceId             string                    `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	EgressPortNo         uint32                    `protobuf:"varint,2,opt,name=egress_port_no,json=egressPortNo,proto3" json:"egress_port_no,omitempty"`
+	Packet               *openflow_13.OfpPacketOut `protobuf:"bytes,3,opt,name=packet,proto3" json:"packet,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *PacketOut) Reset()         { *m = PacketOut{} }
+func (m *PacketOut) String() string { return proto.CompactTextString(m) }
+func (*PacketOut) ProtoMessage()    {}
+func (*PacketOut) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{8}
+}
+
+func (m *PacketOut) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PacketOut.Unmarshal(m, b)
+}
+func (m *PacketOut) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PacketOut.Marshal(b, m, deterministic)
+}
+func (m *PacketOut) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PacketOut.Merge(m, src)
+}
+func (m *PacketOut) XXX_Size() int {
+	return xxx_messageInfo_PacketOut.Size(m)
+}
+func (m *PacketOut) XXX_DiscardUnknown() {
+	xxx_messageInfo_PacketOut.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketOut proto.InternalMessageInfo
+
+func (m *PacketOut) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *PacketOut) GetEgressPortNo() uint32 {
+	if m != nil {
+		return m.EgressPortNo
+	}
+	return 0
+}
+
+func (m *PacketOut) GetPacket() *openflow_13.OfpPacketOut {
+	if m != nil {
+		return m.Packet
+	}
+	return nil
+}
+
+type DeviceReason struct {
+	DeviceId             string   `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	Reason               string   `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DeviceReason) Reset()         { *m = DeviceReason{} }
+func (m *DeviceReason) String() string { return proto.CompactTextString(m) }
+func (*DeviceReason) ProtoMessage()    {}
+func (*DeviceReason) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{9}
+}
+
+func (m *DeviceReason) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceReason.Unmarshal(m, b)
+}
+func (m *DeviceReason) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceReason.Marshal(b, m, deterministic)
+}
+func (m *DeviceReason) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceReason.Merge(m, src)
+}
+func (m *DeviceReason) XXX_Size() int {
+	return xxx_messageInfo_DeviceReason.Size(m)
+}
+func (m *DeviceReason) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceReason.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceReason proto.InternalMessageInfo
+
+func (m *DeviceReason) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *DeviceReason) GetReason() string {
+	if m != nil {
+		return m.Reason
+	}
+	return ""
+}
+
+type BulkFlows struct {
+	Device               *voltha.Device          `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
+	Flows                *openflow_13.Flows      `protobuf:"bytes,2,opt,name=flows,proto3" json:"flows,omitempty"`
+	Groups               *openflow_13.FlowGroups `protobuf:"bytes,3,opt,name=groups,proto3" json:"groups,omitempty"`
+	FlowMetadata         *voltha.FlowMetadata    `protobuf:"bytes,4,opt,name=flow_metadata,json=flowMetadata,proto3" json:"flow_metadata,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                `json:"-"`
+	XXX_unrecognized     []byte                  `json:"-"`
+	XXX_sizecache        int32                   `json:"-"`
+}
+
+func (m *BulkFlows) Reset()         { *m = BulkFlows{} }
+func (m *BulkFlows) String() string { return proto.CompactTextString(m) }
+func (*BulkFlows) ProtoMessage()    {}
+func (*BulkFlows) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{10}
+}
+
+func (m *BulkFlows) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_BulkFlows.Unmarshal(m, b)
+}
+func (m *BulkFlows) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_BulkFlows.Marshal(b, m, deterministic)
+}
+func (m *BulkFlows) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BulkFlows.Merge(m, src)
+}
+func (m *BulkFlows) XXX_Size() int {
+	return xxx_messageInfo_BulkFlows.Size(m)
+}
+func (m *BulkFlows) XXX_DiscardUnknown() {
+	xxx_messageInfo_BulkFlows.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BulkFlows proto.InternalMessageInfo
+
+func (m *BulkFlows) GetDevice() *voltha.Device {
+	if m != nil {
+		return m.Device
+	}
+	return nil
+}
+
+func (m *BulkFlows) GetFlows() *openflow_13.Flows {
+	if m != nil {
+		return m.Flows
+	}
+	return nil
+}
+
+func (m *BulkFlows) GetGroups() *openflow_13.FlowGroups {
+	if m != nil {
+		return m.Groups
+	}
+	return nil
+}
+
+func (m *BulkFlows) GetFlowMetadata() *voltha.FlowMetadata {
+	if m != nil {
+		return m.FlowMetadata
+	}
+	return nil
+}
+
+type IncrementalFlows struct {
+	Device               *voltha.Device                `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
+	Flows                *openflow_13.FlowChanges      `protobuf:"bytes,2,opt,name=flows,proto3" json:"flows,omitempty"`
+	Groups               *openflow_13.FlowGroupChanges `protobuf:"bytes,3,opt,name=groups,proto3" json:"groups,omitempty"`
+	FlowMetadata         *voltha.FlowMetadata          `protobuf:"bytes,4,opt,name=flow_metadata,json=flowMetadata,proto3" json:"flow_metadata,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                      `json:"-"`
+	XXX_unrecognized     []byte                        `json:"-"`
+	XXX_sizecache        int32                         `json:"-"`
+}
+
+func (m *IncrementalFlows) Reset()         { *m = IncrementalFlows{} }
+func (m *IncrementalFlows) String() string { return proto.CompactTextString(m) }
+func (*IncrementalFlows) ProtoMessage()    {}
+func (*IncrementalFlows) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{11}
+}
+
+func (m *IncrementalFlows) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_IncrementalFlows.Unmarshal(m, b)
+}
+func (m *IncrementalFlows) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_IncrementalFlows.Marshal(b, m, deterministic)
+}
+func (m *IncrementalFlows) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_IncrementalFlows.Merge(m, src)
+}
+func (m *IncrementalFlows) XXX_Size() int {
+	return xxx_messageInfo_IncrementalFlows.Size(m)
+}
+func (m *IncrementalFlows) XXX_DiscardUnknown() {
+	xxx_messageInfo_IncrementalFlows.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IncrementalFlows proto.InternalMessageInfo
+
+func (m *IncrementalFlows) GetDevice() *voltha.Device {
+	if m != nil {
+		return m.Device
+	}
+	return nil
+}
+
+func (m *IncrementalFlows) GetFlows() *openflow_13.FlowChanges {
+	if m != nil {
+		return m.Flows
+	}
+	return nil
+}
+
+func (m *IncrementalFlows) GetGroups() *openflow_13.FlowGroupChanges {
+	if m != nil {
+		return m.Groups
+	}
+	return nil
+}
+
+func (m *IncrementalFlows) GetFlowMetadata() *voltha.FlowMetadata {
+	if m != nil {
+		return m.FlowMetadata
+	}
+	return nil
+}
+
+type PmConfigsInfo struct {
+	DeviceId             string            `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	PmConfigs            *voltha.PmConfigs `protobuf:"bytes,2,opt,name=pm_configs,json=pmConfigs,proto3" json:"pm_configs,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *PmConfigsInfo) Reset()         { *m = PmConfigsInfo{} }
+func (m *PmConfigsInfo) String() string { return proto.CompactTextString(m) }
+func (*PmConfigsInfo) ProtoMessage()    {}
+func (*PmConfigsInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{12}
+}
+
+func (m *PmConfigsInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PmConfigsInfo.Unmarshal(m, b)
+}
+func (m *PmConfigsInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PmConfigsInfo.Marshal(b, m, deterministic)
+}
+func (m *PmConfigsInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PmConfigsInfo.Merge(m, src)
+}
+func (m *PmConfigsInfo) XXX_Size() int {
+	return xxx_messageInfo_PmConfigsInfo.Size(m)
+}
+func (m *PmConfigsInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_PmConfigsInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PmConfigsInfo proto.InternalMessageInfo
+
+func (m *PmConfigsInfo) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *PmConfigsInfo) GetPmConfigs() *voltha.PmConfigs {
+	if m != nil {
+		return m.PmConfigs
+	}
+	return nil
+}
+
+type SwitchCapability struct {
+	Desc                 *openflow_13.OfpDesc           `protobuf:"bytes,1,opt,name=desc,proto3" json:"desc,omitempty"`
+	SwitchFeatures       *openflow_13.OfpSwitchFeatures `protobuf:"bytes,2,opt,name=switch_features,json=switchFeatures,proto3" json:"switch_features,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
+	XXX_unrecognized     []byte                         `json:"-"`
+	XXX_sizecache        int32                          `json:"-"`
+}
+
+func (m *SwitchCapability) Reset()         { *m = SwitchCapability{} }
+func (m *SwitchCapability) String() string { return proto.CompactTextString(m) }
+func (*SwitchCapability) ProtoMessage()    {}
+func (*SwitchCapability) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{13}
+}
+
+func (m *SwitchCapability) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SwitchCapability.Unmarshal(m, b)
+}
+func (m *SwitchCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SwitchCapability.Marshal(b, m, deterministic)
+}
+func (m *SwitchCapability) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SwitchCapability.Merge(m, src)
+}
+func (m *SwitchCapability) XXX_Size() int {
+	return xxx_messageInfo_SwitchCapability.Size(m)
+}
+func (m *SwitchCapability) XXX_DiscardUnknown() {
+	xxx_messageInfo_SwitchCapability.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SwitchCapability proto.InternalMessageInfo
+
+func (m *SwitchCapability) GetDesc() *openflow_13.OfpDesc {
+	if m != nil {
+		return m.Desc
+	}
+	return nil
+}
+
+func (m *SwitchCapability) GetSwitchFeatures() *openflow_13.OfpSwitchFeatures {
+	if m != nil {
+		return m.SwitchFeatures
+	}
+	return nil
+}
+
+type ImageDownloadMessage struct {
+	Device               *voltha.Device        `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
+	Image                *voltha.ImageDownload `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *ImageDownloadMessage) Reset()         { *m = ImageDownloadMessage{} }
+func (m *ImageDownloadMessage) String() string { return proto.CompactTextString(m) }
+func (*ImageDownloadMessage) ProtoMessage()    {}
+func (*ImageDownloadMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{14}
+}
+
+func (m *ImageDownloadMessage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ImageDownloadMessage.Unmarshal(m, b)
+}
+func (m *ImageDownloadMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ImageDownloadMessage.Marshal(b, m, deterministic)
+}
+func (m *ImageDownloadMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageDownloadMessage.Merge(m, src)
+}
+func (m *ImageDownloadMessage) XXX_Size() int {
+	return xxx_messageInfo_ImageDownloadMessage.Size(m)
+}
+func (m *ImageDownloadMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageDownloadMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageDownloadMessage proto.InternalMessageInfo
+
+func (m *ImageDownloadMessage) GetDevice() *voltha.Device {
+	if m != nil {
+		return m.Device
+	}
+	return nil
+}
+
+func (m *ImageDownloadMessage) GetImage() *voltha.ImageDownload {
+	if m != nil {
+		return m.Image
+	}
+	return nil
+}
+
+type OMCITest struct {
+	Device               *voltha.Device          `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
+	Request              *voltha.OmciTestRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                `json:"-"`
+	XXX_unrecognized     []byte                  `json:"-"`
+	XXX_sizecache        int32                   `json:"-"`
+}
+
+func (m *OMCITest) Reset()         { *m = OMCITest{} }
+func (m *OMCITest) String() string { return proto.CompactTextString(m) }
+func (*OMCITest) ProtoMessage()    {}
+func (*OMCITest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{15}
+}
+
+func (m *OMCITest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OMCITest.Unmarshal(m, b)
+}
+func (m *OMCITest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OMCITest.Marshal(b, m, deterministic)
+}
+func (m *OMCITest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OMCITest.Merge(m, src)
+}
+func (m *OMCITest) XXX_Size() int {
+	return xxx_messageInfo_OMCITest.Size(m)
+}
+func (m *OMCITest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OMCITest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OMCITest proto.InternalMessageInfo
+
+func (m *OMCITest) GetDevice() *voltha.Device {
+	if m != nil {
+		return m.Device
+	}
+	return nil
+}
+
+func (m *OMCITest) GetRequest() *voltha.OmciTestRequest {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+type SimulateAlarmMessage struct {
+	Device               *voltha.Device               `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
+	Request              *voltha.SimulateAlarmRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
+}
+
+func (m *SimulateAlarmMessage) Reset()         { *m = SimulateAlarmMessage{} }
+func (m *SimulateAlarmMessage) String() string { return proto.CompactTextString(m) }
+func (*SimulateAlarmMessage) ProtoMessage()    {}
+func (*SimulateAlarmMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{16}
+}
+
+func (m *SimulateAlarmMessage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SimulateAlarmMessage.Unmarshal(m, b)
+}
+func (m *SimulateAlarmMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SimulateAlarmMessage.Marshal(b, m, deterministic)
+}
+func (m *SimulateAlarmMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SimulateAlarmMessage.Merge(m, src)
+}
+func (m *SimulateAlarmMessage) XXX_Size() int {
+	return xxx_messageInfo_SimulateAlarmMessage.Size(m)
+}
+func (m *SimulateAlarmMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_SimulateAlarmMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SimulateAlarmMessage proto.InternalMessageInfo
+
+func (m *SimulateAlarmMessage) GetDevice() *voltha.Device {
+	if m != nil {
+		return m.Device
+	}
+	return nil
+}
+
+func (m *SimulateAlarmMessage) GetRequest() *voltha.SimulateAlarmRequest {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+type GetExtValueMessage struct {
+	ParentDevice         *voltha.Device        `protobuf:"bytes,1,opt,name=parent_device,json=parentDevice,proto3" json:"parent_device,omitempty"`
+	ChildDevice          *voltha.Device        `protobuf:"bytes,2,opt,name=child_device,json=childDevice,proto3" json:"child_device,omitempty"`
+	ValueType            common.ValueType_Type `protobuf:"varint,3,opt,name=value_type,json=valueType,proto3,enum=common.ValueType_Type" json:"value_type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *GetExtValueMessage) Reset()         { *m = GetExtValueMessage{} }
+func (m *GetExtValueMessage) String() string { return proto.CompactTextString(m) }
+func (*GetExtValueMessage) ProtoMessage()    {}
+func (*GetExtValueMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{17}
+}
+
+func (m *GetExtValueMessage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetExtValueMessage.Unmarshal(m, b)
+}
+func (m *GetExtValueMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetExtValueMessage.Marshal(b, m, deterministic)
+}
+func (m *GetExtValueMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetExtValueMessage.Merge(m, src)
+}
+func (m *GetExtValueMessage) XXX_Size() int {
+	return xxx_messageInfo_GetExtValueMessage.Size(m)
+}
+func (m *GetExtValueMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetExtValueMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetExtValueMessage proto.InternalMessageInfo
+
+func (m *GetExtValueMessage) GetParentDevice() *voltha.Device {
+	if m != nil {
+		return m.ParentDevice
+	}
+	return nil
+}
+
+func (m *GetExtValueMessage) GetChildDevice() *voltha.Device {
+	if m != nil {
+		return m.ChildDevice
+	}
+	return nil
+}
+
+func (m *GetExtValueMessage) GetValueType() common.ValueType_Type {
+	if m != nil {
+		return m.ValueType
+	}
+	return common.ValueType_EMPTY
+}
+
+type SetExtValueMessage struct {
+	Device               *voltha.Device   `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
+	Value                *voltha.ValueSet `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *SetExtValueMessage) Reset()         { *m = SetExtValueMessage{} }
+func (m *SetExtValueMessage) String() string { return proto.CompactTextString(m) }
+func (*SetExtValueMessage) ProtoMessage()    {}
+func (*SetExtValueMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{18}
+}
+
+func (m *SetExtValueMessage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SetExtValueMessage.Unmarshal(m, b)
+}
+func (m *SetExtValueMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SetExtValueMessage.Marshal(b, m, deterministic)
+}
+func (m *SetExtValueMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SetExtValueMessage.Merge(m, src)
+}
+func (m *SetExtValueMessage) XXX_Size() int {
+	return xxx_messageInfo_SetExtValueMessage.Size(m)
+}
+func (m *SetExtValueMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_SetExtValueMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetExtValueMessage proto.InternalMessageInfo
+
+func (m *SetExtValueMessage) GetDevice() *voltha.Device {
+	if m != nil {
+		return m.Device
+	}
+	return nil
+}
+
+func (m *SetExtValueMessage) GetValue() *voltha.ValueSet {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type OmciMessage struct {
+	Message              []byte                      `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
+	ConnectStatus        common.ConnectStatus_Types  `protobuf:"varint,2,opt,name=connect_status,json=connectStatus,proto3,enum=common.ConnectStatus_Types" json:"connect_status,omitempty"`
+	ProxyAddress         *voltha.Device_ProxyAddress `protobuf:"bytes,3,opt,name=proxy_address,json=proxyAddress,proto3" json:"proxy_address,omitempty"`
+	ParentDeviceId       string                      `protobuf:"bytes,4,opt,name=parent_device_id,json=parentDeviceId,proto3" json:"parent_device_id,omitempty"`
+	ChildDeviceId        string                      `protobuf:"bytes,5,opt,name=child_device_id,json=childDeviceId,proto3" json:"child_device_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                    `json:"-"`
+	XXX_unrecognized     []byte                      `json:"-"`
+	XXX_sizecache        int32                       `json:"-"`
+}
+
+func (m *OmciMessage) Reset()         { *m = OmciMessage{} }
+func (m *OmciMessage) String() string { return proto.CompactTextString(m) }
+func (*OmciMessage) ProtoMessage()    {}
+func (*OmciMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{19}
+}
+
+func (m *OmciMessage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OmciMessage.Unmarshal(m, b)
+}
+func (m *OmciMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OmciMessage.Marshal(b, m, deterministic)
+}
+func (m *OmciMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OmciMessage.Merge(m, src)
+}
+func (m *OmciMessage) XXX_Size() int {
+	return xxx_messageInfo_OmciMessage.Size(m)
+}
+func (m *OmciMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_OmciMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OmciMessage proto.InternalMessageInfo
+
+func (m *OmciMessage) GetMessage() []byte {
+	if m != nil {
+		return m.Message
+	}
+	return nil
+}
+
+func (m *OmciMessage) GetConnectStatus() common.ConnectStatus_Types {
+	if m != nil {
+		return m.ConnectStatus
+	}
+	return common.ConnectStatus_UNKNOWN
+}
+
+func (m *OmciMessage) GetProxyAddress() *voltha.Device_ProxyAddress {
+	if m != nil {
+		return m.ProxyAddress
+	}
+	return nil
+}
+
+func (m *OmciMessage) GetParentDeviceId() string {
+	if m != nil {
+		return m.ParentDeviceId
+	}
+	return ""
+}
+
+func (m *OmciMessage) GetChildDeviceId() string {
+	if m != nil {
+		return m.ChildDeviceId
+	}
+	return ""
+}
+
+type TechProfileDownloadMessage struct {
+	DeviceId       string `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	UniId          uint32 `protobuf:"varint,2,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
+	TpInstancePath string `protobuf:"bytes,3,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
+	// Types that are valid to be assigned to TechTpInstance:
+	//	*TechProfileDownloadMessage_TpInstance
+	//	*TechProfileDownloadMessage_EponTpInstance
+	TechTpInstance       isTechProfileDownloadMessage_TechTpInstance `protobuf_oneof:"tech_tp_instance"`
+	XXX_NoUnkeyedLiteral struct{}                                    `json:"-"`
+	XXX_unrecognized     []byte                                      `json:"-"`
+	XXX_sizecache        int32                                       `json:"-"`
+}
+
+func (m *TechProfileDownloadMessage) Reset()         { *m = TechProfileDownloadMessage{} }
+func (m *TechProfileDownloadMessage) String() string { return proto.CompactTextString(m) }
+func (*TechProfileDownloadMessage) ProtoMessage()    {}
+func (*TechProfileDownloadMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{20}
+}
+
+func (m *TechProfileDownloadMessage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TechProfileDownloadMessage.Unmarshal(m, b)
+}
+func (m *TechProfileDownloadMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TechProfileDownloadMessage.Marshal(b, m, deterministic)
+}
+func (m *TechProfileDownloadMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TechProfileDownloadMessage.Merge(m, src)
+}
+func (m *TechProfileDownloadMessage) XXX_Size() int {
+	return xxx_messageInfo_TechProfileDownloadMessage.Size(m)
+}
+func (m *TechProfileDownloadMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_TechProfileDownloadMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TechProfileDownloadMessage proto.InternalMessageInfo
+
+func (m *TechProfileDownloadMessage) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *TechProfileDownloadMessage) GetUniId() uint32 {
+	if m != nil {
+		return m.UniId
+	}
+	return 0
+}
+
+func (m *TechProfileDownloadMessage) GetTpInstancePath() string {
+	if m != nil {
+		return m.TpInstancePath
+	}
+	return ""
+}
+
+type isTechProfileDownloadMessage_TechTpInstance interface {
+	isTechProfileDownloadMessage_TechTpInstance()
+}
+
+type TechProfileDownloadMessage_TpInstance struct {
+	TpInstance *tech_profile.TechProfileInstance `protobuf:"bytes,4,opt,name=tp_instance,json=tpInstance,proto3,oneof"`
+}
+
+type TechProfileDownloadMessage_EponTpInstance struct {
+	EponTpInstance *tech_profile.EponTechProfileInstance `protobuf:"bytes,5,opt,name=epon_tp_instance,json=eponTpInstance,proto3,oneof"`
+}
+
+func (*TechProfileDownloadMessage_TpInstance) isTechProfileDownloadMessage_TechTpInstance() {}
+
+func (*TechProfileDownloadMessage_EponTpInstance) isTechProfileDownloadMessage_TechTpInstance() {}
+
+func (m *TechProfileDownloadMessage) GetTechTpInstance() isTechProfileDownloadMessage_TechTpInstance {
+	if m != nil {
+		return m.TechTpInstance
+	}
+	return nil
+}
+
+func (m *TechProfileDownloadMessage) GetTpInstance() *tech_profile.TechProfileInstance {
+	if x, ok := m.GetTechTpInstance().(*TechProfileDownloadMessage_TpInstance); ok {
+		return x.TpInstance
+	}
+	return nil
+}
+
+func (m *TechProfileDownloadMessage) GetEponTpInstance() *tech_profile.EponTechProfileInstance {
+	if x, ok := m.GetTechTpInstance().(*TechProfileDownloadMessage_EponTpInstance); ok {
+		return x.EponTpInstance
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*TechProfileDownloadMessage) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*TechProfileDownloadMessage_TpInstance)(nil),
+		(*TechProfileDownloadMessage_EponTpInstance)(nil),
+	}
+}
+
+type DeleteGemPortMessage struct {
+	DeviceId             string   `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	UniId                uint32   `protobuf:"varint,2,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
+	TpInstancePath       string   `protobuf:"bytes,3,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
+	GemPortId            uint32   `protobuf:"varint,4,opt,name=gem_port_id,json=gemPortId,proto3" json:"gem_port_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DeleteGemPortMessage) Reset()         { *m = DeleteGemPortMessage{} }
+func (m *DeleteGemPortMessage) String() string { return proto.CompactTextString(m) }
+func (*DeleteGemPortMessage) ProtoMessage()    {}
+func (*DeleteGemPortMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{21}
+}
+
+func (m *DeleteGemPortMessage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeleteGemPortMessage.Unmarshal(m, b)
+}
+func (m *DeleteGemPortMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeleteGemPortMessage.Marshal(b, m, deterministic)
+}
+func (m *DeleteGemPortMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteGemPortMessage.Merge(m, src)
+}
+func (m *DeleteGemPortMessage) XXX_Size() int {
+	return xxx_messageInfo_DeleteGemPortMessage.Size(m)
+}
+func (m *DeleteGemPortMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteGemPortMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteGemPortMessage proto.InternalMessageInfo
+
+func (m *DeleteGemPortMessage) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *DeleteGemPortMessage) GetUniId() uint32 {
+	if m != nil {
+		return m.UniId
+	}
+	return 0
+}
+
+func (m *DeleteGemPortMessage) GetTpInstancePath() string {
+	if m != nil {
+		return m.TpInstancePath
+	}
+	return ""
+}
+
+func (m *DeleteGemPortMessage) GetGemPortId() uint32 {
+	if m != nil {
+		return m.GemPortId
+	}
+	return 0
+}
+
+type DeleteTcontMessage struct {
+	DeviceId             string   `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	UniId                uint32   `protobuf:"varint,2,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
+	TpInstancePath       string   `protobuf:"bytes,3,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
+	AllocId              uint32   `protobuf:"varint,4,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DeleteTcontMessage) Reset()         { *m = DeleteTcontMessage{} }
+func (m *DeleteTcontMessage) String() string { return proto.CompactTextString(m) }
+func (*DeleteTcontMessage) ProtoMessage()    {}
+func (*DeleteTcontMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{22}
+}
+
+func (m *DeleteTcontMessage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeleteTcontMessage.Unmarshal(m, b)
+}
+func (m *DeleteTcontMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeleteTcontMessage.Marshal(b, m, deterministic)
+}
+func (m *DeleteTcontMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteTcontMessage.Merge(m, src)
+}
+func (m *DeleteTcontMessage) XXX_Size() int {
+	return xxx_messageInfo_DeleteTcontMessage.Size(m)
+}
+func (m *DeleteTcontMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteTcontMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteTcontMessage proto.InternalMessageInfo
+
+func (m *DeleteTcontMessage) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *DeleteTcontMessage) GetUniId() uint32 {
+	if m != nil {
+		return m.UniId
+	}
+	return 0
+}
+
+func (m *DeleteTcontMessage) GetTpInstancePath() string {
+	if m != nil {
+		return m.TpInstancePath
+	}
+	return ""
+}
+
+func (m *DeleteTcontMessage) GetAllocId() uint32 {
+	if m != nil {
+		return m.AllocId
+	}
+	return 0
+}
+
+type OnuIndicationMessage struct {
+	DeviceId             string                 `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	OnuIndication        *openolt.OnuIndication `protobuf:"bytes,2,opt,name=onu_indication,json=onuIndication,proto3" json:"onu_indication,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *OnuIndicationMessage) Reset()         { *m = OnuIndicationMessage{} }
+func (m *OnuIndicationMessage) String() string { return proto.CompactTextString(m) }
+func (*OnuIndicationMessage) ProtoMessage()    {}
+func (*OnuIndicationMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{23}
+}
+
+func (m *OnuIndicationMessage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuIndicationMessage.Unmarshal(m, b)
+}
+func (m *OnuIndicationMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuIndicationMessage.Marshal(b, m, deterministic)
+}
+func (m *OnuIndicationMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuIndicationMessage.Merge(m, src)
+}
+func (m *OnuIndicationMessage) XXX_Size() int {
+	return xxx_messageInfo_OnuIndicationMessage.Size(m)
+}
+func (m *OnuIndicationMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuIndicationMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuIndicationMessage proto.InternalMessageInfo
+
+func (m *OnuIndicationMessage) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *OnuIndicationMessage) GetOnuIndication() *openolt.OnuIndication {
+	if m != nil {
+		return m.OnuIndication
+	}
+	return nil
+}
+
+type TechProfileInstanceRequestMessage struct {
+	DeviceId             string   `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	TpInstancePath       string   `protobuf:"bytes,2,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
+	ParentDeviceId       string   `protobuf:"bytes,3,opt,name=parent_device_id,json=parentDeviceId,proto3" json:"parent_device_id,omitempty"`
+	ParentPonPort        uint32   `protobuf:"varint,4,opt,name=parent_pon_port,json=parentPonPort,proto3" json:"parent_pon_port,omitempty"`
+	OnuId                uint32   `protobuf:"varint,5,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	UniId                uint32   `protobuf:"varint,6,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *TechProfileInstanceRequestMessage) Reset()         { *m = TechProfileInstanceRequestMessage{} }
+func (m *TechProfileInstanceRequestMessage) String() string { return proto.CompactTextString(m) }
+func (*TechProfileInstanceRequestMessage) ProtoMessage()    {}
+func (*TechProfileInstanceRequestMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{24}
+}
+
+func (m *TechProfileInstanceRequestMessage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TechProfileInstanceRequestMessage.Unmarshal(m, b)
+}
+func (m *TechProfileInstanceRequestMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TechProfileInstanceRequestMessage.Marshal(b, m, deterministic)
+}
+func (m *TechProfileInstanceRequestMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TechProfileInstanceRequestMessage.Merge(m, src)
+}
+func (m *TechProfileInstanceRequestMessage) XXX_Size() int {
+	return xxx_messageInfo_TechProfileInstanceRequestMessage.Size(m)
+}
+func (m *TechProfileInstanceRequestMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_TechProfileInstanceRequestMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TechProfileInstanceRequestMessage proto.InternalMessageInfo
+
+func (m *TechProfileInstanceRequestMessage) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *TechProfileInstanceRequestMessage) GetTpInstancePath() string {
+	if m != nil {
+		return m.TpInstancePath
+	}
+	return ""
+}
+
+func (m *TechProfileInstanceRequestMessage) GetParentDeviceId() string {
+	if m != nil {
+		return m.ParentDeviceId
+	}
+	return ""
+}
+
+func (m *TechProfileInstanceRequestMessage) GetParentPonPort() uint32 {
+	if m != nil {
+		return m.ParentPonPort
+	}
+	return 0
+}
+
+func (m *TechProfileInstanceRequestMessage) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *TechProfileInstanceRequestMessage) GetUniId() uint32 {
+	if m != nil {
+		return m.UniId
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*AdapterRegistration)(nil), "voltha.AdapterRegistration")
+	proto.RegisterType((*ChildDeviceFilter)(nil), "voltha.ChildDeviceFilter")
+	proto.RegisterType((*PortFilter)(nil), "voltha.PortFilter")
+	proto.RegisterType((*DeviceDiscovery)(nil), "voltha.DeviceDiscovery")
+	proto.RegisterType((*DeviceStateFilter)(nil), "voltha.DeviceStateFilter")
+	proto.RegisterType((*PortState)(nil), "voltha.PortState")
+	proto.RegisterType((*PortStateFilter)(nil), "voltha.PortStateFilter")
+	proto.RegisterType((*PacketIn)(nil), "voltha.PacketIn")
+	proto.RegisterType((*PacketOut)(nil), "voltha.PacketOut")
+	proto.RegisterType((*DeviceReason)(nil), "voltha.DeviceReason")
+	proto.RegisterType((*BulkFlows)(nil), "voltha.BulkFlows")
+	proto.RegisterType((*IncrementalFlows)(nil), "voltha.IncrementalFlows")
+	proto.RegisterType((*PmConfigsInfo)(nil), "voltha.PmConfigsInfo")
+	proto.RegisterType((*SwitchCapability)(nil), "voltha.SwitchCapability")
+	proto.RegisterType((*ImageDownloadMessage)(nil), "voltha.ImageDownloadMessage")
+	proto.RegisterType((*OMCITest)(nil), "voltha.OMCITest")
+	proto.RegisterType((*SimulateAlarmMessage)(nil), "voltha.SimulateAlarmMessage")
+	proto.RegisterType((*GetExtValueMessage)(nil), "voltha.GetExtValueMessage")
+	proto.RegisterType((*SetExtValueMessage)(nil), "voltha.SetExtValueMessage")
+	proto.RegisterType((*OmciMessage)(nil), "voltha.OmciMessage")
+	proto.RegisterType((*TechProfileDownloadMessage)(nil), "voltha.TechProfileDownloadMessage")
+	proto.RegisterType((*DeleteGemPortMessage)(nil), "voltha.DeleteGemPortMessage")
+	proto.RegisterType((*DeleteTcontMessage)(nil), "voltha.DeleteTcontMessage")
+	proto.RegisterType((*OnuIndicationMessage)(nil), "voltha.OnuIndicationMessage")
+	proto.RegisterType((*TechProfileInstanceRequestMessage)(nil), "voltha.TechProfileInstanceRequestMessage")
+}
+
+func init() {
+	proto.RegisterFile("voltha_protos/inter_container.proto", fileDescriptor_941f0031a549667f)
+}
+
+var fileDescriptor_941f0031a549667f = []byte{
+	// 1440 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0xdb, 0xc6,
+	0x12, 0x7f, 0x74, 0x2c, 0xd9, 0x1a, 0xfd, 0xb1, 0xbd, 0xb1, 0x1d, 0xbf, 0xf8, 0xe5, 0x3d, 0x87,
+	0xc9, 0x0b, 0x9c, 0x06, 0x95, 0x1b, 0x1b, 0x69, 0x11, 0xa0, 0x05, 0x1a, 0xcb, 0x49, 0xca, 0x43,
+	0x62, 0x97, 0x32, 0x7a, 0xe8, 0xa1, 0xc4, 0x9a, 0x5c, 0x49, 0x44, 0xc8, 0x5d, 0x86, 0x5c, 0xca,
+	0xf1, 0xb9, 0x97, 0x5e, 0x7a, 0x69, 0xfb, 0x1d, 0x7a, 0x2d, 0xd0, 0x4f, 0xd1, 0x73, 0xaf, 0xfd,
+	0x04, 0xfd, 0x04, 0xbd, 0xb5, 0xd8, 0x7f, 0x12, 0x29, 0x29, 0x8e, 0x82, 0x16, 0xb9, 0x08, 0xdc,
+	0x9d, 0xdf, 0xcc, 0xfc, 0x66, 0x66, 0x77, 0x76, 0x57, 0x70, 0x6b, 0xc8, 0x22, 0x3e, 0xc0, 0x5e,
+	0x92, 0x32, 0xce, 0xb2, 0xbd, 0x90, 0x72, 0x92, 0x7a, 0x3e, 0xa3, 0x1c, 0x87, 0x94, 0xa4, 0x6d,
+	0x39, 0x8d, 0xaa, 0x0a, 0x74, 0xfd, 0x7a, 0x19, 0xec, 0xb3, 0x38, 0x66, 0x54, 0x61, 0x26, 0x65,
+	0x6a, 0xa4, 0x65, 0xff, 0x2b, 0xcb, 0x58, 0x42, 0x68, 0x2f, 0x62, 0xe7, 0xde, 0xfd, 0x03, 0x0d,
+	0xd8, 0x29, 0x03, 0x38, 0xf1, 0x07, 0xe2, 0xbb, 0x17, 0x46, 0x44, 0x23, 0xb6, 0xa7, 0x4d, 0xb0,
+	0x88, 0xcf, 0x16, 0xe2, 0x00, 0x27, 0xdc, 0x90, 0x9f, 0x24, 0x16, 0x90, 0x61, 0xe8, 0x6b, 0xab,
+	0x76, 0x0c, 0x57, 0x1f, 0x29, 0xb0, 0x4b, 0xfa, 0x61, 0xc6, 0x53, 0xcc, 0x43, 0x46, 0xd1, 0x5d,
+	0x58, 0xd2, 0x36, 0xb6, 0xac, 0x1d, 0x6b, 0xb7, 0xbe, 0xbf, 0xd2, 0xd6, 0xf1, 0x18, 0xb4, 0x91,
+	0xa3, 0x7b, 0x50, 0x0d, 0x4e, 0x2f, 0x12, 0x92, 0x6d, 0x2d, 0x48, 0xe4, 0x55, 0x83, 0x3c, 0x92,
+	0x7e, 0xa4, 0xc8, 0xd5, 0x10, 0xfb, 0x3b, 0x0b, 0xd6, 0x3a, 0x83, 0x30, 0x0a, 0x94, 0xf0, 0x49,
+	0x18, 0x09, 0x13, 0xdb, 0x50, 0x4b, 0x70, 0x4a, 0x28, 0xf7, 0xc2, 0x40, 0xfa, 0xab, 0xb9, 0xcb,
+	0x6a, 0xc2, 0x09, 0xd0, 0x2d, 0x68, 0x66, 0x24, 0x0d, 0x71, 0xe4, 0xd1, 0x3c, 0x3e, 0x23, 0xa9,
+	0x74, 0x53, 0x73, 0x1b, 0x6a, 0xf2, 0xb9, 0x9c, 0x43, 0x1b, 0x50, 0x65, 0x34, 0x17, 0xea, 0x57,
+	0x76, 0xac, 0xdd, 0xa6, 0x5b, 0x61, 0x34, 0x77, 0x02, 0x74, 0x1b, 0x5a, 0xda, 0x70, 0xc2, 0x52,
+	0xee, 0x51, 0xb6, 0xb5, 0x28, 0xc5, 0x0d, 0x35, 0x7b, 0xc2, 0x52, 0xfe, 0x9c, 0xd9, 0x2f, 0x01,
+	0xc4, 0xd7, 0x98, 0x8c, 0xca, 0x50, 0x81, 0x8c, 0x9a, 0x70, 0x02, 0x84, 0x60, 0x51, 0x58, 0x92,
+	0x1c, 0x9a, 0xae, 0xfc, 0x46, 0xfb, 0x50, 0x93, 0xd6, 0xf9, 0x45, 0x42, 0xa4, 0xfb, 0xd6, 0xfe,
+	0x86, 0xc9, 0x81, 0xb0, 0x2b, 0x7f, 0x44, 0xf8, 0xee, 0x72, 0xa2, 0xbf, 0xec, 0x3f, 0x2c, 0x58,
+	0x51, 0x29, 0x38, 0x0a, 0x33, 0x9f, 0x0d, 0x49, 0x7a, 0x71, 0x79, 0x16, 0xa6, 0x23, 0x59, 0x98,
+	0x8e, 0x04, 0xbd, 0x07, 0x6b, 0xbe, 0xc8, 0xae, 0xa7, 0x23, 0x18, 0x51, 0xaa, 0xb9, 0x2b, 0xfe,
+	0x38, 0xed, 0x82, 0x02, 0xba, 0x01, 0xe0, 0x0f, 0x30, 0xa5, 0x24, 0x12, 0xfe, 0x54, 0x5e, 0x6a,
+	0x7a, 0xc6, 0x09, 0x04, 0x9b, 0x21, 0xa1, 0x01, 0x4b, 0x85, 0xb4, 0xa2, 0xd8, 0xa8, 0x89, 0x59,
+	0x35, 0xa9, 0x5e, 0x5a, 0x93, 0xa5, 0x42, 0x4d, 0xec, 0x5f, 0x2d, 0x58, 0x53, 0x34, 0xba, 0x1c,
+	0x73, 0x32, 0x4f, 0xd6, 0x77, 0x61, 0x55, 0x07, 0x3f, 0xc6, 0xa8, 0x55, 0xa0, 0x93, 0x72, 0x64,
+	0x90, 0x0f, 0xa1, 0xce, 0x12, 0x92, 0x7a, 0x19, 0xc7, 0x3c, 0xcf, 0x74, 0x35, 0xb6, 0xda, 0x7a,
+	0x9f, 0x1e, 0x27, 0x24, 0xed, 0x4a, 0x49, 0x5b, 0x2d, 0x4b, 0x60, 0xa3, 0x19, 0xf4, 0x31, 0xd4,
+	0x7d, 0x46, 0xa9, 0x51, 0x5d, 0x94, 0xaa, 0xdb, 0x46, 0xb5, 0xc3, 0x28, 0x25, 0x3e, 0x2f, 0x6b,
+	0x0b, 0xbc, 0x9a, 0xb1, 0x7f, 0xb2, 0xa0, 0x26, 0x8a, 0x20, 0x63, 0xba, 0x3c, 0x9a, 0xd2, 0x7a,
+	0x59, 0x98, 0x6b, 0xbd, 0xa0, 0x6b, 0xb0, 0x64, 0xea, 0xae, 0x16, 0x78, 0x35, 0x51, 0x15, 0x9f,
+	0x08, 0x78, 0x71, 0xfe, 0x80, 0xed, 0xef, 0x2d, 0x58, 0x19, 0x51, 0x9e, 0xb7, 0x0c, 0x86, 0xb8,
+	0xd7, 0x93, 0x0a, 0x7a, 0x15, 0xb6, 0x0c, 0x51, 0x6d, 0xe6, 0x6f, 0xb0, 0xea, 0xc2, 0xf2, 0x09,
+	0xf6, 0x5f, 0x10, 0xee, 0xd0, 0xb7, 0xdf, 0x8a, 0x9b, 0x50, 0x4d, 0xa4, 0xb2, 0xcc, 0x52, 0xc3,
+	0xd5, 0x23, 0xfb, 0x6b, 0x51, 0x1d, 0xf9, 0x79, 0x9c, 0xf3, 0xcb, 0xcd, 0xde, 0x86, 0x16, 0xe9,
+	0xa7, 0x24, 0xcb, 0x26, 0x37, 0x9a, 0x9a, 0xd5, 0x1b, 0xed, 0xa0, 0xe4, 0xa8, 0xbe, 0xbf, 0xdd,
+	0x2e, 0xb6, 0x74, 0xd6, 0x4b, 0x3c, 0x25, 0xf6, 0x58, 0xce, 0x47, 0x2c, 0x3a, 0xd0, 0x50, 0x0b,
+	0xd5, 0x25, 0x38, 0x63, 0x6f, 0x08, 0x6f, 0x13, 0xaa, 0xa9, 0x84, 0xe9, 0x95, 0xae, 0x47, 0xf6,
+	0x2f, 0x16, 0xd4, 0x0e, 0xf3, 0xe8, 0xc5, 0x93, 0x88, 0x9d, 0x67, 0xe8, 0x0e, 0x54, 0x95, 0x86,
+	0x6e, 0xd3, 0xad, 0x72, 0xf3, 0x75, 0xb5, 0x14, 0xed, 0x42, 0x45, 0x90, 0x33, 0x3d, 0x1a, 0x95,
+	0xe8, 0x4a, 0x53, 0xae, 0x02, 0xa0, 0x3d, 0xa8, 0xf6, 0x53, 0x96, 0x27, 0x99, 0x8e, 0xec, 0xda,
+	0x14, 0xf4, 0xa9, 0x14, 0xbb, 0x1a, 0x86, 0x1e, 0x42, 0x53, 0x4a, 0x63, 0xc2, 0x71, 0x80, 0x39,
+	0x96, 0xd5, 0xae, 0xef, 0xaf, 0x1b, 0x26, 0x42, 0xe5, 0x99, 0x96, 0xb9, 0x8d, 0x5e, 0x61, 0x64,
+	0xff, 0x66, 0xc1, 0xaa, 0x43, 0xfd, 0x94, 0xc4, 0x84, 0x72, 0x1c, 0xbd, 0x5d, 0x48, 0xed, 0x72,
+	0x48, 0x5b, 0x53, 0x3c, 0x3b, 0x03, 0x4c, 0xfb, 0x64, 0x14, 0xd8, 0x83, 0x89, 0xc0, 0x6e, 0xcc,
+	0x0e, 0xcc, 0x68, 0xfd, 0x03, 0xe1, 0x7d, 0x05, 0xcd, 0x93, 0xb8, 0xc3, 0x68, 0x2f, 0xec, 0x67,
+	0x0e, 0xed, 0xb1, 0xcb, 0x0b, 0xfe, 0x01, 0x40, 0x12, 0x8b, 0x8b, 0x87, 0x80, 0xeb, 0xa0, 0xd6,
+	0x46, 0x7d, 0xc1, 0xd8, 0x71, 0x6b, 0x89, 0xf9, 0xb4, 0xbf, 0xb1, 0x60, 0xb5, 0x7b, 0x1e, 0x72,
+	0x7f, 0xd0, 0xc1, 0x09, 0x3e, 0x0b, 0xa3, 0x90, 0x5f, 0xa0, 0xbb, 0xb0, 0x18, 0x90, 0xcc, 0xd7,
+	0xc9, 0xdb, 0x98, 0x5a, 0x97, 0x42, 0xe8, 0x4a, 0x08, 0x72, 0x60, 0x25, 0x93, 0xea, 0x5e, 0x8f,
+	0x60, 0x9e, 0xa7, 0xa3, 0x23, 0x7c, 0x67, 0x4a, 0x6b, 0x02, 0xe7, 0xb6, 0xd4, 0xc4, 0x13, 0x3d,
+	0xb6, 0x5f, 0xc0, 0xba, 0x13, 0xe3, 0x3e, 0x39, 0x62, 0xe7, 0x34, 0x62, 0x38, 0x78, 0x46, 0xb2,
+	0x0c, 0xf7, 0xc9, 0xdc, 0xc5, 0xbc, 0x07, 0x95, 0x50, 0xe8, 0x6b, 0x02, 0xa3, 0x7e, 0x58, 0x32,
+	0xea, 0x2a, 0x8c, 0x4d, 0x60, 0xf9, 0xf8, 0x59, 0xc7, 0x39, 0x25, 0x19, 0x9f, 0xdb, 0xc1, 0x7d,
+	0x58, 0x4a, 0xc9, 0xcb, 0x9c, 0x64, 0x5c, 0xbb, 0xb8, 0x66, 0x80, 0xc7, 0xb1, 0x1f, 0x0a, 0x53,
+	0xae, 0x12, 0xbb, 0x06, 0x67, 0x0f, 0x61, 0xbd, 0x1b, 0xc6, 0x79, 0x84, 0x39, 0x79, 0x14, 0xe1,
+	0x34, 0x7e, 0xdb, 0x98, 0x3e, 0x9c, 0x74, 0xf9, 0x1f, 0x03, 0x2c, 0x99, 0x9d, 0xf2, 0xfb, 0xb3,
+	0x05, 0xe8, 0x29, 0xe1, 0x8f, 0x5f, 0xf1, 0x2f, 0x70, 0x94, 0x13, 0xe3, 0xf6, 0x00, 0x9a, 0xa5,
+	0x43, 0xf0, 0x35, 0xde, 0x1b, 0xc5, 0x13, 0x11, 0xdd, 0x87, 0x46, 0xf1, 0x42, 0xa0, 0x89, 0x4c,
+	0xea, 0xd4, 0x0b, 0x77, 0x03, 0xf4, 0x00, 0x60, 0x28, 0xfc, 0x16, 0xef, 0x33, 0x9b, 0xa6, 0x75,
+	0x4b, 0x46, 0xa2, 0x69, 0xcb, 0xce, 0xed, 0xd6, 0x86, 0x66, 0x6c, 0x07, 0x80, 0xba, 0xd3, 0xa4,
+	0xe7, 0xcd, 0xd5, 0x1d, 0xa8, 0x48, 0x53, 0x7a, 0x6f, 0xae, 0x1a, 0x98, 0x34, 0xd6, 0x25, 0xdc,
+	0x55, 0x62, 0xfb, 0x4f, 0x0b, 0xea, 0xa2, 0x60, 0xc6, 0xfe, 0x16, 0x2c, 0xc5, 0xea, 0x53, 0x3a,
+	0x68, 0xb8, 0x66, 0x88, 0x0e, 0xa1, 0xe5, 0xab, 0x33, 0xdb, 0x9c, 0x42, 0x0b, 0x6f, 0x3e, 0xd1,
+	0x9b, 0x7e, 0x71, 0x12, 0x7d, 0x0a, 0xcd, 0x24, 0x65, 0xaf, 0x2e, 0x3c, 0x1c, 0x04, 0xa2, 0xf9,
+	0x8f, 0x9a, 0x7d, 0x29, 0x88, 0xf6, 0x89, 0xc0, 0x3c, 0x52, 0x10, 0xb7, 0x91, 0x14, 0x46, 0x33,
+	0x6f, 0x2e, 0x8b, 0x33, 0x6f, 0x2e, 0x77, 0x60, 0xa5, 0x74, 0x75, 0x1b, 0xdd, 0xba, 0x9a, 0x85,
+	0xe2, 0x38, 0x81, 0xfd, 0xe3, 0x02, 0x5c, 0x3f, 0x25, 0xfe, 0xe0, 0x44, 0x3d, 0x0e, 0x26, 0x37,
+	0xdc, 0xa5, 0x2d, 0x66, 0x03, 0xaa, 0x39, 0x0d, 0xcd, 0xed, 0xa9, 0xe9, 0x56, 0x72, 0x1a, 0xaa,
+	0x73, 0x9d, 0x27, 0x5e, 0x48, 0x33, 0x8e, 0xa9, 0x4f, 0xbc, 0x04, 0xf3, 0x81, 0xbe, 0x34, 0xb6,
+	0x78, 0xe2, 0xe8, 0xe9, 0x13, 0xcc, 0x07, 0xe8, 0x08, 0xea, 0x05, 0xa4, 0x6e, 0x85, 0x37, 0xdb,
+	0xa5, 0xd7, 0x4a, 0x81, 0x9c, 0xd1, 0xfd, 0xec, 0x5f, 0x2e, 0x8c, 0x2d, 0xa1, 0xcf, 0x61, 0x95,
+	0x24, 0x8c, 0x7a, 0x45, 0x53, 0x15, 0x69, 0xea, 0xff, 0x65, 0x53, 0x8f, 0x13, 0x46, 0x67, 0x9b,
+	0x6b, 0x09, 0x03, 0xa7, 0x23, 0x93, 0x87, 0x08, 0x56, 0xa5, 0x66, 0xc1, 0xa4, 0xfd, 0x83, 0x05,
+	0xeb, 0x47, 0x24, 0x22, 0x9c, 0x3c, 0x25, 0xb1, 0x38, 0xb7, 0xdf, 0x4d, 0x8e, 0xfe, 0x0b, 0xf5,
+	0x3e, 0x89, 0xd5, 0xed, 0x61, 0x7c, 0xb1, 0xee, 0x2b, 0x0a, 0x4e, 0x60, 0x7f, 0x6b, 0x01, 0x52,
+	0xb4, 0x4e, 0xc5, 0x2b, 0xf3, 0xdd, 0x90, 0xfa, 0x37, 0x2c, 0xe3, 0x28, 0x62, 0xfe, 0x98, 0xd1,
+	0x92, 0x1c, 0x3b, 0x81, 0x9d, 0xc2, 0xfa, 0x31, 0xcd, 0x1d, 0x1a, 0x84, 0xbe, 0x7c, 0xfb, 0xcd,
+	0x45, 0xe8, 0x13, 0x68, 0xc9, 0xbb, 0xfd, 0x48, 0x4b, 0x77, 0x96, 0xcd, 0xb6, 0x79, 0x97, 0x96,
+	0x6c, 0xba, 0x4d, 0x56, 0x1c, 0xda, 0xbf, 0x5b, 0x70, 0x73, 0x46, 0x61, 0x75, 0x2b, 0x9c, 0x8b,
+	0xc1, 0xac, 0xd8, 0x17, 0x66, 0xc6, 0x3e, 0x6b, 0x0f, 0x5e, 0x79, 0xdd, 0x1e, 0x1c, 0x3d, 0xb2,
+	0xa8, 0xac, 0xa0, 0x4e, 0x56, 0xd3, 0xbc, 0xb2, 0xa8, 0x28, 0x62, 0xe1, 0x65, 0x53, 0x29, 0xbe,
+	0x36, 0xc7, 0x55, 0xaa, 0x16, 0xaa, 0x74, 0xf8, 0x1c, 0xae, 0xb2, 0xb4, 0x2f, 0x13, 0xe3, 0xb3,
+	0x34, 0xd0, 0xcd, 0xe3, 0xcb, 0x8f, 0xfa, 0x21, 0x1f, 0xe4, 0x67, 0xa2, 0x1d, 0xed, 0x19, 0x99,
+	0xfe, 0xd3, 0xe0, 0x7d, 0xf3, 0x17, 0xc2, 0x83, 0xbd, 0x3e, 0x9b, 0xfc, 0x47, 0xe2, 0xac, 0x2a,
+	0xa5, 0x07, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0x9f, 0x82, 0x0a, 0xe6, 0xb9, 0x10, 0x00, 0x00,
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/omci/omci_alarm_db.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_alarm_db.pb.go
similarity index 82%
rename from vendor/github.com/opencord/voltha-protos/v4/go/omci/omci_alarm_db.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_alarm_db.pb.go
index 58824c0..7b8e492 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/omci/omci_alarm_db.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_alarm_db.pb.go
@@ -6,7 +6,7 @@
 import (
 	fmt "fmt"
 	proto "github.com/golang/protobuf/proto"
-	_ "github.com/opencord/voltha-protos/v4/go/common"
+	_ "github.com/opencord/voltha-protos/v5/go/common"
 	math "math"
 )
 
@@ -477,40 +477,40 @@
 	// 606 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xc1, 0x6e, 0xd3, 0x40,
 	0x10, 0x25, 0x6d, 0xda, 0xa6, 0x93, 0xa4, 0x4d, 0x97, 0xaa, 0x2c, 0x95, 0x2a, 0x15, 0x0b, 0x50,
-	0x0f, 0xe0, 0x48, 0x85, 0x1b, 0x48, 0x55, 0xd3, 0x46, 0x22, 0x07, 0x54, 0xe1, 0xf6, 0xc4, 0xc5,
-	0xda, 0xd8, 0x83, 0xbb, 0x92, 0x77, 0x1d, 0xbc, 0x1b, 0x4b, 0x39, 0x70, 0xe1, 0xab, 0xf8, 0x89,
-	0xfe, 0x04, 0x27, 0xbe, 0xa0, 0x67, 0xe4, 0xb1, 0x9d, 0x38, 0xb2, 0x84, 0xb8, 0xed, 0x7b, 0x33,
-	0xf3, 0x66, 0x76, 0xde, 0x6a, 0xe1, 0x45, 0x96, 0xc4, 0xf6, 0x5e, 0xf8, 0xb3, 0x34, 0xb1, 0x89,
-	0x19, 0x26, 0x2a, 0x90, 0xbe, 0x88, 0x45, 0xaa, 0xfc, 0x70, 0xea, 0x12, 0xc9, 0xda, 0x39, 0x79,
-	0xcc, 0xd7, 0x13, 0x15, 0x5a, 0x51, 0xc4, 0x9d, 0x31, 0xb0, 0xcb, 0xbc, 0xe2, 0xd2, 0xda, 0x54,
-	0x4e, 0xe7, 0x16, 0xaf, 0x85, 0x15, 0xec, 0x39, 0xb4, 0xb5, 0x50, 0xc8, 0x5b, 0xa7, 0xad, 0xb3,
-	0xdd, 0xd1, 0xd6, 0x9f, 0xc7, 0x87, 0x93, 0x96, 0x47, 0x14, 0x3b, 0x84, 0xad, 0x4c, 0xc4, 0x73,
-	0xe4, 0x1b, 0x79, 0xcc, 0x2b, 0x80, 0xf3, 0xab, 0x05, 0x07, 0xa4, 0x33, 0xd1, 0xc6, 0x0a, 0x1d,
-	0x14, 0x32, 0xaf, 0xa1, 0x2b, 0x4b, 0xec, 0xcb, 0x90, 0xd4, 0xfa, 0x95, 0x1a, 0x54, 0x91, 0x49,
-	0xc8, 0x38, 0xec, 0x04, 0x29, 0x0a, 0x8b, 0x61, 0xa9, 0x5a, 0x41, 0x76, 0x0c, 0x1d, 0x95, 0x84,
-	0xf2, 0x9b, 0xc4, 0x90, 0x6f, 0x52, 0x68, 0x89, 0xd9, 0x18, 0x40, 0x54, 0x53, 0x1b, 0xde, 0x3e,
-	0xdd, 0x3c, 0xeb, 0x9e, 0x73, 0x37, 0xbf, 0xaf, 0xdb, 0xbc, 0xd2, 0xa8, 0xfb, 0xfb, 0xf1, 0xe1,
-	0x64, 0xbb, 0xb8, 0x97, 0x57, 0x2b, 0x74, 0x7e, 0xc0, 0x1e, 0xa5, 0x5f, 0xc5, 0xc2, 0x18, 0x1a,
-	0xfb, 0x14, 0x3a, 0x41, 0x0e, 0x1a, 0x33, 0xef, 0x10, 0x3d, 0x09, 0xd9, 0x04, 0x76, 0xab, 0xf1,
-	0x0d, 0xdf, 0xa0, 0xce, 0xcf, 0x6a, 0x9d, 0xeb, 0x4b, 0x18, 0xb1, 0xbc, 0x71, 0x7f, 0x6d, 0x13,
-	0xde, 0xaa, 0xda, 0xf9, 0x52, 0x1a, 0xf0, 0x59, 0x68, 0x11, 0x61, 0x38, 0xd6, 0x56, 0xda, 0xc5,
-	0x7f, 0x8c, 0x50, 0x59, 0xb4, 0xd1, 0xb0, 0xc8, 0xf9, 0x08, 0x83, 0x42, 0x12, 0x8d, 0x11, 0x11,
-	0xde, 0x2d, 0x66, 0xc8, 0xce, 0xa0, 0xa7, 0x0a, 0xe8, 0xdb, 0xc5, 0x0c, 0xd7, 0x45, 0xbb, 0x6a,
-	0x95, 0xe9, 0xfc, 0xdc, 0x84, 0x7d, 0x2a, 0xbf, 0xc6, 0x4c, 0x96, 0x46, 0x3a, 0xb0, 0x1b, 0x12,
-	0xaa, 0xe6, 0x59, 0x76, 0xec, 0x14, 0xfc, 0x3f, 0x4d, 0x74, 0xe1, 0x69, 0x2c, 0x8c, 0x2d, 0x9f,
-	0xa6, 0xc1, 0xef, 0x73, 0xd4, 0x01, 0x92, 0x9f, 0x7d, 0xef, 0x20, 0x0f, 0x51, 0xbf, 0xdb, 0x32,
-	0xc0, 0x5e, 0xc2, 0x1e, 0xe5, 0x9b, 0x85, 0x0e, 0x7c, 0x2b, 0x15, 0xf2, 0x36, 0x09, 0xf6, 0x72,
-	0xf6, 0x76, 0xa1, 0x83, 0x3b, 0xa9, 0x30, 0xef, 0x97, 0x61, 0x6a, 0x64, 0xa2, 0xf9, 0x16, 0x29,
-	0x55, 0x90, 0x5d, 0x40, 0xb1, 0x25, 0x34, 0x7c, 0x9b, 0xbc, 0x39, 0xac, 0x79, 0xb3, 0xb4, 0x79,
-	0xb4, 0x9f, 0x1b, 0x03, 0xab, 0x45, 0x7b, 0x55, 0x15, 0xbb, 0x82, 0x81, 0x2a, 0xec, 0xf0, 0x31,
-	0xf7, 0x43, 0xa2, 0xe1, 0x3b, 0x8d, 0xf7, 0xb5, 0xe6, 0x98, 0xb7, 0xaf, 0x6a, 0x50, 0xa2, 0x61,
-	0x1f, 0xa0, 0x5f, 0xdf, 0xb8, 0xe1, 0x1d, 0x52, 0x38, 0xaa, 0x2b, 0xac, 0xd6, 0xee, 0xf5, 0x6a,
-	0x1e, 0x18, 0xe7, 0x02, 0x8e, 0x28, 0xe3, 0x66, 0x86, 0xfa, 0x46, 0x05, 0x72, 0x9c, 0xa1, 0xb6,
-	0x64, 0xcf, 0x2b, 0x38, 0x68, 0x90, 0x6c, 0x00, 0x3d, 0x63, 0x85, 0x45, 0x3f, 0xb8, 0x17, 0x3a,
-	0xc2, 0xc1, 0x13, 0x27, 0x2e, 0x9f, 0xd5, 0x5a, 0x2e, 0x1b, 0x41, 0x7b, 0xe9, 0xfe, 0xde, 0xb9,
-	0x5b, 0x1b, 0xa5, 0xa1, 0xe9, 0x36, 0x18, 0x8f, 0x6a, 0x19, 0x83, 0x76, 0x28, 0xac, 0x28, 0x4d,
-	0xa6, 0xf3, 0xe8, 0x13, 0xf0, 0x24, 0x8d, 0xdc, 0x64, 0x86, 0x3a, 0x48, 0xd2, 0xd0, 0x2d, 0xbe,
-	0x1b, 0x92, 0xff, 0xfa, 0x26, 0x92, 0xf6, 0x7e, 0x3e, 0x75, 0x83, 0x44, 0x0d, 0xab, 0x84, 0x61,
-	0x91, 0xf0, 0xb6, 0xfc, 0x8f, 0xb2, 0xf7, 0xc3, 0x28, 0xa1, 0xef, 0x6b, 0xba, 0x4d, 0xd4, 0xbb,
-	0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x32, 0x98, 0x5c, 0x3a, 0xdb, 0x04, 0x00, 0x00,
+	0x0f, 0xe0, 0x48, 0x45, 0x9c, 0x40, 0xaa, 0x9a, 0x36, 0x12, 0x39, 0xa0, 0x0a, 0xb7, 0x27, 0x2e,
+	0xd6, 0xc6, 0x1e, 0xdc, 0x95, 0xbc, 0xeb, 0xe0, 0xdd, 0x58, 0xca, 0x81, 0x0b, 0x5f, 0xc5, 0x4f,
+	0xf4, 0x27, 0x38, 0xf1, 0x05, 0x3d, 0x23, 0x8f, 0xed, 0xc4, 0x91, 0x25, 0xc4, 0x6d, 0xdf, 0x9b,
+	0x99, 0x37, 0xb3, 0xf3, 0x56, 0x0b, 0x2f, 0xb2, 0x24, 0xb6, 0xf7, 0xc2, 0x9f, 0xa5, 0x89, 0x4d,
+	0xcc, 0x30, 0x51, 0x81, 0xf4, 0x45, 0x2c, 0x52, 0xe5, 0x87, 0x53, 0x97, 0x48, 0xd6, 0xce, 0xc9,
+	0x63, 0xbe, 0x9e, 0xa8, 0xd0, 0x8a, 0x22, 0xee, 0x8c, 0x81, 0x5d, 0xe6, 0x15, 0x97, 0xd6, 0xa6,
+	0x72, 0x3a, 0xb7, 0x78, 0x2d, 0xac, 0x60, 0xcf, 0xa1, 0xad, 0x85, 0x42, 0xde, 0x3a, 0x6d, 0x9d,
+	0xed, 0x8e, 0xb6, 0xfe, 0x3c, 0x3e, 0x9c, 0xb4, 0x3c, 0xa2, 0xd8, 0x21, 0x6c, 0x65, 0x22, 0x9e,
+	0x23, 0xdf, 0xc8, 0x63, 0x5e, 0x01, 0x9c, 0x5f, 0x2d, 0x38, 0x20, 0x9d, 0x89, 0x36, 0x56, 0xe8,
+	0xa0, 0x90, 0x79, 0x0d, 0x5d, 0x59, 0x62, 0x5f, 0x86, 0xa4, 0xd6, 0xaf, 0xd4, 0xa0, 0x8a, 0x4c,
+	0x42, 0xc6, 0x61, 0x27, 0x48, 0x51, 0x58, 0x0c, 0x4b, 0xd5, 0x0a, 0xb2, 0x63, 0xe8, 0xa8, 0x24,
+	0x94, 0xdf, 0x24, 0x86, 0x7c, 0x93, 0x42, 0x4b, 0xcc, 0xc6, 0x00, 0xa2, 0x9a, 0xda, 0xf0, 0xf6,
+	0xe9, 0xe6, 0x59, 0xf7, 0x9c, 0xbb, 0xf9, 0x7d, 0xdd, 0xe6, 0x95, 0x46, 0xdd, 0xdf, 0x8f, 0x0f,
+	0x27, 0xdb, 0xc5, 0xbd, 0xbc, 0x5a, 0xa1, 0xf3, 0x03, 0xf6, 0x28, 0xfd, 0x2a, 0x16, 0xc6, 0xd0,
+	0xd8, 0xa7, 0xd0, 0x09, 0x72, 0xd0, 0x98, 0x79, 0x87, 0xe8, 0x49, 0xc8, 0x26, 0xb0, 0x5b, 0x8d,
+	0x6f, 0xf8, 0x06, 0x75, 0x7e, 0x56, 0xeb, 0x5c, 0x5f, 0xc2, 0x88, 0xe5, 0x8d, 0xfb, 0x6b, 0x9b,
+	0xf0, 0x56, 0xd5, 0xce, 0x97, 0xd2, 0x80, 0xcf, 0x42, 0x8b, 0x08, 0xc3, 0xb1, 0xb6, 0xd2, 0x2e,
+	0xfe, 0x63, 0x84, 0xca, 0xa2, 0x8d, 0x86, 0x45, 0xce, 0x47, 0x18, 0x14, 0x92, 0x68, 0x8c, 0x88,
+	0xf0, 0x6e, 0x31, 0x43, 0x76, 0x06, 0x3d, 0x55, 0x40, 0xdf, 0x2e, 0x66, 0xb8, 0x2e, 0xda, 0x55,
+	0xab, 0x4c, 0xe7, 0xe7, 0x26, 0xec, 0x53, 0xf9, 0x35, 0x66, 0xb2, 0x34, 0xd2, 0x81, 0xdd, 0x90,
+	0x50, 0x35, 0xcf, 0xb2, 0x63, 0xa7, 0xe0, 0xff, 0x69, 0xa2, 0x0b, 0x4f, 0x63, 0x61, 0x6c, 0xf9,
+	0x34, 0x0d, 0x7e, 0x9f, 0xa3, 0x0e, 0x90, 0xfc, 0xec, 0x7b, 0x07, 0x79, 0x88, 0xfa, 0xdd, 0x96,
+	0x01, 0xf6, 0x12, 0xf6, 0x28, 0xdf, 0x2c, 0x74, 0xe0, 0x5b, 0xa9, 0x90, 0xb7, 0x49, 0xb0, 0x97,
+	0xb3, 0xb7, 0x0b, 0x1d, 0xdc, 0x49, 0x85, 0x79, 0xbf, 0x0c, 0x53, 0x23, 0x13, 0xcd, 0xb7, 0x48,
+	0xa9, 0x82, 0xec, 0x02, 0x8a, 0x2d, 0xa1, 0xe1, 0xdb, 0xe4, 0xcd, 0x61, 0xcd, 0x9b, 0xa5, 0xcd,
+	0xa3, 0xfd, 0xdc, 0x18, 0x58, 0x2d, 0xda, 0xab, 0xaa, 0xd8, 0x15, 0x0c, 0x54, 0x61, 0x87, 0x8f,
+	0xb9, 0x1f, 0x12, 0x0d, 0xdf, 0x69, 0xbc, 0xaf, 0x35, 0xc7, 0xbc, 0x7d, 0x55, 0x83, 0x12, 0x0d,
+	0xfb, 0x00, 0xfd, 0xfa, 0xc6, 0x0d, 0xef, 0x90, 0xc2, 0x51, 0x5d, 0x61, 0xb5, 0x76, 0xaf, 0x57,
+	0xf3, 0xc0, 0x38, 0x17, 0x70, 0x44, 0x19, 0x37, 0x33, 0xd4, 0x37, 0x2a, 0x90, 0xe3, 0x0c, 0xb5,
+	0x25, 0x7b, 0x5e, 0xc1, 0x41, 0x83, 0x64, 0x03, 0xe8, 0x19, 0x2b, 0x2c, 0xfa, 0xc1, 0xbd, 0xd0,
+	0x11, 0x0e, 0x9e, 0x38, 0x71, 0xf9, 0xac, 0xd6, 0x72, 0xd9, 0x08, 0xda, 0x4b, 0xf7, 0xf7, 0xce,
+	0xdd, 0xda, 0x28, 0x0d, 0x4d, 0xb7, 0xc1, 0x78, 0x54, 0xcb, 0x18, 0xb4, 0x43, 0x61, 0x45, 0x69,
+	0x32, 0x9d, 0x47, 0x9f, 0x80, 0x27, 0x69, 0xe4, 0x26, 0x33, 0xd4, 0x41, 0x92, 0x86, 0x6e, 0xf1,
+	0xdd, 0x90, 0xfc, 0xd7, 0x37, 0x91, 0xb4, 0xf7, 0xf3, 0xa9, 0x1b, 0x24, 0x6a, 0x58, 0x25, 0x0c,
+	0x8b, 0x84, 0xb7, 0xe5, 0x7f, 0x94, 0xbd, 0x1f, 0x46, 0x09, 0x7d, 0x5f, 0xd3, 0x6d, 0xa2, 0xde,
+	0xfd, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x71, 0x53, 0xfa, 0xbd, 0xdb, 0x04, 0x00, 0x00,
 }
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/omci/omci_mib_db.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_mib_db.pb.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-protos/v4/go/omci/omci_mib_db.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_mib_db.pb.go
index d644bf8..faa0975 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/omci/omci_mib_db.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_mib_db.pb.go
@@ -6,7 +6,7 @@
 import (
 	fmt "fmt"
 	proto "github.com/golang/protobuf/proto"
-	_ "github.com/opencord/voltha-protos/v4/go/common"
+	_ "github.com/opencord/voltha-protos/v5/go/common"
 	math "math"
 )
 
@@ -477,7 +477,7 @@
 	// 598 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xdf, 0x6a, 0xd4, 0x4e,
 	0x14, 0xfe, 0xed, 0x76, 0xb7, 0xdd, 0x9e, 0xdd, 0xb4, 0xdb, 0xf9, 0xa9, 0x4c, 0x0b, 0xc5, 0x12,
-	0x44, 0x7a, 0xa1, 0x59, 0xac, 0xa2, 0x57, 0x0a, 0x6e, 0x5b, 0xb4, 0x60, 0x28, 0xc4, 0xe2, 0x85,
+	0x44, 0x7a, 0xa1, 0x59, 0xac, 0xa8, 0x57, 0x0a, 0x6e, 0x5b, 0xb4, 0x60, 0x28, 0xc4, 0xe2, 0x85,
 	0x37, 0x61, 0x92, 0x1c, 0xd3, 0x81, 0x9d, 0xc9, 0x92, 0x99, 0x06, 0xf2, 0x5e, 0xbe, 0x46, 0x5f,
 	0xc2, 0xab, 0x3e, 0x41, 0xaf, 0x65, 0x26, 0x7f, 0x36, 0x6b, 0x41, 0xbc, 0xcb, 0x77, 0xfe, 0x7c,
 	0xe7, 0x9c, 0xef, 0x0b, 0x03, 0x4f, 0x8b, 0x6c, 0xa1, 0xaf, 0x59, 0xb8, 0xcc, 0x33, 0x9d, 0xa9,
@@ -511,6 +511,6 @@
 	0xb6, 0x94, 0x10, 0x18, 0x18, 0x91, 0x6b, 0x97, 0xec, 0xf7, 0xfc, 0x33, 0xd0, 0x2c, 0x4f, 0xbd,
 	0x6c, 0x89, 0x32, 0xce, 0xf2, 0xc4, 0xab, 0x5e, 0x06, 0xcb, 0xf6, 0xfd, 0x45, 0xca, 0xf5, 0xf5,
 	0x4d, 0xe4, 0xc5, 0x99, 0x98, 0x35, 0x05, 0xb3, 0xaa, 0xe0, 0x65, 0xfd, 0x74, 0x14, 0x6f, 0x66,
-	0x69, 0x66, 0xdf, 0x99, 0x68, 0xd3, 0x86, 0x5e, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x6d, 0xd7,
-	0xc0, 0x7b, 0x84, 0x04, 0x00, 0x00,
+	0x69, 0x66, 0xdf, 0x99, 0x68, 0xd3, 0x86, 0x5e, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x2e, 0x1c,
+	0x66, 0xfc, 0x84, 0x04, 0x00, 0x00,
 }
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/openflow_13/openflow_13.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/openflow_13/openflow_13.pb.go
similarity index 90%
rename from vendor/github.com/opencord/voltha-protos/v4/go/openflow_13/openflow_13.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/openflow_13/openflow_13.pb.go
index a1889d1..fdf9a77 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/openflow_13/openflow_13.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/openflow_13/openflow_13.pb.go
@@ -9332,7 +9332,7 @@
 	// 8470 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x7d, 0x5b, 0x8c, 0x1c, 0x59,
 	0x96, 0x90, 0xf3, 0x51, 0x55, 0x99, 0x37, 0xab, 0xca, 0xe1, 0xf0, 0x2b, 0xed, 0xb2, 0xdb, 0x76,
-	0x76, 0xf7, 0x4c, 0x4f, 0x0e, 0xdb, 0x6e, 0xbb, 0x3d, 0x9e, 0xd9, 0x79, 0x2c, 0x8e, 0xcc, 0x8c,
+	0x4e, 0xf7, 0x4c, 0x4f, 0x0e, 0xdb, 0x6e, 0xbb, 0xdd, 0x9e, 0xd9, 0x79, 0x2c, 0x8e, 0xcc, 0x8c,
 	0xac, 0xcc, 0x76, 0xbe, 0x1c, 0x11, 0x55, 0xb6, 0x07, 0x41, 0x28, 0x2b, 0x33, 0x5c, 0x95, 0xeb,
 	0xcc, 0x8c, 0x9c, 0x88, 0xa8, 0x72, 0x79, 0x99, 0x45, 0x86, 0x15, 0x42, 0x02, 0x76, 0x17, 0xb4,
 	0x1f, 0x23, 0xc1, 0x22, 0xb1, 0x02, 0x3e, 0x10, 0x12, 0x1f, 0x48, 0x48, 0x48, 0x7c, 0xaf, 0x04,
@@ -9340,18 +9340,18 @@
 	0xb9, 0xe7, 0xdc, 0x1b, 0x37, 0xf2, 0x51, 0x5d, 0x33, 0xf4, 0xf0, 0xc1, 0x97, 0x33, 0xce, 0xeb,
 	0x9e, 0x7b, 0xee, 0x39, 0xe7, 0x9e, 0x7b, 0xe2, 0x46, 0x99, 0xdd, 0x39, 0xf1, 0xc7, 0xd1, 0x51,
 	0xdf, 0x9d, 0x05, 0x7e, 0xe4, 0x87, 0xf7, 0xfd, 0x99, 0x37, 0x7d, 0x35, 0xf6, 0xdf, 0xb8, 0x0f,
-	0x3e, 0xff, 0x94, 0x83, 0xf4, 0x82, 0x02, 0xba, 0x79, 0xeb, 0xd0, 0xf7, 0x0f, 0xc7, 0xde, 0xfd,
+	0x3e, 0xfb, 0x84, 0x83, 0xf4, 0x82, 0x02, 0xba, 0x79, 0xeb, 0xd0, 0xf7, 0x0f, 0xc7, 0xde, 0xfd,
 	0xfe, 0x6c, 0x74, 0xbf, 0x3f, 0x9d, 0xfa, 0x51, 0x3f, 0x1a, 0xf9, 0xd3, 0x10, 0x49, 0x4b, 0x03,
 	0xc6, 0xfc, 0x57, 0x33, 0xf7, 0xc8, 0xeb, 0x0f, 0xbd, 0x40, 0x2f, 0xb2, 0x8d, 0x13, 0x2f, 0x08,
-	0x47, 0xfe, 0xb4, 0x98, 0xba, 0x9b, 0xfa, 0x64, 0xcb, 0x12, 0x8f, 0xfa, 0x37, 0x58, 0x36, 0x7a,
-	0x3b, 0xf3, 0x8a, 0xe9, 0xbb, 0xa9, 0x4f, 0xb6, 0x1f, 0x5e, 0xfd, 0x54, 0x1d, 0x14, 0x04, 0x00,
+	0x47, 0xfe, 0xb4, 0x98, 0xba, 0x9b, 0xfa, 0x78, 0xcb, 0x12, 0x8f, 0xfa, 0x37, 0x59, 0x36, 0x7a,
+	0x3b, 0xf3, 0x8a, 0xe9, 0xbb, 0xa9, 0x8f, 0xb7, 0x1f, 0x5e, 0xfd, 0x44, 0x1d, 0x14, 0x04, 0x00,
 	0xd2, 0xe2, 0x24, 0xba, 0xc6, 0x32, 0xa7, 0xa3, 0x61, 0x31, 0xc3, 0x05, 0xc0, 0xcf, 0xd2, 0xbf,
 	0x4a, 0xb1, 0xab, 0x38, 0xca, 0x78, 0xec, 0xbb, 0xde, 0xd8, 0x9b, 0x88, 0x01, 0x1f, 0x91, 0xd8,
 	0x14, 0x17, 0x7b, 0x77, 0x41, 0xac, 0xc2, 0xa1, 0x8c, 0xf0, 0x8c, 0x6d, 0x91, 0x5e, 0x07, 0xa3,
-	0x68, 0xd2, 0x9f, 0x71, 0xad, 0x0a, 0x0f, 0xbf, 0x71, 0x16, 0x7b, 0x82, 0xa1, 0x71, 0xc1, 0x4a,
+	0x68, 0xd2, 0x9f, 0x71, 0xad, 0x0a, 0x0f, 0xbf, 0x79, 0x16, 0x7b, 0x82, 0xa1, 0x71, 0xc1, 0x4a,
 	0x4a, 0xa8, 0xe4, 0xd9, 0x06, 0x90, 0x79, 0xd3, 0xa8, 0xf4, 0x1d, 0x76, 0xeb, 0x2c, 0x5e, 0x30,
 	0x12, 0xfe, 0x0a, 0x8b, 0xe9, 0xbb, 0x19, 0x30, 0x12, 0x3d, 0x96, 0x9e, 0xb2, 0xbc, 0xe4, 0xd4,
-	0x7f, 0x8d, 0xe5, 0x48, 0x62, 0x58, 0x4c, 0xdd, 0xcd, 0x7c, 0x52, 0x78, 0x58, 0x3a, 0x4b, 0x3f,
+	0x7f, 0x8d, 0xe5, 0x48, 0x62, 0x58, 0x4c, 0xdd, 0xcd, 0x7c, 0x5c, 0x78, 0x58, 0x3a, 0x4b, 0x3f,
 	0x34, 0x88, 0x25, 0x79, 0x4a, 0x6d, 0x76, 0x09, 0x48, 0xc2, 0x37, 0xa3, 0x68, 0x70, 0xe4, 0x0e,
 	0xfc, 0xe9, 0xab, 0xd1, 0xa1, 0x7e, 0x85, 0xad, 0xbd, 0x1a, 0xf7, 0x0f, 0x43, 0x5a, 0x1e, 0x7c,
 	0xd0, 0x4b, 0x6c, 0x6b, 0x32, 0x0a, 0x43, 0x37, 0xf4, 0xa6, 0x43, 0x77, 0xec, 0x4d, 0xb9, 0x3d,
@@ -9384,13 +9384,13 @@
 	0x52, 0xff, 0x36, 0x03, 0xb4, 0x3b, 0x18, 0xf7, 0xc3, 0x90, 0x54, 0x58, 0x2e, 0x8b, 0x53, 0x58,
 	0x39, 0xff, 0x74, 0x52, 0x85, 0x5f, 0xfa, 0x0f, 0x60, 0x0e, 0x07, 0x28, 0x85, 0x4f, 0xbd, 0xf0,
 	0xf0, 0x83, 0xa5, 0x8c, 0x92, 0xaa, 0x71, 0xc1, 0xca, 0xf9, 0xaf, 0x0e, 0xb8, 0x2a, 0xfa, 0x0b,
-	0xa6, 0x7b, 0xa7, 0x33, 0x2f, 0x18, 0x41, 0x02, 0xf2, 0x02, 0x92, 0xb3, 0xc6, 0xe5, 0x7c, 0x7d,
+	0xa6, 0x7b, 0xa7, 0x33, 0x2f, 0x18, 0x41, 0x02, 0xf2, 0x02, 0x92, 0xb3, 0xc6, 0xe5, 0x7c, 0x63,
 	0xa9, 0x9c, 0x45, 0xf2, 0xc6, 0x05, 0xeb, 0x92, 0x0a, 0xe5, 0x92, 0x2b, 0x1b, 0x6c, 0x8d, 0x63,
 	0x4b, 0x7f, 0xbc, 0x8d, 0x59, 0x2d, 0xa1, 0xc4, 0xd9, 0xbb, 0x80, 0x4a, 0xc9, 0x4d, 0x1e, 0x92,
 	0xcd, 0x6f, 0xb0, 0xdc, 0x51, 0x3f, 0x74, 0xf9, 0x3a, 0x83, 0xb7, 0xe5, 0xac, 0x8d, 0xa3, 0x7e,
 	0xd8, 0x86, 0xa5, 0xbe, 0xc2, 0xb2, 0xe0, 0x39, 0xe8, 0x14, 0x8d, 0x0b, 0x16, 0x7f, 0xd2, 0x3f,
-	0x66, 0x5b, 0xb3, 0xa3, 0xb7, 0xe1, 0x68, 0xd0, 0x1f, 0x73, 0x9f, 0x43, 0xef, 0x68, 0x5c, 0xb0,
-	0x36, 0x05, 0xb8, 0x07, 0x64, 0x5f, 0x67, 0xdb, 0x94, 0x25, 0xbd, 0xa8, 0x0f, 0x11, 0xca, 0x4d,
+	0x62, 0x5b, 0xb3, 0xa3, 0xb7, 0xe1, 0x68, 0xd0, 0x1f, 0x73, 0x9f, 0x43, 0xef, 0x68, 0x5c, 0xb0,
+	0x36, 0x05, 0xb8, 0x07, 0x64, 0xdf, 0x60, 0xdb, 0x94, 0x25, 0xbd, 0xa8, 0x0f, 0x11, 0xca, 0x4d,
 	0x90, 0x85, 0x3d, 0x83, 0xc3, 0xdb, 0x04, 0xd6, 0x6f, 0xb0, 0x0d, 0x2f, 0x3a, 0x72, 0x87, 0x61,
 	0xc4, 0x13, 0xd2, 0x66, 0xe3, 0x82, 0xb5, 0xee, 0x45, 0x47, 0xb5, 0x30, 0x12, 0xa8, 0x30, 0x18,
 	0xf0, 0x8c, 0x24, 0x50, 0x76, 0x30, 0xd0, 0x77, 0x58, 0x0e, 0x50, 0x7c, 0xc2, 0x39, 0x52, 0x00,
@@ -9412,194 +9412,194 @@
 	0x35, 0x1e, 0x73, 0x23, 0xdc, 0x26, 0x1c, 0xac, 0xc6, 0xe3, 0x78, 0xa9, 0x1e, 0x73, 0x23, 0x7c,
 	0xa0, 0x22, 0x85, 0x11, 0x00, 0xf9, 0x6a, 0xdc, 0x3f, 0xf0, 0xc6, 0xc5, 0x3b, 0x72, 0x86, 0xb3,
 	0x93, 0xc7, 0x75, 0x0e, 0x93, 0x46, 0x78, 0x8c, 0x76, 0xba, 0x9b, 0x30, 0xc2, 0xe3, 0x84, 0x9d,
-	0x1e, 0xa3, 0x9d, 0xee, 0x25, 0x49, 0xb8, 0x9d, 0xbe, 0xc6, 0xb6, 0xf9, 0x40, 0xd3, 0xa1, 0x1b,
+	0x1e, 0xa3, 0x9d, 0xee, 0x25, 0x49, 0xb8, 0x9d, 0xbe, 0xce, 0xb6, 0xf9, 0x40, 0xd3, 0xa1, 0x1b,
 	0xf5, 0x83, 0x43, 0x2f, 0x2a, 0x96, 0x48, 0x97, 0x4d, 0x80, 0x77, 0x86, 0x0e, 0x87, 0xea, 0x77,
-	0x49, 0xa1, 0xe9, 0xd0, 0x0d, 0xc3, 0x71, 0xf1, 0x43, 0x22, 0xca, 0x23, 0x91, 0x1d, 0x8e, 0x55,
-	0x8a, 0x68, 0x3c, 0x2e, 0x7e, 0x94, 0xa4, 0x70, 0xc6, 0x63, 0xfd, 0x0e, 0x63, 0x93, 0xd9, 0x38,
-	0x74, 0x71, 0x4e, 0x1f, 0x93, 0x36, 0x79, 0x80, 0xb5, 0xf8, 0x94, 0x6e, 0xb0, 0x0d, 0x4e, 0x10,
-	0x0d, 0x8a, 0x5f, 0x13, 0x0b, 0x00, 0x00, 0x87, 0x5b, 0x8b, 0xa3, 0x0e, 0xfc, 0xb0, 0xf8, 0x75,
-	0xe1, 0x32, 0x00, 0xa9, 0xf8, 0x21, 0x20, 0x67, 0x07, 0x07, 0xee, 0x28, 0x1c, 0x0d, 0x8b, 0x9f,
-	0x08, 0xe4, 0xec, 0xe0, 0xa0, 0x19, 0x8e, 0x86, 0xfa, 0x6d, 0x96, 0x8f, 0x8e, 0xa7, 0x53, 0x6f,
-	0x0c, 0xbb, 0xf0, 0x37, 0x28, 0x63, 0xe4, 0x10, 0xd4, 0x1c, 0x4a, 0x4b, 0x7b, 0xa7, 0xd1, 0xd1,
-	0x30, 0x28, 0x96, 0x55, 0x4b, 0x9b, 0x1c, 0xa6, 0x7f, 0xc6, 0x2e, 0x27, 0x13, 0x0f, 0xe6, 0xb6,
-	0x11, 0x97, 0x95, 0xb2, 0x2e, 0x25, 0xb2, 0x0f, 0xcf, 0x73, 0x25, 0xb6, 0x49, 0x19, 0x08, 0x49,
-	0x7f, 0x9d, 0x1b, 0x23, 0x65, 0x31, 0x4c, 0x43, 0x2a, 0x4d, 0x18, 0x0c, 0x90, 0xe6, 0xb5, 0x42,
-	0x63, 0x07, 0x03, 0x4e, 0xf3, 0x11, 0xdb, 0x12, 0x69, 0x07, 0x89, 0x26, 0x5c, 0xbd, 0x94, 0x55,
-	0xa0, 0xdc, 0x23, 0xa8, 0x44, 0x46, 0x40, 0xaa, 0x40, 0x50, 0x51, 0x5a, 0x48, 0x50, 0x49, 0xa5,
-	0x42, 0x95, 0x4a, 0xd1, 0x8a, 0xc2, 0x03, 0x89, 0x7e, 0x93, 0x88, 0x18, 0xc6, 0x88, 0x4a, 0x13,
-	0x09, 0x9a, 0xbf, 0xa1, 0xd0, 0x38, 0x44, 0xf3, 0x31, 0x1f, 0xed, 0x71, 0xac, 0xd3, 0xdf, 0x4c,
-	0xd1, 0xfc, 0x0a, 0x14, 0x00, 0x09, 0x32, 0xa9, 0xd4, 0xdf, 0x4a, 0x90, 0x09, 0xad, 0xbe, 0xc9,
-	0x34, 0x25, 0x1c, 0x90, 0xf2, 0xb7, 0x52, 0x34, 0xec, 0x76, 0x1c, 0x14, 0x42, 0xa6, 0xf0, 0x06,
-	0xa4, 0xfc, 0xfb, 0x82, 0xb2, 0x40, 0x3e, 0xc1, 0xc9, 0x60, 0x3b, 0x11, 0x7e, 0x81, 0x74, 0xbf,
-	0x9d, 0xa2, 0x15, 0xdd, 0x14, 0xde, 0x91, 0x18, 0x1c, 0x3d, 0x04, 0x49, 0x7f, 0x27, 0x31, 0x38,
-	0xfa, 0x09, 0x10, 0xc3, 0x8e, 0x7a, 0xd2, 0x1f, 0x1f, 0x7b, 0x95, 0x75, 0xac, 0x74, 0x4a, 0x2e,
-	0xbb, 0xb9, 0x7a, 0x57, 0x86, 0x92, 0x16, 0x30, 0x78, 0xc8, 0xa0, 0xe2, 0x0a, 0x8a, 0x8c, 0x06,
-	0x1e, 0xc3, 0xc0, 0x47, 0x14, 0x26, 0xaa, 0x3f, 0x13, 0xb0, 0xd2, 0xbf, 0xc9, 0xe2, 0x51, 0xb1,
-	0x3f, 0x80, 0xf3, 0xa3, 0xfe, 0x59, 0x62, 0xcf, 0x5e, 0xac, 0x0d, 0x91, 0x4c, 0xad, 0x91, 0xbe,
-	0xc3, 0xd6, 0xfd, 0xe3, 0x68, 0x76, 0x1c, 0x51, 0x6d, 0xf8, 0xc1, 0x2a, 0x1e, 0xa4, 0x82, 0xa0,
-	0xc4, 0x5f, 0xfa, 0x0f, 0x28, 0x28, 0xa3, 0x68, 0xcc, 0xb7, 0xf4, 0xc2, 0x92, 0x93, 0x22, 0xf1,
-	0x0a, 0x3a, 0x11, 0xb6, 0x4e, 0x34, 0xd6, 0x1f, 0xb2, 0xec, 0xec, 0x38, 0x3c, 0xa2, 0x8a, 0x68,
-	0xa5, 0xaa, 0x40, 0xc3, 0x6b, 0x85, 0xe3, 0xf0, 0x08, 0x86, 0x9c, 0xf9, 0x33, 0x2e, 0x8e, 0x2a,
-	0xa0, 0x95, 0x43, 0x0a, 0x3a, 0x9e, 0x0c, 0xfc, 0x59, 0x7b, 0x36, 0x0e, 0xf5, 0x6f, 0xb1, 0xb5,
+	0x49, 0xa1, 0xe9, 0xd0, 0x0d, 0xc3, 0x71, 0xf1, 0x6b, 0x44, 0x94, 0x47, 0x22, 0x3b, 0x1c, 0xab,
+	0x14, 0xd1, 0x78, 0x5c, 0xfc, 0x30, 0x49, 0xe1, 0x8c, 0xc7, 0xfa, 0x1d, 0xc6, 0x26, 0xb3, 0x71,
+	0xe8, 0xe2, 0x9c, 0x3e, 0x22, 0x6d, 0xf2, 0x00, 0x6b, 0xf1, 0x29, 0xdd, 0x60, 0x1b, 0x9c, 0x20,
+	0x1a, 0x14, 0xbf, 0x2e, 0x16, 0x00, 0x00, 0x0e, 0xb7, 0x16, 0x47, 0x1d, 0xf8, 0x61, 0xf1, 0x1b,
+	0xc2, 0x65, 0x00, 0x52, 0xf1, 0x43, 0x40, 0xce, 0x0e, 0x0e, 0xdc, 0x51, 0x38, 0x1a, 0x16, 0x3f,
+	0x16, 0xc8, 0xd9, 0xc1, 0x41, 0x33, 0x1c, 0x0d, 0xf5, 0xdb, 0x2c, 0x1f, 0x1d, 0x4f, 0xa7, 0xde,
+	0x18, 0x76, 0xe1, 0x6f, 0x52, 0xc6, 0xc8, 0x21, 0xa8, 0x39, 0x94, 0x96, 0xf6, 0x4e, 0xa3, 0xa3,
+	0x61, 0x50, 0x2c, 0xab, 0x96, 0x36, 0x39, 0x4c, 0xff, 0x94, 0x5d, 0x4e, 0x26, 0x1e, 0xcc, 0x6d,
+	0x23, 0x2e, 0x2b, 0x65, 0x5d, 0x4a, 0x64, 0x1f, 0x9e, 0xe7, 0x4a, 0x6c, 0x93, 0x32, 0x10, 0x92,
+	0xfe, 0x3a, 0x37, 0x46, 0xca, 0x62, 0x98, 0x86, 0x54, 0x9a, 0x30, 0x18, 0x20, 0xcd, 0x6b, 0x85,
+	0xc6, 0x0e, 0x06, 0x9c, 0xe6, 0x43, 0xb6, 0x25, 0xd2, 0x0e, 0x12, 0x4d, 0xb8, 0x7a, 0x29, 0xab,
+	0x40, 0xb9, 0x47, 0x50, 0x89, 0x8c, 0x80, 0x54, 0x81, 0xa0, 0xa2, 0xb4, 0x90, 0xa0, 0x92, 0x4a,
+	0x85, 0x2a, 0x95, 0xa2, 0x15, 0x85, 0x07, 0x12, 0xfd, 0x26, 0x11, 0x31, 0x8c, 0x11, 0x95, 0x26,
+	0x12, 0x34, 0x7f, 0x43, 0xa1, 0x71, 0x88, 0xe6, 0x23, 0x3e, 0xda, 0xe3, 0x58, 0xa7, 0xbf, 0x99,
+	0xa2, 0xf9, 0x15, 0x28, 0x00, 0x12, 0x64, 0x52, 0xa9, 0xbf, 0x95, 0x20, 0x13, 0x5a, 0x7d, 0x8b,
+	0x69, 0x4a, 0x38, 0x20, 0xe5, 0x6f, 0xa5, 0x68, 0xd8, 0xed, 0x38, 0x28, 0x84, 0x4c, 0xe1, 0x0d,
+	0x48, 0xf9, 0xf7, 0x05, 0x65, 0x81, 0x7c, 0x82, 0x93, 0xc1, 0x76, 0x22, 0xfc, 0x02, 0xe9, 0x7e,
+	0x3b, 0x45, 0x2b, 0xba, 0x29, 0xbc, 0x23, 0x31, 0x38, 0x7a, 0x08, 0x92, 0xfe, 0x4e, 0x62, 0x70,
+	0xf4, 0x13, 0x20, 0x86, 0x1d, 0xf5, 0xa4, 0x3f, 0x3e, 0xf6, 0x2a, 0xeb, 0x58, 0xe9, 0x94, 0x5c,
+	0x76, 0x73, 0xf5, 0xae, 0x0c, 0x25, 0x2d, 0x60, 0xf0, 0x90, 0x41, 0xc5, 0x15, 0x14, 0x19, 0x0d,
+	0x3c, 0x86, 0x81, 0x8f, 0x28, 0x4c, 0x54, 0x7f, 0x26, 0x60, 0xa5, 0x7f, 0x93, 0xc5, 0xa3, 0x62,
+	0x7f, 0x00, 0xe7, 0x47, 0xfd, 0xd3, 0xc4, 0x9e, 0xbd, 0x58, 0x1b, 0x22, 0x99, 0x5a, 0x23, 0x7d,
+	0x87, 0xad, 0xfb, 0xc7, 0xd1, 0xec, 0x38, 0xa2, 0xda, 0xf0, 0x83, 0x55, 0x3c, 0x48, 0x05, 0x41,
+	0x89, 0xbf, 0xf4, 0x1f, 0x50, 0x50, 0x46, 0xd1, 0x98, 0x6f, 0xe9, 0x85, 0x25, 0x27, 0x45, 0xe2,
+	0x15, 0x74, 0x22, 0x6c, 0x9d, 0x68, 0xac, 0x3f, 0x64, 0xd9, 0xd9, 0x71, 0x78, 0x44, 0x15, 0xd1,
+	0x4a, 0x55, 0x81, 0x86, 0xd7, 0x0a, 0xc7, 0xe1, 0x11, 0x0c, 0x39, 0xf3, 0x67, 0x5c, 0x1c, 0x55,
+	0x40, 0x2b, 0x87, 0x14, 0x74, 0x3c, 0x19, 0xf8, 0xb3, 0xf6, 0x6c, 0x1c, 0xea, 0x9f, 0xb3, 0xb5,
 	0xc3, 0xc0, 0x3f, 0x9e, 0xf1, 0xc2, 0xa0, 0xf0, 0xf0, 0xf6, 0x2a, 0x5e, 0x4e, 0x04, 0x9b, 0x06,
 	0xff, 0xa1, 0x7f, 0x9b, 0xad, 0x4f, 0xdf, 0xf0, 0x69, 0x6e, 0x9c, 0x6d, 0x22, 0xa4, 0x02, 0xc6,
 	0xe9, 0x1b, 0x98, 0xe2, 0x13, 0x96, 0x0f, 0xbd, 0x88, 0x2a, 0xb6, 0x1c, 0xe7, 0xbd, 0xb7, 0x8a,
 	0x57, 0x12, 0x42, 0x7e, 0x0a, 0xbd, 0x08, 0x8b, 0xbf, 0x2f, 0xe6, 0x5c, 0x20, 0xcf, 0x85, 0x7c,
-	0xb4, 0x4a, 0x88, 0x4a, 0x0b, 0x49, 0x5c, 0x7d, 0xae, 0xe4, 0xd8, 0x3a, 0x92, 0x95, 0x9e, 0x60,
+	0xb8, 0x4a, 0x88, 0x4a, 0x0b, 0x49, 0x5c, 0x7d, 0xae, 0xe4, 0xd8, 0x3a, 0x92, 0x95, 0x9e, 0x60,
 	0xb9, 0x97, 0x58, 0x58, 0x7e, 0xe6, 0x82, 0xf2, 0x2b, 0x45, 0x67, 0x2e, 0x3a, 0x4d, 0xc2, 0xa1,
-	0x2a, 0x3e, 0xbc, 0xae, 0x4f, 0xfa, 0xa7, 0x70, 0x6e, 0xfd, 0x0c, 0xcf, 0x53, 0x73, 0xcb, 0x0b,
+	0x2a, 0x3e, 0xbc, 0xae, 0x4f, 0xfa, 0xa7, 0x70, 0x6e, 0xfd, 0x14, 0xcf, 0x53, 0x73, 0xcb, 0x0b,
 	0xc5, 0x9f, 0x74, 0x09, 0x3a, 0xbd, 0xd2, 0x72, 0x97, 0xee, 0xe3, 0x51, 0x46, 0x59, 0x55, 0x28,
-	0xfd, 0xbd, 0xe8, 0xc8, 0x0b, 0xa4, 0xc7, 0x6e, 0x59, 0x31, 0xa0, 0xf4, 0x79, 0x62, 0x08, 0xb1,
+	0xfd, 0xbd, 0xe8, 0xc8, 0x0b, 0xa4, 0xc7, 0x6e, 0x59, 0x31, 0xa0, 0xf4, 0x59, 0x62, 0x08, 0xb1,
 	0x9c, 0x5f, 0xc2, 0xf4, 0x2b, 0x4c, 0x9b, 0x5f, 0x47, 0x50, 0x8a, 0xff, 0x50, 0x8e, 0xd4, 0xfc,
 	0xb9, 0x39, 0x2c, 0x95, 0x13, 0x86, 0xc0, 0xe5, 0xd3, 0xaf, 0xca, 0xe5, 0xa6, 0xe3, 0x3c, 0x5f,
-	0xcc, 0x52, 0x83, 0x5d, 0x59, 0xb6, 0x5c, 0xfa, 0x67, 0x54, 0x45, 0x73, 0xea, 0xb3, 0xcf, 0x17,
+	0xcc, 0x52, 0x83, 0x5d, 0x59, 0xb6, 0x5c, 0xfa, 0xa7, 0x54, 0x45, 0x73, 0xea, 0xb3, 0xcf, 0x17,
 	0x54, 0x6e, 0x3f, 0x63, 0xd7, 0x57, 0xac, 0xd9, 0x42, 0xc8, 0xa7, 0x16, 0x43, 0x1e, 0x16, 0x8a,
 	0xd7, 0xbf, 0xb0, 0x22, 0x9b, 0x16, 0xff, 0x5d, 0xfa, 0xfd, 0x0c, 0x9a, 0x77, 0x34, 0x0d, 0xa3,
 	0xe0, 0x18, 0x73, 0x81, 0xae, 0xe4, 0x82, 0x2d, 0x8a, 0xf6, 0x06, 0x63, 0x87, 0x7e, 0xe4, 0xe3,
 	0xa9, 0x95, 0x22, 0x7e, 0xf1, 0x10, 0xa1, 0x48, 0x71, 0x63, 0x72, 0xd8, 0xad, 0xe1, 0x89, 0x1f,
-	0x71, 0x75, 0x87, 0x6d, 0xbf, 0x09, 0x46, 0x91, 0x52, 0x8f, 0x63, 0x0e, 0xf8, 0xe6, 0x99, 0xd2,
+	0x71, 0x75, 0x87, 0x6d, 0xbf, 0x09, 0x46, 0x91, 0x52, 0x8f, 0x63, 0x0e, 0xf8, 0xd6, 0x99, 0xd2,
 	0x92, 0x2c, 0x50, 0xbc, 0x73, 0x88, 0x2c, 0xde, 0x9f, 0xb0, 0x0d, 0x34, 0x4b, 0x48, 0x79, 0xe1,
-	0xa3, 0x33, 0xc5, 0x11, 0x2d, 0xc4, 0x38, 0xfd, 0xd4, 0xbf, 0xcb, 0xd6, 0x26, 0x1e, 0x98, 0x0e,
+	0xc3, 0x33, 0xc5, 0x11, 0x2d, 0xc4, 0x38, 0xfd, 0xd4, 0xbf, 0xcb, 0xd6, 0x26, 0x1e, 0x98, 0x0e,
 	0xf3, 0x43, 0xe9, 0x4c, 0x7e, 0x4e, 0x09, 0xf1, 0xca, 0x7f, 0xe8, 0xdd, 0x39, 0xeb, 0xaf, 0xaf,
 	0x68, 0x60, 0xa9, 0x22, 0xce, 0x0c, 0xb9, 0x75, 0x5c, 0xaa, 0xd2, 0xb7, 0x71, 0x1b, 0x58, 0x6e,
 	0xd7, 0x33, 0x7a, 0x3e, 0xa5, 0x3e, 0xfb, 0xe0, 0x6c, 0x13, 0xea, 0x37, 0x59, 0x4e, 0xae, 0x00,
-	0xf6, 0x2f, 0xe4, 0xb3, 0xfe, 0x21, 0xdb, 0x4a, 0x16, 0x2d, 0x69, 0x4e, 0xb0, 0x39, 0x51, 0xaa,
-	0x95, 0x52, 0x0b, 0xbd, 0x71, 0x89, 0x59, 0xf5, 0x07, 0xf1, 0x6a, 0x60, 0xaf, 0xec, 0xfa, 0x8a,
-	0xc4, 0x23, 0xcd, 0x5f, 0x7a, 0x88, 0x3d, 0xc5, 0x05, 0x23, 0xf3, 0xd4, 0x00, 0x3f, 0x94, 0x49,
-	0xf2, 0xe7, 0xe6, 0xb0, 0xb4, 0x8f, 0xad, 0xbd, 0x55, 0x56, 0xfd, 0x85, 0x83, 0xe2, 0x4f, 0x32,
-	0xd8, 0xc9, 0xe0, 0xfa, 0x4e, 0x7c, 0xea, 0xa0, 0xf9, 0xaf, 0x47, 0x1e, 0x59, 0x8a, 0x9e, 0xf4,
-	0x3b, 0xac, 0x80, 0xbf, 0x54, 0x2b, 0x31, 0x04, 0xf1, 0x22, 0x40, 0x5d, 0xa1, 0x4c, 0xb2, 0x2b,
-	0xf7, 0x3d, 0xb6, 0x31, 0xf0, 0x27, 0x93, 0xfe, 0x14, 0xcf, 0xf6, 0xdb, 0x4b, 0x32, 0xbc, 0x18,
-	0xdf, 0x25, 0x42, 0x4b, 0x70, 0xe8, 0xf7, 0xd8, 0xe6, 0x68, 0x38, 0xf6, 0xdc, 0x68, 0x34, 0xf1,
-	0xfc, 0xe3, 0x88, 0xfa, 0x1f, 0x05, 0x80, 0x39, 0x08, 0x02, 0x92, 0xa3, 0x7e, 0x30, 0x94, 0x24,
-	0xd8, 0x64, 0x2b, 0x00, 0x4c, 0x90, 0xdc, 0x64, 0xb9, 0x59, 0x30, 0xf2, 0x83, 0x51, 0xf4, 0x96,
-	0x3a, 0x6d, 0xf2, 0x59, 0xdf, 0x61, 0x79, 0x6c, 0x5f, 0x81, 0xea, 0xd8, 0x67, 0xcb, 0x21, 0xa0,
-	0xc9, 0x9b, 0x8d, 0xfe, 0x71, 0x84, 0xa7, 0x6e, 0x6c, 0xb5, 0x6d, 0xf8, 0xc7, 0x11, 0x3f, 0x6e,
-	0xef, 0xb0, 0x3c, 0xa0, 0x70, 0xbb, 0xc4, 0x66, 0x1b, 0xd0, 0xee, 0xf2, 0x8c, 0x2a, 0xfb, 0x9d,
-	0x05, 0xb5, 0xdf, 0xf9, 0x97, 0xd8, 0x1a, 0xef, 0xc0, 0xf0, 0xf3, 0x6c, 0xe1, 0xe1, 0xb5, 0xe5,
-	0xfd, 0x19, 0x0b, 0x89, 0xf4, 0x27, 0x6c, 0x53, 0x59, 0xf0, 0xb0, 0xb8, 0xc5, 0x1d, 0xec, 0xd6,
-	0x59, 0xb1, 0x66, 0x25, 0x38, 0x4a, 0x3f, 0x49, 0x61, 0xe9, 0x73, 0x70, 0x3c, 0x78, 0xed, 0x45,
-	0xb0, 0xb8, 0x6f, 0xbc, 0xd1, 0xe1, 0x91, 0xd8, 0xc1, 0xe8, 0x09, 0x8a, 0xac, 0x37, 0xbc, 0x31,
-	0xc4, 0xa7, 0x89, 0xdb, 0x58, 0x9e, 0x43, 0xf8, 0x44, 0xef, 0xb0, 0x02, 0xa2, 0x71, 0xaa, 0xb8,
-	0xba, 0xc8, 0x81, 0x93, 0x7d, 0xa0, 0xa6, 0xa4, 0xf3, 0x05, 0xc1, 0x7f, 0xa2, 0xe6, 0x11, 0x6e,
-	0x3b, 0xe0, 0x79, 0xdf, 0x8f, 0xbd, 0x04, 0x4b, 0xb3, 0xc5, 0xbc, 0x24, 0x89, 0x17, 0xdd, 0xe4,
-	0x7e, 0xa2, 0xcd, 0xbf, 0xb3, 0x82, 0x55, 0x29, 0xea, 0xd4, 0x2d, 0x2f, 0x93, 0xd8, 0xf2, 0x60,
-	0x3a, 0x68, 0xb0, 0xd5, 0xd3, 0x41, 0xbc, 0x25, 0xe8, 0x4a, 0xbf, 0x9d, 0x62, 0xdb, 0xbc, 0x23,
-	0xd8, 0x87, 0x67, 0xa8, 0x17, 0x92, 0x6e, 0x95, 0x9a, 0x73, 0xab, 0xeb, 0x6c, 0x63, 0x34, 0x55,
-	0xcd, 0xbd, 0x3e, 0x9a, 0x72, 0x5b, 0x2b, 0xa6, 0xcc, 0x9c, 0xcf, 0x94, 0x32, 0xae, 0xb3, 0x6a,
-	0x5c, 0x93, 0x79, 0x49, 0x9f, 0xd1, 0xf4, 0x6c, 0x75, 0x7e, 0x55, 0x76, 0x4c, 0xd3, 0x2b, 0x02,
-	0x54, 0x0a, 0x9a, 0x6f, 0x9b, 0x9e, 0x11, 0xf7, 0x71, 0x2e, 0xc9, 0x26, 0x72, 0x89, 0x8c, 0x82,
-	0xb5, 0xf3, 0x44, 0x81, 0x98, 0xde, 0xba, 0x32, 0xbd, 0x7f, 0x92, 0xc1, 0x22, 0x86, 0x33, 0x05,
-	0xde, 0xc4, 0x3f, 0xf1, 0x56, 0xa7, 0x2e, 0x35, 0xf6, 0xd3, 0x73, 0xb1, 0xff, 0x7d, 0x39, 0xf1,
-	0x0c, 0x9f, 0xf8, 0x47, 0xcb, 0x33, 0x13, 0x0d, 0x71, 0xd6, 0xdc, 0xb3, 0xc9, 0xb9, 0xdf, 0x63,
-	0x9b, 0xc3, 0xe3, 0xa0, 0x4f, 0x85, 0xd0, 0x40, 0xa4, 0x2d, 0x01, 0xb3, 0xbd, 0x01, 0x6c, 0x3d,
-	0x92, 0x64, 0x0a, 0x34, 0x98, 0xb7, 0x24, 0x5f, 0x27, 0xf4, 0x06, 0x0b, 0xe9, 0x6f, 0xe3, 0xcb,
-	0xd3, 0x5f, 0x6e, 0x31, 0xfd, 0xdd, 0x63, 0x9b, 0xb4, 0x80, 0x03, 0xff, 0x78, 0x8a, 0x99, 0x2c,
-	0x6b, 0x15, 0x10, 0x56, 0x05, 0x10, 0xe4, 0x80, 0x83, 0xb7, 0x91, 0x47, 0x04, 0x8c, 0x13, 0xe4,
-	0x01, 0x82, 0x68, 0xb9, 0x66, 0x6f, 0xcf, 0xb1, 0x66, 0xa5, 0x3f, 0x49, 0xe3, 0x1e, 0x87, 0xdb,
-	0xd9, 0x41, 0x7f, 0x3a, 0x3c, 0xef, 0x7b, 0x33, 0x85, 0x43, 0x09, 0x56, 0x9d, 0x65, 0x83, 0x7e,
-	0xe4, 0xd1, 0xf2, 0xf1, 0xdf, 0x5c, 0xe1, 0xe3, 0x20, 0x8c, 0xdc, 0x70, 0xf4, 0x1b, 0x1e, 0xb9,
-	0x5e, 0x9e, 0x43, 0xec, 0xd1, 0x6f, 0x78, 0xfa, 0x63, 0x96, 0x1d, 0x06, 0xfe, 0x8c, 0x6a, 0xa4,
-	0x33, 0x07, 0x02, 0x3a, 0x38, 0x3f, 0xc1, 0xbf, 0xfa, 0x17, 0xac, 0x30, 0x0c, 0x07, 0x33, 0x58,
-	0xf2, 0x7e, 0xf0, 0x7a, 0x65, 0x13, 0x59, 0x65, 0x8f, 0xc9, 0x1b, 0x17, 0x2c, 0x06, 0x8f, 0x16,
-	0x7f, 0xd2, 0x3b, 0x4b, 0x8b, 0xa5, 0x4f, 0xce, 0x12, 0x76, 0xae, 0x5a, 0xe9, 0x2a, 0xd6, 0xfd,
-	0x73, 0x53, 0x28, 0x7d, 0x0f, 0x4b, 0xa8, 0xe5, 0xaa, 0x81, 0xbd, 0x66, 0x81, 0x37, 0x70, 0xc7,
-	0xde, 0x89, 0x27, 0xea, 0xf6, 0x3c, 0x40, 0x5a, 0x00, 0x28, 0x19, 0x6c, 0xe7, 0x0c, 0x55, 0xce,
-	0x53, 0x60, 0x94, 0xfe, 0x1d, 0x25, 0x1d, 0x94, 0x71, 0xce, 0x9c, 0x2e, 0x89, 0x17, 0x73, 0xba,
-	0xdc, 0x43, 0xd3, 0xea, 0x1e, 0xaa, 0x56, 0x49, 0x99, 0x44, 0x95, 0xa4, 0x7f, 0x87, 0xad, 0x81,
-	0xe6, 0x22, 0x6d, 0x97, 0xce, 0x32, 0x34, 0xbd, 0xb6, 0x44, 0x86, 0xd2, 0x8f, 0x51, 0x73, 0x2f,
-	0x08, 0xfc, 0xc0, 0x9d, 0x84, 0x87, 0xfa, 0x7d, 0xb6, 0xae, 0xf4, 0x1c, 0x96, 0xa5, 0x61, 0x12,
-	0x40, 0x64, 0xf2, 0x28, 0x91, 0x56, 0x8e, 0x12, 0x3a, 0xcb, 0xf2, 0xbe, 0x62, 0x86, 0xde, 0xfa,
-	0xf9, 0x43, 0x6f, 0x69, 0xb6, 0xfe, 0xad, 0x14, 0xae, 0x1c, 0x0e, 0x9f, 0xe8, 0x82, 0x80, 0x2e,
-	0xcb, 0x4e, 0x29, 0x37, 0x58, 0xce, 0x3b, 0xc5, 0x0d, 0x8d, 0x86, 0xdc, 0xf0, 0x4e, 0x67, 0xbc,
-	0xa9, 0x39, 0xbf, 0x54, 0x99, 0x33, 0x6a, 0x41, 0x55, 0x8b, 0x13, 0x8a, 0xd9, 0xe3, 0x71, 0x34,
-	0x9a, 0xf5, 0xf9, 0x0b, 0xb2, 0x1f, 0x1d, 0x7b, 0x61, 0xa4, 0x7f, 0x9e, 0x88, 0xd9, 0x3b, 0x8b,
-	0x56, 0x95, 0x1c, 0x4a, 0xc8, 0x2e, 0x5f, 0x3c, 0x9d, 0x65, 0x0f, 0xfc, 0xe1, 0x5b, 0xae, 0xd3,
-	0xa6, 0xc5, 0x7f, 0x97, 0x22, 0xf2, 0x66, 0x65, 0xdc, 0xd9, 0xf8, 0xed, 0x2f, 0x7b, 0xd4, 0xdf,
-	0x4d, 0xe1, 0x2b, 0xe1, 0xa1, 0x17, 0x0e, 0xb8, 0x4f, 0xbd, 0x0a, 0xf8, 0x6f, 0x3e, 0x5e, 0xde,
-	0xda, 0x98, 0xbc, 0x0a, 0x6a, 0x80, 0xc2, 0x37, 0x78, 0xf2, 0xcd, 0x60, 0xde, 0x5a, 0x3f, 0x7a,
-	0x23, 0x10, 0x21, 0x21, 0xf0, 0xbd, 0xf0, 0x7a, 0x88, 0x88, 0xdb, 0x8c, 0x85, 0x5e, 0x30, 0xea,
-	0x8f, 0xdd, 0xe9, 0xf1, 0x84, 0x5b, 0x38, 0x6f, 0xe5, 0x11, 0xd2, 0x39, 0x9e, 0x00, 0xdf, 0x10,
-	0x87, 0xe5, 0xc9, 0x25, 0x6f, 0xad, 0x0f, 0x67, 0xc0, 0x57, 0xfa, 0xa3, 0x14, 0xbb, 0x26, 0x77,
-	0x9c, 0x30, 0xea, 0x47, 0xa1, 0x5c, 0x81, 0x33, 0x5e, 0x79, 0xab, 0x05, 0x6a, 0xfa, 0x8c, 0x02,
-	0x35, 0x33, 0x57, 0xa0, 0xae, 0xda, 0x9c, 0xe7, 0x0a, 0xfd, 0xb5, 0x85, 0x42, 0x5f, 0xee, 0x04,
-	0xeb, 0xe7, 0xd9, 0x09, 0xfe, 0x30, 0x83, 0x85, 0x51, 0x3c, 0x29, 0x7d, 0x9b, 0xa5, 0x47, 0x43,
-	0xfe, 0x66, 0x26, 0x6b, 0xa5, 0x47, 0x67, 0xbe, 0xcf, 0x9f, 0xdf, 0x45, 0xd3, 0xe7, 0xd8, 0x45,
-	0x33, 0x4b, 0x76, 0x51, 0xb5, 0x04, 0xc8, 0xce, 0x95, 0x00, 0x5f, 0xcd, 0x01, 0x43, 0x3a, 0xde,
-	0x86, 0xea, 0x78, 0xb1, 0x91, 0x73, 0x09, 0x23, 0x7f, 0x85, 0xfb, 0xf1, 0xff, 0xa3, 0x93, 0xc4,
-	0x1f, 0xa7, 0x70, 0x7f, 0xe8, 0x1f, 0x1e, 0x06, 0xde, 0x61, 0x3f, 0xf2, 0xfe, 0xbf, 0xf1, 0xd0,
-	0x1f, 0xb3, 0x1b, 0xcb, 0x27, 0x06, 0x49, 0x68, 0x7e, 0xa1, 0x52, 0x5f, 0xb6, 0x50, 0xe9, 0xf9,
-	0x85, 0xba, 0xcd, 0x18, 0x1f, 0x1a, 0xd1, 0x54, 0xa6, 0x00, 0x84, 0xa3, 0x4b, 0x7f, 0x9e, 0xc1,
-	0xd4, 0x8f, 0xc6, 0xa3, 0x5b, 0x17, 0xee, 0x2c, 0xf0, 0x67, 0x5e, 0xc0, 0xeb, 0x53, 0x35, 0x09,
-	0x2e, 0x56, 0x0e, 0x8b, 0x6c, 0x6a, 0x36, 0xdc, 0x9f, 0x5b, 0x76, 0x6c, 0x66, 0x7d, 0x76, 0x1e,
-	0x29, 0x2a, 0x1f, 0x7f, 0xd7, 0xa5, 0x3c, 0xeb, 0x16, 0x2b, 0x4c, 0xbd, 0xd3, 0x48, 0xbd, 0xd8,
-	0x51, 0x78, 0x78, 0xff, 0x3c, 0x62, 0x15, 0x36, 0xa8, 0x95, 0xe0, 0x91, 0xae, 0x83, 0xec, 0xce,
-	0xb7, 0xb5, 0xbe, 0x79, 0x1e, 0x79, 0x4b, 0xba, 0x5b, 0xdf, 0x63, 0x19, 0xff, 0x74, 0xb2, 0xb2,
-	0x70, 0x5b, 0x22, 0xc4, 0x3f, 0x9d, 0x34, 0x2e, 0x58, 0xc0, 0x05, 0x16, 0x5b, 0x52, 0xb1, 0x9d,
-	0xcb, 0x62, 0x67, 0x56, 0x6e, 0xe2, 0xad, 0x47, 0xe9, 0x90, 0x7d, 0x78, 0x0e, 0x8b, 0x2f, 0x04,
-	0x6c, 0xea, 0xe7, 0x0e, 0xd8, 0x2f, 0x58, 0xe9, 0xcb, 0xd7, 0x40, 0xff, 0x88, 0x6d, 0xc7, 0x8f,
-	0xee, 0x68, 0x88, 0x23, 0x6d, 0x59, 0x9b, 0x72, 0x65, 0x9a, 0xc3, 0xb0, 0x64, 0x63, 0x8b, 0x6d,
-	0xb5, 0xfd, 0x7f, 0x91, 0x36, 0xd8, 0xb7, 0x56, 0x39, 0x3e, 0xac, 0x07, 0xec, 0x92, 0xfe, 0xe9,
-	0x84, 0x6b, 0x94, 0xc1, 0x8b, 0x33, 0xfe, 0xe9, 0x04, 0x74, 0xf9, 0x87, 0xa9, 0x95, 0x16, 0x3c,
-	0xb3, 0x60, 0x5d, 0xf2, 0x66, 0x28, 0x51, 0x44, 0x65, 0x92, 0x45, 0xd4, 0x37, 0x59, 0xe2, 0x36,
-	0x88, 0x4b, 0xd5, 0x12, 0x68, 0xa2, 0xa9, 0x88, 0x1a, 0x54, 0x4e, 0xbf, 0x97, 0x66, 0xfa, 0x82,
-	0x4e, 0xe1, 0x59, 0x39, 0x51, 0xdc, 0x28, 0x4b, 0x2b, 0x37, 0xca, 0x3e, 0x66, 0xdb, 0x4a, 0x2b,
-	0x12, 0xf2, 0x57, 0x86, 0x27, 0x93, 0xad, 0xb8, 0x17, 0x09, 0xb9, 0x5c, 0x25, 0xe3, 0x8d, 0x4e,
-	0x4a, 0x8f, 0x92, 0xec, 0x39, 0x00, 0x95, 0x0b, 0x46, 0x6b, 0x89, 0x0b, 0x46, 0x77, 0x58, 0x61,
-	0xd2, 0x3f, 0x75, 0xbd, 0x69, 0x14, 0x8c, 0xbc, 0x90, 0xb6, 0x32, 0x36, 0xe9, 0x9f, 0x9a, 0x08,
-	0xd1, 0x77, 0xe1, 0x9c, 0xc0, 0xd3, 0x0f, 0xe0, 0x37, 0xf8, 0x6a, 0x9e, 0x27, 0x8c, 0x20, 0x5f,
-	0x59, 0x0a, 0x6b, 0xe9, 0x27, 0x29, 0x6c, 0xb8, 0x23, 0x29, 0xee, 0xfd, 0x67, 0xef, 0xf5, 0xe0,
-	0x1a, 0x27, 0x6a, 0x26, 0xdd, 0xb2, 0x0a, 0x08, 0xc3, 0x5c, 0x7a, 0x8f, 0x6d, 0x8e, 0x7d, 0xff,
-	0xf5, 0xf1, 0x4c, 0xc9, 0xa6, 0x59, 0xab, 0x80, 0x30, 0x24, 0xf9, 0x90, 0x6d, 0x71, 0xdb, 0x79,
-	0x43, 0xa2, 0xc9, 0x52, 0x3f, 0x17, 0x81, 0x98, 0x74, 0x1f, 0x60, 0xa1, 0x25, 0xef, 0x8c, 0xc5,
-	0xdb, 0xd8, 0xaa, 0x8b, 0x5c, 0xa5, 0x3f, 0xa5, 0x3a, 0x26, 0xe6, 0x59, 0x7d, 0xe9, 0xeb, 0x36,
-	0x63, 0xc1, 0x29, 0x75, 0x4c, 0x42, 0xb1, 0x23, 0x04, 0xa7, 0x3d, 0x04, 0x00, 0x3a, 0x8a, 0xd1,
-	0x38, 0x87, 0x7c, 0x24, 0xd1, 0x37, 0x58, 0x2e, 0x38, 0x75, 0x61, 0x03, 0x09, 0x49, 0xf9, 0x8d,
-	0xe0, 0xb4, 0x02, 0x8f, 0xdc, 0x7a, 0x02, 0x85, 0xdb, 0xde, 0x46, 0x44, 0x28, 0x1c, 0x13, 0x8e,
-	0x81, 0x33, 0x6f, 0xc8, 0x57, 0x95, 0x8f, 0x59, 0x43, 0x00, 0x8d, 0x29, 0xd0, 0x1b, 0x62, 0x4c,
-	0x81, 0xde, 0x61, 0xf9, 0xe0, 0x14, 0x8f, 0x1f, 0x21, 0x95, 0x2a, 0xb9, 0xe0, 0xd4, 0xe4, 0xcf,
-	0x80, 0x8c, 0x24, 0x12, 0x2b, 0x95, 0x5c, 0x24, 0x90, 0x77, 0xd9, 0x66, 0x70, 0xea, 0xbe, 0x0a,
-	0xfa, 0x13, 0x0f, 0x48, 0xa8, 0x50, 0x61, 0xc1, 0x69, 0x1d, 0x40, 0x26, 0xbf, 0xe6, 0x58, 0x08,
-	0x4e, 0x5d, 0xff, 0xc4, 0x0b, 0x38, 0x41, 0x41, 0xa8, 0xd6, 0x3d, 0xf1, 0x02, 0xc0, 0xdf, 0xe2,
-	0x9a, 0x0f, 0x82, 0x01, 0x47, 0x6f, 0x8a, 0xc1, 0xab, 0xc1, 0x00, 0xb9, 0xd9, 0xc0, 0x1f, 0x8f,
-	0x47, 0x21, 0xd5, 0x2d, 0xb4, 0xd7, 0x0b, 0xc8, 0x42, 0x85, 0xb8, 0x7d, 0x8e, 0x0a, 0xf1, 0xe2,
-	0x62, 0x85, 0x58, 0x7a, 0x84, 0x2d, 0x7e, 0x6c, 0x09, 0x2e, 0x94, 0x36, 0xab, 0x5e, 0x8e, 0xed,
-	0x63, 0xdc, 0x63, 0x17, 0x10, 0x1d, 0xce, 0x0b, 0xfe, 0xef, 0x8b, 0x86, 0xd2, 0x4f, 0xd2, 0x18,
-	0x3a, 0x8a, 0x3a, 0x67, 0xa8, 0xc1, 0x97, 0xcf, 0x7b, 0x95, 0x88, 0x9b, 0x5c, 0xe0, 0xbd, 0x92,
-	0x41, 0x93, 0xd0, 0x26, 0xf3, 0x65, 0xda, 0x64, 0xe7, 0x4b, 0x98, 0xaf, 0xaa, 0x97, 0x55, 0x61,
-	0x9b, 0x64, 0x29, 0x3e, 0x23, 0xca, 0x2d, 0x77, 0x56, 0x34, 0x57, 0x85, 0x39, 0xad, 0x02, 0x3e,
-	0xdb, 0xc0, 0x03, 0xc7, 0xb6, 0xed, 0xd8, 0x32, 0xfc, 0xf0, 0xf6, 0x65, 0x77, 0x1e, 0xcf, 0x6c,
-	0xfd, 0xa6, 0x57, 0xb6, 0x7e, 0x33, 0xe7, 0x6c, 0xfd, 0x9e, 0xa8, 0x4b, 0x05, 0x69, 0xf5, 0x2d,
-	0x68, 0x24, 0x8f, 0x92, 0x85, 0x95, 0x1a, 0x01, 0x09, 0x5e, 0x28, 0xd5, 0x1f, 0xe2, 0xa5, 0x61,
-	0x51, 0xa1, 0xdd, 0x5a, 0xc1, 0xc1, 0x69, 0xf0, 0x4a, 0x71, 0x58, 0xfa, 0x3b, 0x29, 0x74, 0x3e,
-	0x44, 0xc9, 0x4d, 0xe7, 0x0a, 0x5b, 0xe3, 0x77, 0x0d, 0xc5, 0x9b, 0x59, 0xfe, 0xb0, 0x70, 0x93,
-	0x36, 0xbd, 0x78, 0x93, 0x16, 0xbc, 0x00, 0x76, 0x06, 0x2e, 0x4f, 0xec, 0xba, 0xf9, 0x49, 0xff,
-	0x94, 0x57, 0xe3, 0xa1, 0x5e, 0x4c, 0x36, 0xf9, 0xb7, 0xe2, 0x9d, 0xfc, 0x3b, 0x6a, 0xeb, 0x68,
-	0xb1, 0x7d, 0x70, 0xc6, 0x6b, 0xad, 0x5f, 0xc7, 0x17, 0xc6, 0x4a, 0x5b, 0x06, 0x7d, 0xbd, 0xcc,
-	0x2e, 0x91, 0xcf, 0x72, 0xa0, 0x1a, 0x46, 0x17, 0x11, 0x51, 0xe9, 0x4f, 0x31, 0x99, 0xeb, 0x5f,
-	0x63, 0x17, 0xb9, 0xf3, 0x2a, 0x94, 0x18, 0x4f, 0x5b, 0x00, 0x96, 0x74, 0xa5, 0x3f, 0xa0, 0x98,
-	0xc2, 0xc1, 0x64, 0x4c, 0xad, 0x50, 0x6d, 0xae, 0x6e, 0x4f, 0xcf, 0xd5, 0xed, 0x30, 0x6a, 0xdc,
-	0x12, 0x57, 0x03, 0x6b, 0x0b, 0xc1, 0xcd, 0x29, 0xd2, 0x95, 0x18, 0x57, 0x23, 0xa6, 0xc2, 0xe8,
-	0x2a, 0x00, 0x50, 0xd0, 0x7c, 0x55, 0xf1, 0xf5, 0x84, 0xb1, 0xd8, 0x86, 0x14, 0x5d, 0xf7, 0xce,
-	0xea, 0x81, 0xa1, 0x3f, 0xe5, 0xe1, 0x37, 0x46, 0xd7, 0x6f, 0x62, 0x5b, 0x1d, 0x49, 0xce, 0xbc,
-	0xb9, 0xaf, 0x5a, 0x2e, 0xbd, 0xa2, 0x0b, 0x97, 0xf9, 0x79, 0xbb, 0x70, 0xff, 0x9a, 0x5c, 0x1a,
-	0x09, 0xa4, 0x4b, 0xd3, 0xbd, 0x75, 0x7c, 0x67, 0x9d, 0x92, 0xf7, 0xd6, 0xdb, 0xfc, 0xa5, 0xe9,
-	0x6d, 0x9a, 0x34, 0x3a, 0x3d, 0xad, 0x13, 0x40, 0x9c, 0xa5, 0x8e, 0x9f, 0x59, 0xe2, 0xf8, 0x24,
-	0x5f, 0xb4, 0x0e, 0x85, 0x7c, 0x70, 0x1d, 0x89, 0x1c, 0xf8, 0x63, 0x3f, 0xa0, 0x95, 0x01, 0x64,
-	0x15, 0x9e, 0x4b, 0x3f, 0x56, 0x5d, 0x0a, 0x63, 0xff, 0x5b, 0xb2, 0xee, 0x4a, 0xad, 0xb8, 0x41,
-	0xa3, 0x5a, 0x57, 0x96, 0x65, 0x5f, 0x9a, 0x01, 0x14, 0xb7, 0x15, 0x19, 0xe0, 0x84, 0xdd, 0xe3,
-	0x5d, 0xc3, 0x44, 0xbf, 0x50, 0x86, 0xdf, 0xd1, 0xf2, 0x1b, 0x52, 0xa9, 0x2f, 0xa9, 0x83, 0xe7,
-	0x9a, 0x89, 0xa2, 0x51, 0x98, 0x51, 0x1a, 0x85, 0x63, 0xdc, 0x2b, 0x13, 0xe3, 0xfe, 0xf2, 0x46,
-	0x33, 0xb1, 0x2d, 0xf9, 0xa3, 0x63, 0xef, 0x98, 0xea, 0x7c, 0x1a, 0x8b, 0x37, 0x75, 0xb0, 0xee,
-	0x14, 0x5e, 0x21, 0xcf, 0xcd, 0x1a, 0xcb, 0xc4, 0x37, 0x72, 0xe0, 0x67, 0x29, 0x40, 0xa5, 0x15,
-	0x31, 0x93, 0xd1, 0xd4, 0xe5, 0x6f, 0x12, 0xaa, 0xac, 0xa0, 0xc8, 0xa5, 0x75, 0x5b, 0x74, 0xdb,
-	0x05, 0x0d, 0xb0, 0xda, 0x6d, 0xc8, 0xfe, 0xef, 0xfc, 0x2b, 0x8a, 0x65, 0x63, 0xf6, 0x4f, 0x7f,
-	0xc9, 0x63, 0xfe, 0x23, 0x6a, 0xd4, 0x28, 0x9c, 0x09, 0xeb, 0x7f, 0x25, 0x03, 0x9f, 0xe7, 0x70,
-	0xb5, 0x6c, 0x2d, 0xff, 0x76, 0x0a, 0x13, 0x0c, 0xa5, 0x4e, 0x3e, 0x08, 0xf8, 0x03, 0x8e, 0x16,
-	0x27, 0x61, 0xfe, 0x8c, 0xc7, 0x24, 0xa5, 0x6d, 0x84, 0x17, 0xae, 0x2a, 0x89, 0xf3, 0xc9, 0xaa,
-	0x4e, 0xff, 0x0a, 0xfd, 0xe9, 0x68, 0xf2, 0x10, 0xaf, 0x53, 0x20, 0xd1, 0x21, 0xaf, 0x35, 0x20,
-	0x0a, 0xe5, 0x96, 0xb5, 0xe4, 0xa2, 0x57, 0xe9, 0x10, 0xcf, 0xab, 0x4b, 0x78, 0x66, 0xe3, 0xb7,
-	0x4b, 0xaf, 0x86, 0x7d, 0x8b, 0xad, 0x73, 0x6a, 0xf1, 0x5d, 0xc5, 0xed, 0x55, 0x6f, 0x55, 0x39,
+	0xf6, 0x2f, 0xe4, 0xb3, 0xfe, 0x35, 0xb6, 0x95, 0x2c, 0x5a, 0xd2, 0x9c, 0x60, 0x73, 0xa2, 0x54,
+	0x2b, 0xa5, 0x16, 0x7a, 0xe3, 0x12, 0xb3, 0xea, 0x0f, 0xe2, 0xd5, 0xc0, 0x5e, 0xd9, 0xf5, 0x15,
+	0x89, 0x47, 0x9a, 0xbf, 0xf4, 0x10, 0x7b, 0x8a, 0x0b, 0x46, 0xe6, 0xa9, 0x01, 0x7e, 0x28, 0x93,
+	0xe4, 0xcf, 0xcd, 0x61, 0x69, 0x1f, 0x5b, 0x7b, 0xab, 0xac, 0xfa, 0x0b, 0x07, 0xc5, 0x9f, 0x64,
+	0xb0, 0x93, 0xc1, 0xf5, 0x9d, 0xf8, 0xd4, 0x41, 0xf3, 0x5f, 0x8f, 0x3c, 0xb2, 0x14, 0x3d, 0xe9,
+	0x77, 0x58, 0x01, 0x7f, 0xa9, 0x56, 0x62, 0x08, 0xe2, 0x45, 0x80, 0xba, 0x42, 0x99, 0x64, 0x57,
+	0xee, 0x7b, 0x6c, 0x63, 0xe0, 0x4f, 0x26, 0xfd, 0x29, 0x9e, 0xed, 0xb7, 0x97, 0x64, 0x78, 0x31,
+	0xbe, 0x4b, 0x84, 0x96, 0xe0, 0xd0, 0xef, 0xb1, 0xcd, 0xd1, 0x70, 0xec, 0xb9, 0xd1, 0x68, 0xe2,
+	0xf9, 0xc7, 0x11, 0xf5, 0x3f, 0x0a, 0x00, 0x73, 0x10, 0x04, 0x24, 0x47, 0xfd, 0x60, 0x28, 0x49,
+	0xb0, 0xc9, 0x56, 0x00, 0x98, 0x20, 0xb9, 0xc9, 0x72, 0xb3, 0x60, 0xe4, 0x07, 0xa3, 0xe8, 0x2d,
+	0x75, 0xda, 0xe4, 0xb3, 0xbe, 0xc3, 0xf2, 0xd8, 0xbe, 0x02, 0xd5, 0xb1, 0xcf, 0x96, 0x43, 0x40,
+	0x93, 0x37, 0x1b, 0xfd, 0xe3, 0x08, 0x4f, 0xdd, 0xd8, 0x6a, 0xdb, 0xf0, 0x8f, 0x23, 0x7e, 0xdc,
+	0xde, 0x61, 0x79, 0x40, 0xe1, 0x76, 0x89, 0xcd, 0x36, 0xa0, 0xdd, 0xe5, 0x19, 0x55, 0xf6, 0x3b,
+	0x0b, 0x6a, 0xbf, 0xf3, 0x2f, 0xb1, 0x35, 0xde, 0x81, 0xe1, 0xe7, 0xd9, 0xc2, 0xc3, 0x6b, 0xcb,
+	0xfb, 0x33, 0x16, 0x12, 0xe9, 0x4f, 0xd8, 0xa6, 0xb2, 0xe0, 0x61, 0x71, 0x8b, 0x3b, 0xd8, 0xad,
+	0xb3, 0x62, 0xcd, 0x4a, 0x70, 0x94, 0x7e, 0x92, 0xc2, 0xd2, 0xe7, 0xe0, 0x78, 0xf0, 0xda, 0x8b,
+	0x60, 0x71, 0xdf, 0x78, 0xa3, 0xc3, 0x23, 0xb1, 0x83, 0xd1, 0x13, 0x14, 0x59, 0x6f, 0x78, 0x63,
+	0x88, 0x4f, 0x13, 0xb7, 0xb1, 0x3c, 0x87, 0xf0, 0x89, 0xde, 0x61, 0x05, 0x44, 0xe3, 0x54, 0x71,
+	0x75, 0x91, 0x03, 0x27, 0xfb, 0x40, 0x4d, 0x49, 0xe7, 0x0b, 0x82, 0xff, 0x44, 0xcd, 0x23, 0xdc,
+	0x76, 0xc0, 0xf3, 0xbe, 0x1f, 0x7b, 0x09, 0x96, 0x66, 0x8b, 0x79, 0x49, 0x12, 0x2f, 0xba, 0xc9,
+	0xfd, 0x44, 0x9b, 0x7f, 0x67, 0x05, 0xab, 0x52, 0xd4, 0xa9, 0x5b, 0x5e, 0x26, 0xb1, 0xe5, 0xc1,
+	0x74, 0xd0, 0x60, 0xab, 0xa7, 0x83, 0x78, 0x4b, 0xd0, 0x95, 0x7e, 0x3b, 0xc5, 0xb6, 0x79, 0x47,
+	0xb0, 0x0f, 0xcf, 0x50, 0x2f, 0x24, 0xdd, 0x2a, 0x35, 0xe7, 0x56, 0xd7, 0xd9, 0xc6, 0x68, 0xaa,
+	0x9a, 0x7b, 0x7d, 0x34, 0xe5, 0xb6, 0x56, 0x4c, 0x99, 0x39, 0x9f, 0x29, 0x65, 0x5c, 0x67, 0xd5,
+	0xb8, 0x26, 0xf3, 0x92, 0x3e, 0xa3, 0xe9, 0xd9, 0xea, 0xfc, 0xaa, 0xec, 0x98, 0xa6, 0x57, 0x04,
+	0xa8, 0x14, 0x34, 0xdf, 0x36, 0x3d, 0x23, 0xee, 0xe3, 0x5c, 0x92, 0x4d, 0xe4, 0x12, 0x19, 0x05,
+	0x6b, 0xe7, 0x89, 0x02, 0x31, 0xbd, 0x75, 0x65, 0x7a, 0xff, 0x24, 0x83, 0x45, 0x0c, 0x67, 0x0a,
+	0xbc, 0x89, 0x7f, 0xe2, 0xad, 0x4e, 0x5d, 0x6a, 0xec, 0xa7, 0xe7, 0x62, 0xff, 0xfb, 0x72, 0xe2,
+	0x19, 0x3e, 0xf1, 0x0f, 0x97, 0x67, 0x26, 0x1a, 0xe2, 0xac, 0xb9, 0x67, 0x93, 0x73, 0xbf, 0xc7,
+	0x36, 0x87, 0xc7, 0x41, 0x9f, 0x0a, 0xa1, 0x81, 0x48, 0x5b, 0x02, 0x66, 0x7b, 0x03, 0xd8, 0x7a,
+	0x24, 0xc9, 0x14, 0x68, 0x30, 0x6f, 0x49, 0xbe, 0x4e, 0xe8, 0x0d, 0x16, 0xd2, 0xdf, 0xc6, 0x97,
+	0xa7, 0xbf, 0xdc, 0x62, 0xfa, 0xbb, 0xc7, 0x36, 0x69, 0x01, 0x07, 0xfe, 0xf1, 0x14, 0x33, 0x59,
+	0xd6, 0x2a, 0x20, 0xac, 0x0a, 0x20, 0xc8, 0x01, 0x07, 0x6f, 0x23, 0x8f, 0x08, 0x18, 0x27, 0xc8,
+	0x03, 0x04, 0xd1, 0x72, 0xcd, 0xde, 0x9e, 0x63, 0xcd, 0x4a, 0x7f, 0x92, 0xc6, 0x3d, 0x0e, 0xb7,
+	0xb3, 0x83, 0xfe, 0x74, 0x78, 0xde, 0xf7, 0x66, 0x0a, 0x87, 0x12, 0xac, 0x3a, 0xcb, 0x06, 0xfd,
+	0xc8, 0xa3, 0xe5, 0xe3, 0xbf, 0xb9, 0xc2, 0xc7, 0x41, 0x18, 0xb9, 0xe1, 0xe8, 0x37, 0x3c, 0x72,
+	0xbd, 0x3c, 0x87, 0xd8, 0xa3, 0xdf, 0xf0, 0xf4, 0xc7, 0x2c, 0x3b, 0x0c, 0xfc, 0x19, 0xd5, 0x48,
+	0x67, 0x0e, 0x04, 0x74, 0x70, 0x7e, 0x82, 0x7f, 0xf5, 0x2f, 0x58, 0x61, 0x18, 0x0e, 0x66, 0xb0,
+	0xe4, 0xfd, 0xe0, 0xf5, 0xca, 0x26, 0xb2, 0xca, 0x1e, 0x93, 0x37, 0x2e, 0x58, 0x0c, 0x1e, 0x2d,
+	0xfe, 0xa4, 0x77, 0x96, 0x16, 0x4b, 0x1f, 0x9f, 0x25, 0xec, 0x5c, 0xb5, 0xd2, 0x55, 0xac, 0xfb,
+	0xe7, 0xa6, 0x50, 0xfa, 0x1e, 0x96, 0x50, 0xcb, 0x55, 0x03, 0x7b, 0xcd, 0x02, 0x6f, 0xe0, 0x8e,
+	0xbd, 0x13, 0x4f, 0xd4, 0xed, 0x79, 0x80, 0xb4, 0x00, 0x50, 0x32, 0xd8, 0xce, 0x19, 0xaa, 0x9c,
+	0xa7, 0xc0, 0x28, 0xfd, 0x3b, 0x4a, 0x3a, 0x28, 0xe3, 0x9c, 0x39, 0x5d, 0x12, 0x2f, 0xe6, 0x74,
+	0xb9, 0x87, 0xa6, 0xd5, 0x3d, 0x54, 0xad, 0x92, 0x32, 0x89, 0x2a, 0x49, 0xff, 0x0e, 0x5b, 0x03,
+	0xcd, 0x45, 0xda, 0x2e, 0x9d, 0x65, 0x68, 0x7a, 0x6d, 0x89, 0x0c, 0xa5, 0x1f, 0xa3, 0xe6, 0x5e,
+	0x10, 0xf8, 0x81, 0x3b, 0x09, 0x0f, 0xf5, 0xfb, 0x6c, 0x5d, 0xe9, 0x39, 0x2c, 0x4b, 0xc3, 0x24,
+	0x80, 0xc8, 0xe4, 0x51, 0x22, 0xad, 0x1c, 0x25, 0x74, 0x96, 0xe5, 0x7d, 0xc5, 0x0c, 0xbd, 0xf5,
+	0xf3, 0x87, 0xde, 0xd2, 0x6c, 0xfd, 0x5b, 0x29, 0x5c, 0x39, 0x1c, 0x3e, 0xd1, 0x05, 0x01, 0x5d,
+	0x96, 0x9d, 0x52, 0x6e, 0xb0, 0x9c, 0x77, 0x8a, 0x1b, 0x1a, 0x0d, 0xb9, 0xe1, 0x9d, 0xce, 0x78,
+	0x53, 0x73, 0x7e, 0xa9, 0x32, 0x67, 0xd4, 0x82, 0xaa, 0x16, 0x27, 0x14, 0xb3, 0xc7, 0xe3, 0x68,
+	0x34, 0xeb, 0xf3, 0x17, 0x64, 0x3f, 0x3a, 0xf6, 0xc2, 0x48, 0xff, 0x2c, 0x11, 0xb3, 0x77, 0x16,
+	0xad, 0x2a, 0x39, 0x94, 0x90, 0x5d, 0xbe, 0x78, 0x3a, 0xcb, 0x1e, 0xf8, 0xc3, 0xb7, 0x5c, 0xa7,
+	0x4d, 0x8b, 0xff, 0x2e, 0x45, 0xe4, 0xcd, 0xca, 0xb8, 0xb3, 0xf1, 0xdb, 0x5f, 0xf6, 0xa8, 0xbf,
+	0x9b, 0xc2, 0x57, 0xc2, 0x43, 0x2f, 0x1c, 0x70, 0x9f, 0x7a, 0x15, 0xf0, 0xdf, 0x7c, 0xbc, 0xbc,
+	0xb5, 0x31, 0x79, 0x15, 0xd4, 0x00, 0x85, 0x6f, 0xf0, 0xe4, 0x9b, 0xc1, 0xbc, 0xb5, 0x7e, 0xf4,
+	0x46, 0x20, 0x42, 0x42, 0xe0, 0x7b, 0xe1, 0xf5, 0x10, 0x11, 0xb7, 0x19, 0x0b, 0xbd, 0x60, 0xd4,
+	0x1f, 0xbb, 0xd3, 0xe3, 0x09, 0xb7, 0x70, 0xde, 0xca, 0x23, 0xa4, 0x73, 0x3c, 0x01, 0xbe, 0x21,
+	0x0e, 0xcb, 0x93, 0x4b, 0xde, 0x5a, 0x1f, 0xce, 0x80, 0xaf, 0xf4, 0x47, 0x29, 0x76, 0x4d, 0xee,
+	0x38, 0x61, 0xd4, 0x8f, 0x42, 0xb9, 0x02, 0x67, 0xbc, 0xf2, 0x56, 0x0b, 0xd4, 0xf4, 0x19, 0x05,
+	0x6a, 0x66, 0xae, 0x40, 0x5d, 0xb5, 0x39, 0xcf, 0x15, 0xfa, 0x6b, 0x0b, 0x85, 0xbe, 0xdc, 0x09,
+	0xd6, 0xcf, 0xb3, 0x13, 0xfc, 0x61, 0x06, 0x0b, 0xa3, 0x78, 0x52, 0xfa, 0x36, 0x4b, 0x8f, 0x86,
+	0xfc, 0xcd, 0x4c, 0xd6, 0x4a, 0x8f, 0xce, 0x7c, 0x9f, 0x3f, 0xbf, 0x8b, 0xa6, 0xcf, 0xb1, 0x8b,
+	0x66, 0x96, 0xec, 0xa2, 0x6a, 0x09, 0x90, 0x9d, 0x2b, 0x01, 0xbe, 0x9a, 0x03, 0x86, 0x74, 0xbc,
+	0x0d, 0xd5, 0xf1, 0x62, 0x23, 0xe7, 0x12, 0x46, 0xfe, 0x0a, 0xf7, 0xe3, 0xff, 0x47, 0x27, 0x89,
+	0x3f, 0x4e, 0xe1, 0xfe, 0xd0, 0x3f, 0x3c, 0x0c, 0xbc, 0xc3, 0x7e, 0xe4, 0xfd, 0x7f, 0xe3, 0xa1,
+	0x3f, 0x66, 0x37, 0x96, 0x4f, 0x0c, 0x92, 0xd0, 0xfc, 0x42, 0xa5, 0xbe, 0x6c, 0xa1, 0xd2, 0xf3,
+	0x0b, 0x75, 0x9b, 0x31, 0x3e, 0x34, 0xa2, 0xa9, 0x4c, 0x01, 0x08, 0x47, 0x97, 0xfe, 0x3c, 0x83,
+	0xa9, 0x1f, 0x8d, 0x47, 0xb7, 0x2e, 0xdc, 0x59, 0xe0, 0xcf, 0xbc, 0x80, 0xd7, 0xa7, 0x6a, 0x12,
+	0x5c, 0xac, 0x1c, 0x16, 0xd9, 0xd4, 0x6c, 0xb8, 0x3f, 0xb7, 0xec, 0xd8, 0xcc, 0xfa, 0xf4, 0x3c,
+	0x52, 0x54, 0x3e, 0xfe, 0xae, 0x4b, 0x79, 0xd6, 0x2d, 0x56, 0x98, 0x7a, 0xa7, 0x91, 0x7a, 0xb1,
+	0xa3, 0xf0, 0xf0, 0xfe, 0x79, 0xc4, 0x2a, 0x6c, 0x50, 0x2b, 0xc1, 0x23, 0x5d, 0x07, 0xd9, 0x9d,
+	0x6f, 0x6b, 0x7d, 0xeb, 0x3c, 0xf2, 0x96, 0x74, 0xb7, 0xbe, 0xc7, 0x32, 0xfe, 0xe9, 0x64, 0x65,
+	0xe1, 0xb6, 0x44, 0x88, 0x7f, 0x3a, 0x69, 0x5c, 0xb0, 0x80, 0x0b, 0x2c, 0xb6, 0xa4, 0x62, 0x3b,
+	0x97, 0xc5, 0xce, 0xac, 0xdc, 0xc4, 0x5b, 0x8f, 0xd2, 0x21, 0xfb, 0xda, 0x39, 0x2c, 0xbe, 0x10,
+	0xb0, 0xa9, 0x9f, 0x3b, 0x60, 0xbf, 0x60, 0xa5, 0x2f, 0x5f, 0x03, 0xfd, 0x43, 0xb6, 0x1d, 0x3f,
+	0xba, 0xa3, 0x21, 0x8e, 0xb4, 0x65, 0x6d, 0xca, 0x95, 0x69, 0x0e, 0xc3, 0x92, 0x8d, 0x2d, 0xb6,
+	0xd5, 0xf6, 0xff, 0x45, 0xda, 0x60, 0x9f, 0xaf, 0x72, 0x7c, 0x58, 0x0f, 0xd8, 0x25, 0xfd, 0xd3,
+	0x09, 0xd7, 0x28, 0x83, 0x17, 0x67, 0xfc, 0xd3, 0x09, 0xe8, 0xf2, 0x0f, 0x53, 0x2b, 0x2d, 0x78,
+	0x66, 0xc1, 0xba, 0xe4, 0xcd, 0x50, 0xa2, 0x88, 0xca, 0x24, 0x8b, 0xa8, 0x6f, 0xb1, 0xc4, 0x6d,
+	0x10, 0x97, 0xaa, 0x25, 0xd0, 0x44, 0x53, 0x11, 0x35, 0xa8, 0x9c, 0x7e, 0x2f, 0xcd, 0xf4, 0x05,
+	0x9d, 0xc2, 0xb3, 0x72, 0xa2, 0xb8, 0x51, 0x96, 0x56, 0x6e, 0x94, 0x7d, 0xc4, 0xb6, 0x95, 0x56,
+	0x24, 0xe4, 0xaf, 0x0c, 0x4f, 0x26, 0x5b, 0x71, 0x2f, 0x12, 0x72, 0xb9, 0x4a, 0xc6, 0x1b, 0x9d,
+	0x94, 0x1e, 0x25, 0xd9, 0x73, 0x00, 0x2a, 0x17, 0x8c, 0xd6, 0x12, 0x17, 0x8c, 0xee, 0xb0, 0xc2,
+	0xa4, 0x7f, 0xea, 0x7a, 0xd3, 0x28, 0x18, 0x79, 0x21, 0x6d, 0x65, 0x6c, 0xd2, 0x3f, 0x35, 0x11,
+	0xa2, 0xef, 0xc2, 0x39, 0x81, 0xa7, 0x1f, 0xc0, 0x6f, 0xf0, 0xd5, 0x3c, 0x4f, 0x18, 0x41, 0xbe,
+	0xb2, 0x14, 0xd6, 0xd2, 0x4f, 0x52, 0xd8, 0x70, 0x47, 0x52, 0xdc, 0xfb, 0xcf, 0xde, 0xeb, 0xc1,
+	0x35, 0x4e, 0xd4, 0x4c, 0xba, 0x65, 0x15, 0x10, 0x86, 0xb9, 0xf4, 0x1e, 0xdb, 0x1c, 0xfb, 0xfe,
+	0xeb, 0xe3, 0x99, 0x92, 0x4d, 0xb3, 0x56, 0x01, 0x61, 0x48, 0xf2, 0x35, 0xb6, 0xc5, 0x6d, 0xe7,
+	0x0d, 0x89, 0x26, 0x4b, 0xfd, 0x5c, 0x04, 0x62, 0xd2, 0x7d, 0x80, 0x85, 0x96, 0xbc, 0x33, 0x16,
+	0x6f, 0x63, 0xab, 0x2e, 0x72, 0x95, 0xfe, 0x94, 0xea, 0x98, 0x98, 0x67, 0xf5, 0xa5, 0xaf, 0xdb,
+	0x8c, 0x05, 0xa7, 0xd4, 0x31, 0x09, 0xc5, 0x8e, 0x10, 0x9c, 0xf6, 0x10, 0x00, 0xe8, 0x28, 0x46,
+	0xe3, 0x1c, 0xf2, 0x91, 0x44, 0xdf, 0x60, 0xb9, 0xe0, 0xd4, 0x85, 0x0d, 0x24, 0x24, 0xe5, 0x37,
+	0x82, 0xd3, 0x0a, 0x3c, 0x72, 0xeb, 0x09, 0x14, 0x6e, 0x7b, 0x1b, 0x11, 0xa1, 0x70, 0x4c, 0x38,
+	0x06, 0xce, 0xbc, 0x21, 0x5f, 0x55, 0x3e, 0x66, 0x0d, 0x01, 0x34, 0xa6, 0x40, 0x6f, 0x88, 0x31,
+	0x05, 0x7a, 0x87, 0xe5, 0x83, 0x53, 0x3c, 0x7e, 0x84, 0x54, 0xaa, 0xe4, 0x82, 0x53, 0x93, 0x3f,
+	0x03, 0x32, 0x92, 0x48, 0xac, 0x54, 0x72, 0x91, 0x40, 0xde, 0x65, 0x9b, 0xc1, 0xa9, 0xfb, 0x2a,
+	0xe8, 0x4f, 0x3c, 0x20, 0xa1, 0x42, 0x85, 0x05, 0xa7, 0x75, 0x00, 0x99, 0xfc, 0x9a, 0x63, 0x21,
+	0x38, 0x75, 0xfd, 0x13, 0x2f, 0xe0, 0x04, 0x05, 0xa1, 0x5a, 0xf7, 0xc4, 0x0b, 0x00, 0x7f, 0x8b,
+	0x6b, 0x3e, 0x08, 0x06, 0x1c, 0xbd, 0x29, 0x06, 0xaf, 0x06, 0x03, 0xe4, 0x66, 0x03, 0x7f, 0x3c,
+	0x1e, 0x85, 0x54, 0xb7, 0xd0, 0x5e, 0x2f, 0x20, 0x0b, 0x15, 0xe2, 0xf6, 0x39, 0x2a, 0xc4, 0x8b,
+	0x8b, 0x15, 0x62, 0xe9, 0x11, 0xb6, 0xf8, 0xb1, 0x25, 0xb8, 0x50, 0xda, 0xac, 0x7a, 0x39, 0xb6,
+	0x8f, 0x71, 0x8f, 0x5d, 0x40, 0x74, 0x38, 0x2f, 0xf8, 0xbf, 0x2f, 0x1a, 0x4a, 0x3f, 0x49, 0x63,
+	0xe8, 0x28, 0xea, 0x9c, 0xa1, 0x06, 0x5f, 0x3e, 0xef, 0x55, 0x22, 0x6e, 0x72, 0x81, 0xf7, 0x4a,
+	0x06, 0x4d, 0x42, 0x9b, 0xcc, 0x97, 0x69, 0x93, 0x9d, 0x2f, 0x61, 0xbe, 0xaa, 0x5e, 0x56, 0x85,
+	0x6d, 0x92, 0xa5, 0xf8, 0x8c, 0x28, 0xb7, 0xdc, 0x59, 0xd1, 0x5c, 0x15, 0xe6, 0xb4, 0x0a, 0xf8,
+	0x6c, 0x03, 0x0f, 0x1c, 0xdb, 0xb6, 0x63, 0xcb, 0xf0, 0xc3, 0xdb, 0x97, 0xdd, 0x79, 0x3c, 0xb3,
+	0xf5, 0x9b, 0x5e, 0xd9, 0xfa, 0xcd, 0x9c, 0xb3, 0xf5, 0x7b, 0xa2, 0x2e, 0x15, 0xa4, 0xd5, 0xb7,
+	0xa0, 0x91, 0x3c, 0x4a, 0x16, 0x56, 0x6a, 0x04, 0x24, 0x78, 0xa1, 0x54, 0x7f, 0x88, 0x97, 0x86,
+	0x45, 0x85, 0x76, 0x6b, 0x05, 0x07, 0xa7, 0xc1, 0x2b, 0xc5, 0x61, 0xe9, 0xef, 0xa4, 0xd0, 0xf9,
+	0x10, 0x25, 0x37, 0x9d, 0x2b, 0x6c, 0x8d, 0xdf, 0x35, 0x14, 0x6f, 0x66, 0xf9, 0xc3, 0xc2, 0x4d,
+	0xda, 0xf4, 0xe2, 0x4d, 0x5a, 0xf0, 0x02, 0xd8, 0x19, 0xb8, 0x3c, 0xb1, 0xeb, 0xe6, 0x27, 0xfd,
+	0x53, 0x5e, 0x8d, 0x87, 0x7a, 0x31, 0xd9, 0xe4, 0xdf, 0x8a, 0x77, 0xf2, 0xef, 0xa8, 0xad, 0xa3,
+	0xc5, 0xf6, 0xc1, 0x19, 0xaf, 0xb5, 0x7e, 0x1d, 0x5f, 0x18, 0x2b, 0x6d, 0x19, 0xf4, 0xf5, 0x32,
+	0xbb, 0x44, 0x3e, 0xcb, 0x81, 0x6a, 0x18, 0x5d, 0x44, 0x44, 0xa5, 0x3f, 0xc5, 0x64, 0xae, 0x7f,
+	0x9d, 0x5d, 0xe4, 0xce, 0xab, 0x50, 0x62, 0x3c, 0x6d, 0x01, 0x58, 0xd2, 0x95, 0xfe, 0x80, 0x62,
+	0x0a, 0x07, 0x93, 0x31, 0xb5, 0x42, 0xb5, 0xb9, 0xba, 0x3d, 0x3d, 0x57, 0xb7, 0xc3, 0xa8, 0x71,
+	0x4b, 0x5c, 0x0d, 0xac, 0x2d, 0x04, 0x37, 0xa7, 0x48, 0x57, 0x62, 0x5c, 0x8d, 0x98, 0x0a, 0xa3,
+	0xab, 0x00, 0x40, 0x41, 0xf3, 0x55, 0xc5, 0xd7, 0x13, 0xc6, 0x62, 0x1b, 0x52, 0x74, 0xdd, 0x3b,
+	0xab, 0x07, 0x86, 0xfe, 0x94, 0x87, 0xdf, 0x18, 0x5d, 0xbf, 0x89, 0x6d, 0x75, 0x24, 0x39, 0xf3,
+	0xe6, 0xbe, 0x6a, 0xb9, 0xf4, 0x8a, 0x2e, 0x5c, 0xe6, 0xe7, 0xed, 0xc2, 0xfd, 0x6b, 0x72, 0x69,
+	0x24, 0x90, 0x2e, 0x4d, 0xf7, 0xd6, 0xf1, 0x9d, 0x75, 0x4a, 0xde, 0x5b, 0x6f, 0xf3, 0x97, 0xa6,
+	0xb7, 0x69, 0xd2, 0xe8, 0xf4, 0xb4, 0x4e, 0x00, 0x71, 0x96, 0x3a, 0x7e, 0x66, 0x89, 0xe3, 0x93,
+	0x7c, 0xd1, 0x3a, 0x14, 0xf2, 0xc1, 0x75, 0x24, 0x72, 0xe0, 0x8f, 0xfd, 0x80, 0x56, 0x06, 0x90,
+	0x55, 0x78, 0x2e, 0xfd, 0x58, 0x75, 0x29, 0x8c, 0xfd, 0xcf, 0x65, 0xdd, 0x95, 0x5a, 0x71, 0x83,
+	0x46, 0xb5, 0xae, 0x2c, 0xcb, 0xbe, 0x34, 0x03, 0x28, 0x6e, 0x2b, 0x32, 0xc0, 0x09, 0xbb, 0xc7,
+	0xbb, 0x86, 0x89, 0x7e, 0xa1, 0x0c, 0xbf, 0xa3, 0xe5, 0x37, 0xa4, 0x52, 0x5f, 0x52, 0x07, 0xcf,
+	0x35, 0x13, 0x45, 0xa3, 0x30, 0xa3, 0x34, 0x0a, 0xc7, 0xb8, 0x57, 0x26, 0xc6, 0xfd, 0xe5, 0x8d,
+	0x66, 0x62, 0x5b, 0xf2, 0x47, 0xc7, 0xde, 0x31, 0xd5, 0xf9, 0x34, 0x16, 0x6f, 0xea, 0x60, 0xdd,
+	0x29, 0xbc, 0x42, 0x9e, 0x9b, 0x35, 0x96, 0x89, 0x6f, 0xe4, 0xc0, 0xcf, 0x52, 0x80, 0x4a, 0x2b,
+	0x62, 0x26, 0xa3, 0xa9, 0xcb, 0xdf, 0x24, 0x54, 0x59, 0x41, 0x91, 0x4b, 0xeb, 0xb6, 0xe8, 0xb6,
+	0x0b, 0x1a, 0x60, 0xb5, 0xdb, 0x90, 0xfd, 0xdf, 0xf9, 0x57, 0x14, 0xcb, 0xc6, 0xec, 0x9f, 0xfe,
+	0x92, 0xc7, 0xfc, 0x47, 0xd4, 0xa8, 0x51, 0x38, 0x13, 0xd6, 0xff, 0x4a, 0x06, 0x3e, 0xcf, 0xe1,
+	0x6a, 0xd9, 0x5a, 0xfe, 0xed, 0x14, 0x26, 0x18, 0x4a, 0x9d, 0x7c, 0x10, 0xf0, 0x07, 0x1c, 0x2d,
+	0x4e, 0xc2, 0xfc, 0x19, 0x8f, 0x49, 0x4a, 0xdb, 0x08, 0x2f, 0x5c, 0x55, 0x12, 0xe7, 0x93, 0x55,
+	0x9d, 0xfe, 0x15, 0xfa, 0xd3, 0xd1, 0xe4, 0x21, 0x5e, 0xa7, 0x40, 0xa2, 0x43, 0x5e, 0x6b, 0x40,
+	0x14, 0xca, 0x2d, 0x6b, 0xc9, 0x45, 0xaf, 0xd2, 0x21, 0x9e, 0x57, 0x97, 0xf0, 0xcc, 0xc6, 0x6f,
+	0x97, 0x5e, 0x0d, 0xfb, 0x9c, 0xad, 0x73, 0x6a, 0xf1, 0x5d, 0xc5, 0xed, 0x55, 0x6f, 0x55, 0x39,
 	0x95, 0x45, 0xc4, 0x25, 0x73, 0xe1, 0x16, 0x15, 0xda, 0x69, 0xc5, 0x6b, 0x00, 0x69, 0xbb, 0x4c,
 	0xc2, 0x76, 0xa5, 0xb6, 0xea, 0x7c, 0xe7, 0x3b, 0xe5, 0x24, 0xc4, 0xa5, 0x93, 0xe2, 0xfe, 0x8c,
 	0x4e, 0x73, 0x8a, 0xbc, 0x5f, 0x44, 0x4e, 0xe2, 0x0c, 0x93, 0x59, 0x38, 0xc3, 0x28, 0x07, 0xa3,
@@ -9607,8 +9607,8 @@
 	0x43, 0x37, 0x96, 0x9c, 0x03, 0x26, 0xe8, 0xa0, 0x81, 0x3f, 0xf6, 0xa4, 0xb9, 0x1e, 0xb1, 0x2c,
 	0x3c, 0xaf, 0x7c, 0x67, 0x39, 0xf0, 0xa7, 0x51, 0xe0, 0x8f, 0xc7, 0x5e, 0xc0, 0xf9, 0x2c, 0x4e,
 	0x0d, 0xc3, 0x1d, 0x7a, 0x53, 0x8f, 0x06, 0x24, 0x43, 0x64, 0xad, 0xcd, 0x18, 0xd8, 0x1c, 0x96,
-	0x7e, 0x87, 0x02, 0xa2, 0x1f, 0xbe, 0x9d, 0x0e, 0xc4, 0x8e, 0xfb, 0x11, 0xdb, 0x8e, 0x6b, 0x0b,
-	0xde, 0xe3, 0xa4, 0xa6, 0x8c, 0x28, 0x2d, 0x78, 0x97, 0xf3, 0x13, 0xa6, 0x29, 0x1f, 0x3d, 0x89,
+	0x7e, 0x87, 0x02, 0xa2, 0x1f, 0xbe, 0x9d, 0x0e, 0xc4, 0x8e, 0xfb, 0x21, 0xdb, 0x8e, 0x6b, 0x0b,
+	0xde, 0xe3, 0xa4, 0xa6, 0x8c, 0x28, 0x2d, 0x78, 0x97, 0xf3, 0x63, 0xa6, 0x29, 0x1f, 0x3d, 0x89,
 	0x6b, 0x39, 0x40, 0xb7, 0x0d, 0x70, 0x9b, 0x83, 0x39, 0x65, 0x99, 0x5d, 0x4a, 0xbc, 0xc5, 0xe6,
 	0xa4, 0x58, 0xdf, 0x5d, 0x04, 0x84, 0x85, 0x70, 0x7e, 0xd5, 0xe9, 0x35, 0xdb, 0xe6, 0xfb, 0x6a,
 	0xdb, 0x1f, 0xee, 0xcd, 0x86, 0x90, 0xa9, 0xb0, 0x5d, 0x8f, 0x6f, 0x45, 0xd2, 0x23, 0xfe, 0x91,
@@ -9621,10 +9621,10 @@
 	0x21, 0xbf, 0xcb, 0xd6, 0x60, 0xc8, 0x50, 0x7f, 0xc0, 0xd6, 0x46, 0x91, 0x37, 0x11, 0xf6, 0xd9,
 	0x59, 0x3e, 0x01, 0x2a, 0x16, 0x38, 0x65, 0xe9, 0xfb, 0x6c, 0x9d, 0xdb, 0x3a, 0x84, 0x52, 0x43,
 	0x65, 0x5e, 0x65, 0x5c, 0x5e, 0xce, 0x08, 0xee, 0x27, 0x8c, 0xc9, 0xc9, 0x9e, 0x43, 0x82, 0x72,
-	0x18, 0x12, 0x12, 0x46, 0xac, 0x00, 0x12, 0xaa, 0x47, 0xfd, 0xe9, 0xa1, 0x17, 0xea, 0xdf, 0x60,
+	0x18, 0x12, 0x12, 0x46, 0xac, 0x00, 0x12, 0xaa, 0x47, 0xfd, 0xe9, 0xa1, 0x17, 0xea, 0xdf, 0x64,
 	0xeb, 0x91, 0xef, 0xf6, 0x87, 0xe2, 0x4e, 0xa8, 0x9e, 0x90, 0xc1, 0x67, 0x69, 0xad, 0x45, 0xbe,
 	0x31, 0x1c, 0xea, 0xf7, 0x59, 0x3e, 0xf2, 0xc9, 0x79, 0xc9, 0x80, 0xcb, 0xa8, 0x73, 0x91, 0x8f,
-	0x8e, 0x0c, 0x65, 0xa4, 0x26, 0xb5, 0x15, 0x03, 0x7e, 0x3a, 0x37, 0xe0, 0xf5, 0x05, 0x11, 0x38,
+	0x8e, 0x0c, 0x65, 0xa4, 0x26, 0xb5, 0x15, 0x03, 0x7e, 0x32, 0x37, 0xe0, 0xf5, 0x05, 0x11, 0x38,
 	0x39, 0x31, 0xea, 0xa3, 0xc5, 0x51, 0x57, 0xb2, 0xc8, 0xa1, 0x89, 0xeb, 0x98, 0x7b, 0x02, 0xf5,
 	0xc5, 0xcf, 0xe2, 0x42, 0x97, 0x29, 0xd9, 0x2c, 0xd7, 0xa3, 0xd0, 0x5e, 0xe6, 0x3e, 0x32, 0x19,
 	0xac, 0x74, 0x1f, 0x49, 0x61, 0xe5, 0x44, 0x8e, 0x28, 0x3d, 0x67, 0x79, 0x14, 0xda, 0x3d, 0x8e,
@@ -9676,7 +9676,7 @@
 	0xe2, 0x4c, 0xf1, 0xa4, 0xfc, 0x57, 0xe6, 0x5e, 0xd7, 0x08, 0xd3, 0xf7, 0xec, 0xc5, 0x61, 0x6d,
 	0xb7, 0xd5, 0xec, 0x3c, 0x9d, 0x1b, 0xd6, 0x96, 0xb3, 0x48, 0x53, 0x7a, 0xe5, 0x74, 0xfb, 0xa6,
 	0x96, 0x2d, 0xff, 0x69, 0x1a, 0x3f, 0x8a, 0xe1, 0xd2, 0x65, 0x9b, 0x8a, 0x18, 0xeb, 0xca, 0x00,
-	0x12, 0xf4, 0xe0, 0xb3, 0x76, 0xc5, 0x6d, 0xd4, 0x62, 0xf1, 0x04, 0xaa, 0xd7, 0xa4, 0xdf, 0x71,
+	0x12, 0xf4, 0xe0, 0xd3, 0x76, 0xc5, 0x6d, 0xd4, 0x62, 0xf1, 0x04, 0xaa, 0xd7, 0xa4, 0xdf, 0x71,
 	0x10, 0x91, 0x65, 0xe7, 0x61, 0xf5, 0x9a, 0x96, 0x13, 0xb3, 0xaf, 0xbb, 0x0f, 0x76, 0x39, 0x95,
 	0x96, 0x84, 0xd4, 0xc1, 0x1e, 0x8a, 0x78, 0x04, 0x3d, 0xd1, 0x75, 0x01, 0x7a, 0x44, 0xa0, 0x77,
 	0xe0, 0xff, 0xb1, 0x78, 0x02, 0xa6, 0xf5, 0x4b, 0x52, 0x9a, 0x83, 0x20, 0x30, 0x78, 0x01, 0x41,
@@ -9685,10 +9685,10 @@
 	0xfa, 0x25, 0x41, 0xd5, 0x33, 0xf6, 0x6c, 0x53, 0x7b, 0xf7, 0x2e, 0xa5, 0x5f, 0xe3, 0xae, 0x24,
 	0x40, 0x90, 0x33, 0xda, 0xda, 0xbb, 0x77, 0xe9, 0x72, 0x4d, 0x71, 0x1a, 0xba, 0x6a, 0xbb, 0xc5,
 	0xa3, 0xa2, 0x67, 0xb9, 0x46, 0x0d, 0xf7, 0xf0, 0x4d, 0x7c, 0xac, 0x99, 0x2d, 0xd3, 0x31, 0xb5,
-	0x54, 0x0c, 0x69, 0x77, 0x6b, 0xcd, 0xfa, 0x4b, 0x2d, 0x5d, 0xfe, 0x1c, 0x5d, 0x20, 0xfe, 0x93,
+	0x54, 0x0c, 0x69, 0x77, 0x6b, 0xcd, 0xfa, 0x4b, 0x2d, 0x5d, 0xfe, 0x0c, 0x5d, 0x20, 0xfe, 0x93,
 	0x07, 0x64, 0xd4, 0x36, 0x77, 0xfa, 0x4e, 0xcd, 0xb0, 0x40, 0x12, 0x0a, 0x6e, 0x3b, 0x6e, 0xf7,
 	0x45, 0x5b, 0x4b, 0x95, 0x5f, 0xc7, 0x7f, 0xd3, 0x80, 0xff, 0x91, 0x02, 0x92, 0xfb, 0xa2, 0x5d,
-	0x75, 0x3b, 0x2f, 0xda, 0xee, 0x67, 0x72, 0x6c, 0x01, 0x79, 0xa0, 0xa5, 0xf4, 0x1d, 0x1e, 0xfd,
+	0x75, 0x3b, 0x2f, 0xda, 0xee, 0xa7, 0x72, 0x6c, 0x01, 0x79, 0xa0, 0xa5, 0xf4, 0x1d, 0x1e, 0xfd,
 	0x00, 0xe9, 0xf6, 0xcc, 0x0e, 0x8f, 0xc0, 0x8a, 0x61, 0x37, 0xab, 0x30, 0x19, 0xfd, 0x06, 0xdf,
 	0x2d, 0x01, 0x99, 0xd8, 0x61, 0xdf, 0xbf, 0xcf, 0x94, 0xff, 0x41, 0x8e, 0x5d, 0x5e, 0xf2, 0x67,
 	0x02, 0xc8, 0xa9, 0x5f, 0x80, 0x52, 0xf5, 0x8a, 0xac, 0x4a, 0x2e, 0x50, 0x5a, 0x56, 0xe1, 0x8d,
@@ -9708,156 +9708,156 @@
 	0x6f, 0x2f, 0xc7, 0xf1, 0xc9, 0x7f, 0xa0, 0xdf, 0x66, 0x37, 0xe6, 0x65, 0x76, 0x6a, 0xae, 0x63,
 	0x58, 0xbb, 0xa6, 0xa3, 0xdd, 0x59, 0x36, 0x64, 0xa7, 0xe6, 0xda, 0xad, 0x96, 0x76, 0x77, 0x05,
 	0xce, 0x69, 0xb5, 0xb4, 0x7b, 0xb4, 0x5b, 0xcb, 0x58, 0xe9, 0xb5, 0x6c, 0x17, 0x35, 0x2d, 0xcd,
-	0xd9, 0x83, 0xa3, 0x9c, 0xaa, 0xf6, 0xe1, 0x7c, 0x78, 0x01, 0xbc, 0xd2, 0xb5, 0xb5, 0x8f, 0xe6,
-	0x10, 0xbd, 0x4a, 0xc5, 0x6d, 0xda, 0xcd, 0x9a, 0xf6, 0x31, 0x95, 0x2e, 0xd2, 0xd5, 0xf6, 0x3a,
-	0x1d, 0xb3, 0xe5, 0x36, 0x6b, 0xda, 0xd7, 0x96, 0xa9, 0x66, 0xbe, 0x70, 0x1a, 0x35, 0x4b, 0xfb,
-	0x7a, 0xf9, 0x73, 0x3c, 0xbd, 0xf0, 0x8f, 0xda, 0x47, 0x43, 0xfd, 0x22, 0x4f, 0x9a, 0xfb, 0xcd,
-	0x9a, 0xdb, 0xe9, 0x76, 0x4c, 0xbe, 0x65, 0x6d, 0x13, 0xa0, 0x67, 0x99, 0xb6, 0xd9, 0x71, 0xb4,
-	0x77, 0x77, 0xcb, 0xff, 0x31, 0x85, 0x2d, 0xc3, 0xd1, 0xec, 0xe4, 0x31, 0x7d, 0x84, 0x2d, 0x2e,
-	0xbe, 0x02, 0x75, 0xd3, 0x6c, 0x2c, 0xec, 0x49, 0x00, 0x03, 0x91, 0x2f, 0x20, 0x77, 0xe0, 0xfe,
-	0x06, 0x20, 0xd3, 0xee, 0x69, 0x69, 0x1a, 0x15, 0x9e, 0x8d, 0x3d, 0xa7, 0xa1, 0x65, 0x15, 0x40,
-	0x0d, 0x8a, 0xc0, 0x9c, 0x02, 0x80, 0x62, 0x49, 0xd3, 0x14, 0xa9, 0x56, 0x77, 0x0f, 0xf2, 0xdb,
-	0x5d, 0x45, 0x6a, 0xa3, 0xdb, 0xd3, 0x9e, 0xd0, 0xce, 0x01, 0xcf, 0x7b, 0x1d, 0xcb, 0xec, 0xc1,
-	0x36, 0xa4, 0x82, 0x6c, 0xf3, 0x19, 0x14, 0x0c, 0x3f, 0x4d, 0x27, 0xbe, 0x82, 0xa5, 0x3f, 0xcc,
-	0x05, 0x64, 0x06, 0xaf, 0xe1, 0x7b, 0x7b, 0x90, 0x09, 0x71, 0x99, 0x0c, 0x28, 0x72, 0x7b, 0x2f,
-	0x5d, 0xc7, 0x69, 0xf1, 0xf2, 0xbe, 0x40, 0xd1, 0xa2, 0xc2, 0x9b, 0x1d, 0x99, 0x0e, 0x0c, 0x2c,
-	0x4d, 0x71, 0x51, 0x9d, 0x96, 0x0c, 0x6f, 0xc3, 0x71, 0x6b, 0x66, 0x35, 0x86, 0x6b, 0x54, 0x18,
-	0x18, 0x8e, 0xdb, 0xdb, 0xb3, 0x1b, 0x3c, 0xa3, 0x69, 0x97, 0xc8, 0x98, 0x00, 0xec, 0xf6, 0x10,
-	0xa6, 0xcf, 0x11, 0x82, 0x04, 0xed, 0x72, 0x92, 0x90, 0xc3, 0xae, 0xc4, 0x84, 0xa0, 0x01, 0x2f,
-	0x9d, 0xb4, 0xab, 0x64, 0x45, 0x83, 0x8e, 0x1e, 0xda, 0x35, 0xaa, 0xad, 0x88, 0xaa, 0xf3, 0x9c,
-	0x6b, 0x73, 0x3d, 0x86, 0x82, 0x96, 0x04, 0x2d, 0x26, 0x25, 0xd6, 0x9b, 0x66, 0xab, 0xa6, 0xdd,
-	0x50, 0x86, 0x06, 0x7d, 0x7a, 0x95, 0x8a, 0x76, 0x93, 0x96, 0x86, 0xd4, 0x01, 0xd0, 0x8e, 0x5e,
-	0x14, 0xf3, 0x5e, 0xd8, 0x92, 0xf6, 0xf1, 0x6a, 0x8c, 0xd2, 0xd2, 0xa4, 0xaf, 0x9b, 0x45, 0x75,
-	0xdc, 0x6e, 0x25, 0x8e, 0xd2, 0x8c, 0x60, 0x50, 0xbc, 0xfe, 0xb7, 0xf7, 0x19, 0xda, 0xd2, 0x01,
-	0xd2, 0xe9, 0xba, 0x95, 0xbd, 0x7a, 0x9d, 0xe4, 0xfe, 0x17, 0xe1, 0xa2, 0xca, 0x17, 0x8c, 0x7c,
-	0x6d, 0xc9, 0x71, 0xd4, 0x8a, 0x18, 0xe7, 0xdb, 0x74, 0xdc, 0xdd, 0xae, 0xd3, 0xa5, 0xe3, 0x77,
-	0x8a, 0xe2, 0xa9, 0xe9, 0xb8, 0xcf, 0xad, 0xa6, 0x63, 0xaa, 0x3b, 0x1c, 0x86, 0xa0, 0xc4, 0x18,
-	0x55, 0xa7, 0xd9, 0xed, 0xd8, 0x5a, 0x26, 0x46, 0x18, 0xbd, 0x5e, 0xeb, 0xa5, 0x44, 0x64, 0x63,
-	0x44, 0xb5, 0x65, 0x1a, 0x96, 0x44, 0xac, 0x09, 0xbf, 0xa6, 0xf3, 0x8a, 0xb6, 0x4e, 0x96, 0x6a,
-	0x2e, 0xb1, 0xd4, 0x5f, 0xc7, 0x09, 0xcd, 0x7f, 0xb9, 0x48, 0x05, 0x45, 0xbd, 0x9a, 0xa8, 0x54,
-	0xea, 0x55, 0x51, 0x97, 0x88, 0x9d, 0x5a, 0x42, 0x5c, 0xdb, 0xb1, 0x9a, 0x55, 0x38, 0x9e, 0x4b,
-	0x52, 0x2a, 0x6a, 0x32, 0x31, 0x29, 0x42, 0x04, 0x69, 0xb6, 0xfc, 0xcf, 0xe8, 0x4d, 0xa9, 0x1c,
-	0x1d, 0xe3, 0x1d, 0x8d, 0x59, 0x57, 0x4b, 0x50, 0x12, 0x51, 0x77, 0x6d, 0xb3, 0x53, 0x93, 0x07,
-	0xe7, 0x58, 0x8d, 0xba, 0x5b, 0x6d, 0x98, 0xd5, 0xa7, 0x6e, 0x77, 0xdf, 0xb4, 0x5a, 0x46, 0x4f,
-	0x16, 0x0c, 0xf5, 0xba, 0x0b, 0x09, 0x06, 0x22, 0x69, 0xaf, 0xe3, 0xc4, 0x46, 0xab, 0xd7, 0x79,
-	0xa9, 0xfd, 0x54, 0x22, 0x72, 0x09, 0x44, 0xe5, 0xa5, 0x44, 0x68, 0x65, 0x1b, 0x8f, 0x3e, 0xf8,
-	0x8d, 0x39, 0xce, 0x6e, 0x77, 0xa1, 0x11, 0xb3, 0xab, 0x34, 0x62, 0x04, 0x24, 0xee, 0x9a, 0x48,
-	0x88, 0x6c, 0x84, 0x7c, 0x81, 0xef, 0x03, 0x17, 0xbe, 0x05, 0x24, 0xc3, 0xef, 0x26, 0x0d, 0xbf,
-	0xab, 0x18, 0x5e, 0x42, 0xc8, 0xbe, 0xe9, 0xb2, 0xad, 0x5e, 0x26, 0xe1, 0xee, 0x48, 0x42, 0xf0,
-	0xf4, 0x25, 0x85, 0x40, 0x90, 0xb5, 0xcc, 0x2a, 0xe4, 0x4a, 0x0c, 0x83, 0x5d, 0xf0, 0xd7, 0x5a,
-	0xd3, 0x32, 0xf9, 0xc2, 0x6d, 0xa2, 0x92, 0x8e, 0x5b, 0xaf, 0x6b, 0x99, 0x72, 0x0f, 0x1d, 0x63,
-	0xfe, 0x8b, 0x39, 0x5a, 0x1c, 0x0b, 0xac, 0xd4, 0x36, 0x9c, 0x6a, 0x43, 0xbb, 0x40, 0xee, 0x26,
-	0x1c, 0x50, 0x1e, 0xd8, 0x2c, 0x61, 0x24, 0x1e, 0xea, 0xe9, 0xf2, 0xdf, 0x4b, 0xe1, 0xbb, 0x9c,
-	0x25, 0xdf, 0xa2, 0xd1, 0x6a, 0x59, 0x96, 0xdb, 0xac, 0xb5, 0x4c, 0xd7, 0x69, 0xb6, 0xcd, 0xae,
-	0x92, 0x21, 0x2d, 0xcb, 0x6d, 0x18, 0x56, 0x4d, 0xc2, 0x85, 0x11, 0x2c, 0x59, 0x39, 0xa7, 0x63,
-	0x4a, 0x3c, 0xfa, 0x49, 0xe7, 0x93, 0x70, 0x3c, 0xbb, 0x13, 0x3c, 0x5b, 0x9e, 0xd2, 0x1f, 0x1c,
-	0xe3, 0xaf, 0xdf, 0xa9, 0x7c, 0x76, 0x7f, 0x68, 0x5a, 0x5d, 0xb9, 0xa4, 0x6d, 0x5c, 0xd2, 0x77,
-	0x3f, 0x7d, 0xbf, 0xa1, 0x5f, 0xe5, 0xb3, 0x6e, 0xbb, 0x76, 0xab, 0xfb, 0xbc, 0x67, 0x38, 0x0d,
-	0x6a, 0x7a, 0x61, 0x37, 0xac, 0xad, 0x76, 0xc3, 0xd4, 0xce, 0x57, 0x1b, 0x4f, 0xbf, 0x7c, 0xc1,
-	0x27, 0x0b, 0x5f, 0x3b, 0xa9, 0xc5, 0x7c, 0x45, 0xcd, 0x1c, 0x68, 0x4f, 0x80, 0xd1, 0x39, 0x1f,
-	0xe7, 0xc0, 0x01, 0x76, 0x15, 0xce, 0xb0, 0x6d, 0xc3, 0x7a, 0xaa, 0x89, 0xa2, 0x1c, 0xe0, 0x0b,
-	0x71, 0xfd, 0x85, 0xfa, 0xe9, 0xda, 0xa2, 0x7f, 0xb5, 0x93, 0xfe, 0xd5, 0x5e, 0xf0, 0xaf, 0xb6,
-	0xe2, 0x5f, 0x87, 0xea, 0xfd, 0x00, 0x35, 0x44, 0xdb, 0xf5, 0x44, 0x07, 0x80, 0x21, 0xe8, 0x69,
-	0xa5, 0x07, 0xa7, 0x76, 0x9a, 0x45, 0x1d, 0xa2, 0xac, 0x67, 0xcb, 0xfd, 0xb8, 0x5d, 0x77, 0x2b,
-	0x7b, 0x96, 0xed, 0xc8, 0xfd, 0xb8, 0x5d, 0x17, 0xe7, 0xf4, 0xf2, 0x3f, 0xa7, 0xeb, 0x89, 0xd8,
-	0x19, 0xe5, 0xf6, 0xc1, 0xa9, 0x9b, 0xd4, 0x24, 0x74, 0xeb, 0x46, 0xb3, 0x65, 0xc2, 0x68, 0xb8,
-	0x45, 0x9a, 0x8e, 0x5b, 0x31, 0x6a, 0xb2, 0xad, 0x23, 0x3c, 0x8f, 0xc0, 0xe4, 0x8f, 0x69, 0xaa,
-	0x94, 0x08, 0xda, 0xec, 0xd8, 0x8e, 0xb5, 0x87, 0xa8, 0x0c, 0xed, 0x3f, 0x84, 0x42, 0x87, 0xce,
-	0xc6, 0xf4, 0xa2, 0xbf, 0x26, 0xc6, 0x5d, 0xa3, 0xaa, 0xc7, 0x54, 0xfa, 0x6c, 0x02, 0xb7, 0x1e,
-	0xb3, 0x89, 0x7e, 0x9b, 0x40, 0x6d, 0xc4, 0x6c, 0xb2, 0xef, 0x26, 0x70, 0xb9, 0x98, 0x0d, 0x7b,
-	0x11, 0xdd, 0x9e, 0x40, 0xe5, 0xf5, 0x0f, 0xd8, 0x4d, 0x44, 0xd9, 0xcf, 0x9b, 0x4e, 0xb5, 0x21,
-	0x9a, 0x61, 0x84, 0x67, 0x54, 0x59, 0x9a, 0xc9, 0x76, 0x98, 0x40, 0x17, 0xe2, 0x51, 0x65, 0xdf,
-	0x4a, 0xe0, 0x36, 0xa9, 0xd3, 0x26, 0x35, 0x92, 0x5d, 0x50, 0x22, 0xd8, 0xa2, 0x3d, 0xc3, 0x5c,
-	0xe2, 0x5b, 0x15, 0xf5, 0xcf, 0x89, 0xbe, 0xea, 0x8f, 0xc6, 0xfc, 0x9a, 0x2a, 0xff, 0x6b, 0x5c,
-	0xe0, 0x8f, 0x8d, 0x7a, 0xd5, 0x6d, 0x76, 0xaa, 0xdd, 0x76, 0xcf, 0x70, 0x9a, 0xb0, 0xeb, 0x09,
-	0x2f, 0x03, 0x84, 0xd9, 0x33, 0x2d, 0x38, 0xa1, 0xfe, 0x79, 0x1a, 0xf3, 0xcb, 0x41, 0x7f, 0x28,
-	0xde, 0x50, 0xa2, 0x0c, 0x5c, 0xf0, 0x8a, 0x55, 0xe5, 0x2b, 0x42, 0xfd, 0x32, 0xd9, 0xe5, 0x10,
-	0x70, 0x5e, 0x75, 0x8b, 0xdd, 0x54, 0x00, 0x65, 0x8f, 0x52, 0x4b, 0x53, 0x13, 0x57, 0x60, 0x12,
-	0x53, 0x10, 0x1b, 0x92, 0x82, 0x44, 0x79, 0xa2, 0x33, 0x03, 0x08, 0xd4, 0x73, 0x8d, 0xe2, 0x53,
-	0x90, 0xb6, 0xcc, 0x8e, 0x3c, 0x29, 0x72, 0x18, 0x2f, 0x0d, 0x5c, 0xb3, 0xdd, 0x73, 0x5e, 0xca,
-	0xe6, 0xb0, 0x82, 0xd8, 0xeb, 0x3c, 0xed, 0x74, 0x9f, 0x77, 0xe4, 0xee, 0x22, 0xd5, 0xe7, 0x36,
-	0x6f, 0xc2, 0x12, 0xc7, 0xf3, 0x6a, 0xda, 0xae, 0xdd, 0x32, 0xf6, 0x4d, 0x8d, 0xcd, 0x4d, 0x96,
-	0x9f, 0x8d, 0x45, 0x55, 0x28, 0x81, 0xbc, 0x4d, 0xa4, 0x6d, 0xea, 0x1f, 0xb1, 0xbb, 0x04, 0x8e,
-	0x7b, 0xb4, 0x34, 0x3c, 0xec, 0x86, 0xe0, 0xc2, 0xda, 0x56, 0xf9, 0xf7, 0x33, 0x98, 0x7f, 0xc0,
-	0xde, 0x54, 0x94, 0x72, 0x73, 0xd3, 0x48, 0x86, 0x62, 0x56, 0xd1, 0x6b, 0x14, 0x40, 0x98, 0x74,
-	0x4a, 0x18, 0xd4, 0x58, 0x62, 0x50, 0x51, 0xbb, 0x28, 0x48, 0x94, 0x94, 0x99, 0x43, 0x74, 0xf7,
-	0x30, 0x36, 0xe4, 0x36, 0x2c, 0x10, 0x86, 0xb5, 0xbb, 0x07, 0xc2, 0xb4, 0x35, 0xb1, 0x04, 0x86,
-	0x58, 0x82, 0x75, 0x45, 0x45, 0xa7, 0x0b, 0x9b, 0x4e, 0x07, 0x4c, 0x8d, 0x81, 0x2e, 0xf8, 0xb1,
-	0x14, 0xcd, 0x09, 0x7f, 0x50, 0x86, 0xc3, 0x9a, 0x34, 0x4f, 0x91, 0x02, 0x18, 0x1e, 0xe4, 0xdc,
-	0x41, 0x3b, 0x76, 0xd3, 0x76, 0x60, 0x54, 0xa6, 0xdf, 0x62, 0x45, 0x42, 0xef, 0x75, 0xec, 0xbd,
-	0x1e, 0x28, 0x69, 0xd6, 0xdc, 0xae, 0x55, 0x33, 0x2d, 0xad, 0x30, 0x67, 0x0f, 0xc7, 0xd8, 0xd5,
-	0x36, 0xe7, 0x26, 0x00, 0x25, 0x06, 0x9f, 0xb2, 0x38, 0x9c, 0xab, 0x08, 0x30, 0xe0, 0xf6, 0x9c,
-	0x01, 0x79, 0x77, 0x59, 0xcc, 0xfa, 0x62, 0xf9, 0x2f, 0x52, 0xac, 0x28, 0x96, 0x47, 0x2d, 0x2e,
-	0x95, 0xb0, 0xaa, 0x34, 0xab, 0xc2, 0x9f, 0x78, 0x0e, 0x93, 0x49, 0x10, 0x11, 0xf6, 0x5e, 0x0f,
-	0xc1, 0x29, 0x85, 0x3e, 0xe1, 0x6b, 0x22, 0x0f, 0xc6, 0xf4, 0xb2, 0xfa, 0xcc, 0x50, 0xa6, 0x59,
-	0x44, 0x61, 0xff, 0x37, 0x2b, 0xb4, 0x6f, 0x2e, 0x59, 0xfe, 0xb5, 0xb9, 0x01, 0xe5, 0xf2, 0xaf,
-	0x0b, 0xc3, 0x35, 0x63, 0x47, 0xda, 0x10, 0x0b, 0xdc, 0x14, 0x0b, 0x9c, 0x2b, 0xff, 0x0b, 0xfa,
-	0xf0, 0x00, 0x26, 0x8f, 0x7d, 0x2e, 0xd5, 0x35, 0xdb, 0xcb, 0x5c, 0xb3, 0xad, 0xba, 0x66, 0x12,
-	0x06, 0xcb, 0x23, 0xe3, 0x9f, 0x60, 0xb5, 0x16, 0x6c, 0x77, 0x16, 0x35, 0xb3, 0xe7, 0x90, 0x9d,
-	0xe7, 0x0a, 0x32, 0x2b, 0x7c, 0x88, 0x90, 0xcf, 0x9b, 0xad, 0x5a, 0xd5, 0xb0, 0x6a, 0x50, 0x56,
-	0x93, 0xcf, 0x11, 0x06, 0x0f, 0x2b, 0xeb, 0x73, 0xd0, 0x7d, 0xa3, 0xb5, 0x67, 0x6a, 0x1b, 0x73,
-	0xca, 0x73, 0xd1, 0xa2, 0x63, 0x24, 0x80, 0x3d, 0xcb, 0xb4, 0xcc, 0x67, 0x5a, 0x5e, 0x91, 0x50,
-	0xdb, 0xeb, 0x91, 0x5c, 0x26, 0xec, 0xd4, 0x16, 0x76, 0x2a, 0x94, 0xff, 0x88, 0x9c, 0x24, 0x2e,
-	0x97, 0x95, 0xdc, 0x8b, 0x03, 0xd6, 0xdb, 0x75, 0xe9, 0x25, 0xb2, 0x7c, 0xe2, 0x40, 0x4a, 0xf3,
-	0x7b, 0xad, 0x96, 0xcc, 0x9b, 0x1c, 0x3e, 0xe7, 0x22, 0x8a, 0x18, 0x51, 0x4b, 0x67, 0x44, 0x41,
-	0xde, 0x96, 0xf9, 0x5b, 0x96, 0xd1, 0x52, 0x02, 0x55, 0x66, 0x6b, 0xf3, 0x88, 0x6a, 0xb7, 0xdd,
-	0x36, 0x3a, 0x60, 0x27, 0x9c, 0xbc, 0x44, 0xd4, 0x5b, 0xc6, 0xae, 0xad, 0x6d, 0x94, 0xff, 0x20,
-	0x83, 0x5f, 0xae, 0xc5, 0x95, 0xb0, 0x3a, 0x2b, 0x54, 0x74, 0x17, 0x98, 0x70, 0xc3, 0x35, 0x5f,
-	0x34, 0x6d, 0xc7, 0x96, 0xef, 0x2a, 0x38, 0x46, 0x94, 0x99, 0x18, 0xeb, 0x29, 0xf2, 0x65, 0x8e,
-	0x7a, 0x6e, 0x36, 0x77, 0x1b, 0x8e, 0x1a, 0xd4, 0x32, 0x0c, 0x38, 0x1e, 0x52, 0x44, 0xb7, 0x8e,
-	0x9c, 0x70, 0xd6, 0xc2, 0x1d, 0x53, 0x45, 0x55, 0xf6, 0x20, 0xcf, 0xc2, 0xc9, 0xe1, 0x2e, 0xbb,
-	0x25, 0x70, 0xd5, 0x86, 0xd1, 0xec, 0x34, 0x3b, 0xbb, 0x09, 0xc1, 0x6b, 0x94, 0x64, 0x70, 0x60,
-	0x9e, 0x65, 0x54, 0xf4, 0xba, 0x28, 0xc3, 0x01, 0xdd, 0xea, 0x76, 0x7b, 0x72, 0xc3, 0xd8, 0x55,
-	0x16, 0x8d, 0x26, 0x91, 0x53, 0x51, 0x7c, 0x34, 0xb3, 0x26, 0x73, 0x19, 0xfa, 0xcb, 0xae, 0xb4,
-	0x3d, 0x44, 0x86, 0x68, 0x2f, 0xee, 0xce, 0x1b, 0xbe, 0x40, 0x4e, 0x20, 0x11, 0x38, 0x21, 0x6d,
-	0x93, 0x16, 0x44, 0xc2, 0xb9, 0xc6, 0xf2, 0xdd, 0xe2, 0x6e, 0xbc, 0xd8, 0xdb, 0xe5, 0xdf, 0x25,
-	0xc7, 0x13, 0x7f, 0x29, 0x38, 0xb1, 0x44, 0xa8, 0x4d, 0x4f, 0x88, 0xa1, 0x26, 0x2f, 0x6a, 0x23,
-	0xa1, 0x0d, 0x8c, 0x31, 0x59, 0xcb, 0xf6, 0x62, 0x35, 0xf9, 0x8b, 0x52, 0xb1, 0x28, 0x12, 0x6e,
-	0xd4, 0xf6, 0x4d, 0xcb, 0x69, 0xda, 0xa6, 0x74, 0xbf, 0x9e, 0xe2, 0x7e, 0xe5, 0xbf, 0x8a, 0x4e,
-	0x23, 0xff, 0x9c, 0x76, 0x42, 0x23, 0x7a, 0x47, 0x98, 0xf0, 0x6e, 0x19, 0x0c, 0xce, 0xdc, 0xc8,
-	0xe2, 0x5d, 0x86, 0x13, 0x8b, 0x4f, 0x97, 0x7f, 0x88, 0xf3, 0xc5, 0xdb, 0x5f, 0xfe, 0x6c, 0xc9,
-	0x7c, 0x9f, 0x75, 0x93, 0xf3, 0xc5, 0x31, 0x25, 0x14, 0x37, 0x24, 0x21, 0x9b, 0x83, 0x85, 0xec,
-	0xbf, 0xc6, 0x6e, 0x2f, 0xfc, 0x61, 0xf1, 0x25, 0xea, 0xdb, 0xd5, 0x44, 0xa0, 0x88, 0x02, 0x48,
-	0x82, 0x31, 0xf5, 0xa1, 0x7c, 0x0e, 0x8c, 0x75, 0xbf, 0x35, 0x7f, 0xf7, 0x2b, 0x21, 0x9e, 0x0e,
-	0x70, 0x56, 0xbd, 0x0a, 0x75, 0x37, 0xb7, 0x8c, 0x02, 0xe2, 0x1e, 0x1b, 0x1f, 0xe1, 0x2c, 0x1a,
-	0x0d, 0xea, 0x4b, 0x2d, 0x5d, 0xfe, 0xc3, 0x34, 0xda, 0x3d, 0x3e, 0x56, 0x2c, 0xa6, 0xa0, 0x76,
-	0x32, 0x05, 0x61, 0x04, 0x73, 0x20, 0x56, 0xa1, 0x14, 0xc1, 0x29, 0x5a, 0xf1, 0xb6, 0x1a, 0xc1,
-	0xd8, 0xaf, 0x48, 0xab, 0x28, 0x11, 0x17, 0x88, 0x12, 0x15, 0x45, 0x7b, 0xde, 0xcd, 0xb3, 0x64,
-	0xb6, 0x76, 0x32, 0xbf, 0x88, 0xa4, 0x2d, 0xc1, 0x96, 0xe1, 0x98, 0x32, 0x19, 0xb5, 0xe3, 0x98,
-	0xb0, 0xf8, 0xdb, 0xfd, 0x39, 0xe2, 0x0a, 0x48, 0xce, 0xd1, 0x76, 0x91, 0x80, 0xba, 0x35, 0xd3,
-	0x31, 0x9a, 0x2d, 0x2d, 0xaf, 0xaa, 0x4a, 0x19, 0x83, 0x6b, 0x6a, 0x6b, 0x4c, 0x9d, 0xba, 0x48,
-	0x26, 0x46, 0xa7, 0x66, 0x6b, 0x85, 0xf2, 0xbf, 0x4c, 0x2d, 0xf9, 0x16, 0x31, 0x5c, 0xe6, 0xc4,
-	0xf5, 0x39, 0x27, 0xa6, 0xf7, 0xd6, 0x02, 0x2c, 0x77, 0x70, 0xb1, 0x62, 0x31, 0x03, 0x64, 0x05,
-	0x79, 0x59, 0xa2, 0xae, 0x78, 0x4d, 0x66, 0x5e, 0x88, 0xac, 0x43, 0xb2, 0x22, 0x16, 0xea, 0xd2,
-	0x9f, 0xd6, 0xca, 0xff, 0x99, 0x76, 0xe7, 0xe4, 0x5f, 0x2a, 0x10, 0xc7, 0x3d, 0x38, 0x69, 0xdb,
-	0xd5, 0xf8, 0xf8, 0xc7, 0xef, 0x8f, 0x3c, 0x97, 0xef, 0xa6, 0xdb, 0x3d, 0xd7, 0xd8, 0xdd, 0xb5,
-	0xcc, 0x5d, 0x83, 0x1f, 0xd2, 0xe9, 0xc4, 0x27, 0x6e, 0xa3, 0x64, 0x84, 0xc1, 0x7b, 0xc9, 0xb7,
-	0xb8, 0x92, 0x0c, 0xc3, 0x68, 0x2d, 0x06, 0x60, 0x0a, 0x5c, 0x8f, 0xf9, 0xc4, 0x69, 0xdf, 0xae,
-	0x6a, 0x1b, 0xc2, 0xe0, 0x02, 0x2a, 0xce, 0x34, 0xb2, 0xd3, 0xdb, 0xee, 0x91, 0x1b, 0xe5, 0xc5,
-	0x91, 0x9a, 0x00, 0x22, 0x19, 0xb0, 0x58, 0x04, 0xc2, 0xa5, 0x88, 0x42, 0x8c, 0x49, 0x1e, 0x98,
-	0xe4, 0x15, 0x0d, 0x31, 0x09, 0xae, 0x8b, 0x38, 0x3e, 0xb5, 0x7b, 0xcb, 0x8e, 0xe6, 0x3b, 0x4b,
-	0xff, 0x42, 0x85, 0x2b, 0xbe, 0xb6, 0x47, 0xc6, 0x3a, 0x9c, 0xe7, 0x16, 0x5e, 0xf3, 0x0a, 0x78,
-	0xbb, 0x6b, 0x99, 0x5a, 0xaa, 0xdc, 0xa2, 0x78, 0x4c, 0xfe, 0xd5, 0x09, 0x92, 0x24, 0x34, 0xae,
-	0xe3, 0xdd, 0x06, 0x45, 0x16, 0xb9, 0xbf, 0xc4, 0x90, 0xb4, 0x3f, 0xcb, 0xa0, 0x6a, 0x2b, 0xbe,
-	0xc7, 0x96, 0x7e, 0xd3, 0x73, 0xd4, 0x53, 0x34, 0x24, 0x27, 0xdc, 0xf9, 0x16, 0x30, 0x6e, 0xbb,
-	0x69, 0xdb, 0xb2, 0x22, 0xe5, 0xe8, 0x8e, 0xf9, 0x82, 0xce, 0x9c, 0xb6, 0x96, 0xa6, 0xba, 0x7b,
-	0x1e, 0x81, 0x6c, 0x19, 0x71, 0x1f, 0x01, 0xb0, 0xc9, 0xa6, 0x68, 0x96, 0xf6, 0xf8, 0x45, 0x14,
-	0xb2, 0xae, 0xa9, 0xac, 0xc9, 0xb6, 0xe9, 0xba, 0xca, 0x9a, 0x40, 0x21, 0xeb, 0x86, 0x8c, 0x81,
-	0x9e, 0x43, 0x0d, 0x81, 0x9c, 0x0c, 0x46, 0x18, 0x4d, 0x16, 0x84, 0x4c, 0x5c, 0x30, 0x89, 0x95,
-	0xb0, 0x4d, 0x07, 0xcb, 0x37, 0x71, 0xbe, 0x5e, 0x82, 0xc3, 0x61, 0xb6, 0x54, 0x66, 0x54, 0x43,
-	0x32, 0x6f, 0xab, 0xcc, 0x49, 0x1c, 0x32, 0x5f, 0xd4, 0x6f, 0xc6, 0x2b, 0x91, 0xf0, 0xaf, 0x9f,
-	0xbd, 0xcf, 0xe8, 0x77, 0xe2, 0xb5, 0x50, 0x71, 0xc8, 0x0a, 0x0e, 0xf8, 0x7b, 0xf4, 0x27, 0x3a,
-	0xb0, 0xe4, 0x4a, 0xdc, 0xc8, 0xa0, 0xbe, 0x60, 0xbd, 0xba, 0x70, 0x7b, 0x05, 0x60, 0xd8, 0x3e,
-	0xa4, 0xa2, 0x4a, 0x4b, 0x89, 0x6a, 0x29, 0xc6, 0xb4, 0x9a, 0xfb, 0x66, 0xc7, 0xb4, 0xe3, 0xeb,
-	0x19, 0xbb, 0x4a, 0xb1, 0xa4, 0x65, 0x15, 0x06, 0x59, 0x41, 0xf1, 0xbe, 0xad, 0xad, 0xe5, 0xca,
-	0xaf, 0xb1, 0x21, 0x10, 0x5f, 0x79, 0xc7, 0x5b, 0xee, 0x62, 0x0b, 0x55, 0x1b, 0x64, 0xa8, 0xe5,
-	0x33, 0xc7, 0x6d, 0x37, 0x3b, 0x98, 0xd1, 0x53, 0x0a, 0xcc, 0x78, 0x81, 0xb0, 0x34, 0xc5, 0xe0,
-	0xb3, 0x25, 0x2d, 0x8c, 0x1f, 0xe1, 0x69, 0x78, 0xee, 0xce, 0x33, 0xf9, 0x69, 0xd5, 0xc2, 0x7e,
-	0x4a, 0xa7, 0x5b, 0x6d, 0x18, 0x9d, 0x5d, 0x53, 0x36, 0xf3, 0x05, 0xc2, 0x7c, 0xb6, 0x67, 0xb4,
-	0xe4, 0x05, 0x35, 0x01, 0x6d, 0x1b, 0x36, 0xee, 0x5e, 0x49, 0x62, 0x3c, 0xd3, 0x67, 0x2a, 0x7b,
-	0xec, 0x03, 0x3f, 0x38, 0xe4, 0xb7, 0x00, 0x07, 0x7e, 0x30, 0xfc, 0x14, 0xff, 0x17, 0x19, 0x79,
-	0x2b, 0xf0, 0xc1, 0xe7, 0x3f, 0xfc, 0xfc, 0x70, 0x14, 0x1d, 0x1d, 0x1f, 0x7c, 0x3a, 0xf0, 0x27,
-	0xf7, 0x05, 0xd9, 0x7d, 0x24, 0xfb, 0x15, 0xfa, 0xcf, 0x66, 0x4e, 0x1e, 0xdd, 0x3f, 0xf4, 0xd5,
-	0xff, 0x72, 0xe6, 0x60, 0x9d, 0x63, 0x3e, 0xff, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x5b, 0x23,
-	0xc1, 0x5b, 0x96, 0x66, 0x00, 0x00,
+	0xd9, 0x83, 0xa3, 0x9c, 0xaa, 0xf6, 0xb5, 0xf9, 0xf0, 0x02, 0x78, 0xa5, 0x6b, 0x6b, 0x1f, 0xce,
+	0x21, 0x7a, 0x95, 0x8a, 0xdb, 0xb4, 0x9b, 0x35, 0xed, 0x23, 0x2a, 0x5d, 0xa4, 0xab, 0xed, 0x75,
+	0x3a, 0x66, 0xcb, 0x6d, 0xd6, 0xb4, 0xaf, 0x2f, 0x53, 0xcd, 0x7c, 0xe1, 0x34, 0x6a, 0x96, 0xf6,
+	0x8d, 0xf2, 0x67, 0x78, 0x7a, 0xe1, 0x1f, 0xb5, 0x8f, 0x86, 0xfa, 0x45, 0x9e, 0x34, 0xf7, 0x9b,
+	0x35, 0xb7, 0xd3, 0xed, 0x98, 0x7c, 0xcb, 0xda, 0x26, 0x40, 0xcf, 0x32, 0x6d, 0xb3, 0xe3, 0x68,
+	0xef, 0xee, 0x96, 0xff, 0x63, 0x0a, 0x5b, 0x86, 0xa3, 0xd9, 0xc9, 0x63, 0xfa, 0x08, 0x5b, 0x5c,
+	0x7c, 0x05, 0xea, 0xa6, 0xd9, 0x58, 0xd8, 0x93, 0x00, 0x06, 0x22, 0x5f, 0x40, 0xee, 0xc0, 0xfd,
+	0x0d, 0x40, 0xa6, 0xdd, 0xd3, 0xd2, 0x34, 0x2a, 0x3c, 0x1b, 0x7b, 0x4e, 0x43, 0xcb, 0x2a, 0x80,
+	0x1a, 0x14, 0x81, 0x39, 0x05, 0x00, 0xc5, 0x92, 0xa6, 0x29, 0x52, 0xad, 0xee, 0x1e, 0xe4, 0xb7,
+	0xbb, 0x8a, 0xd4, 0x46, 0xb7, 0xa7, 0x3d, 0xa1, 0x9d, 0x03, 0x9e, 0xf7, 0x3a, 0x96, 0xd9, 0x83,
+	0x6d, 0x48, 0x05, 0xd9, 0xe6, 0x33, 0x28, 0x18, 0x7e, 0x9a, 0x4e, 0x7c, 0x05, 0x4b, 0x7f, 0x98,
+	0x0b, 0xc8, 0x0c, 0x5e, 0xc3, 0xf7, 0xf6, 0x20, 0x13, 0xe2, 0x32, 0x19, 0x50, 0xe4, 0xf6, 0x5e,
+	0xba, 0x8e, 0xd3, 0xe2, 0xe5, 0x7d, 0x81, 0xa2, 0x45, 0x85, 0x37, 0x3b, 0x32, 0x1d, 0x18, 0x58,
+	0x9a, 0xe2, 0xa2, 0x3a, 0x2d, 0x19, 0xde, 0x86, 0xe3, 0xd6, 0xcc, 0x6a, 0x0c, 0xd7, 0xa8, 0x30,
+	0x30, 0x1c, 0xb7, 0xb7, 0x67, 0x37, 0x78, 0x46, 0xd3, 0x2e, 0x91, 0x31, 0x01, 0xd8, 0xed, 0x21,
+	0x4c, 0x9f, 0x23, 0x04, 0x09, 0xda, 0xe5, 0x24, 0x21, 0x87, 0x5d, 0x89, 0x09, 0x41, 0x03, 0x5e,
+	0x3a, 0x69, 0x57, 0xc9, 0x8a, 0x06, 0x1d, 0x3d, 0xb4, 0x6b, 0x54, 0x5b, 0x11, 0x55, 0xe7, 0x39,
+	0xd7, 0xe6, 0x7a, 0x0c, 0x05, 0x2d, 0x09, 0x5a, 0x4c, 0x4a, 0xac, 0x37, 0xcd, 0x56, 0x4d, 0xbb,
+	0xa1, 0x0c, 0x0d, 0xfa, 0xf4, 0x2a, 0x15, 0xed, 0x26, 0x2d, 0x0d, 0xa9, 0x03, 0xa0, 0x1d, 0xbd,
+	0x28, 0xe6, 0xbd, 0xb0, 0x25, 0xed, 0xe3, 0xd5, 0x18, 0xa5, 0xa5, 0x49, 0x5f, 0x37, 0x8b, 0xea,
+	0xb8, 0xdd, 0x4a, 0x1c, 0xa5, 0x19, 0xc1, 0xa0, 0x78, 0xfd, 0x6f, 0xef, 0x33, 0xb4, 0xa5, 0x03,
+	0xa4, 0xd3, 0x75, 0x2b, 0x7b, 0xf5, 0x3a, 0xc9, 0xfd, 0x2f, 0xc2, 0x45, 0x95, 0x2f, 0x18, 0xf9,
+	0xda, 0x92, 0xe3, 0xa8, 0x15, 0x31, 0xce, 0xb7, 0xe9, 0xb8, 0xbb, 0x5d, 0xa7, 0x4b, 0xc7, 0xef,
+	0x14, 0xc5, 0x53, 0xd3, 0x71, 0x9f, 0x5b, 0x4d, 0xc7, 0x54, 0x77, 0x38, 0x0c, 0x41, 0x89, 0x31,
+	0xaa, 0x4e, 0xb3, 0xdb, 0xb1, 0xb5, 0x4c, 0x8c, 0x30, 0x7a, 0xbd, 0xd6, 0x4b, 0x89, 0xc8, 0xc6,
+	0x88, 0x6a, 0xcb, 0x34, 0x2c, 0x89, 0x58, 0x13, 0x7e, 0x4d, 0xe7, 0x15, 0x6d, 0x9d, 0x2c, 0xd5,
+	0x5c, 0x62, 0xa9, 0xbf, 0x8e, 0x13, 0x9a, 0xff, 0x72, 0x91, 0x0a, 0x8a, 0x7a, 0x35, 0x51, 0xa9,
+	0xd4, 0xab, 0xa2, 0x2e, 0x11, 0x3b, 0xb5, 0x84, 0xb8, 0xb6, 0x63, 0x35, 0xab, 0x70, 0x3c, 0x97,
+	0xa4, 0x54, 0xd4, 0x64, 0x62, 0x52, 0x84, 0x08, 0xd2, 0x6c, 0xf9, 0x9f, 0xd1, 0x9b, 0x52, 0x39,
+	0x3a, 0xc6, 0x3b, 0x1a, 0xb3, 0xae, 0x96, 0xa0, 0x24, 0xa2, 0xee, 0xda, 0x66, 0xa7, 0x26, 0x0f,
+	0xce, 0xb1, 0x1a, 0x75, 0xb7, 0xda, 0x30, 0xab, 0x4f, 0xdd, 0xee, 0xbe, 0x69, 0xb5, 0x8c, 0x9e,
+	0x2c, 0x18, 0xea, 0x75, 0x17, 0x12, 0x0c, 0x44, 0xd2, 0x5e, 0xc7, 0x89, 0x8d, 0x56, 0xaf, 0xf3,
+	0x52, 0xfb, 0xa9, 0x44, 0xe4, 0x12, 0x88, 0xca, 0x4b, 0x89, 0xd0, 0xca, 0x36, 0x1e, 0x7d, 0xf0,
+	0x1b, 0x73, 0x9c, 0xdd, 0xee, 0x42, 0x23, 0x66, 0x57, 0x69, 0xc4, 0x08, 0x48, 0xdc, 0x35, 0x91,
+	0x10, 0xd9, 0x08, 0xf9, 0x02, 0xdf, 0x07, 0x2e, 0x7c, 0x0b, 0x48, 0x86, 0xdf, 0x4d, 0x1a, 0x7e,
+	0x57, 0x31, 0xbc, 0x84, 0x90, 0x7d, 0xd3, 0x65, 0x5b, 0xbd, 0x4c, 0xc2, 0xdd, 0x91, 0x84, 0xe0,
+	0xe9, 0x4b, 0x0a, 0x81, 0x20, 0x6b, 0x99, 0x55, 0xc8, 0x95, 0x18, 0x06, 0xbb, 0xe0, 0xaf, 0xb5,
+	0xa6, 0x65, 0xf2, 0x85, 0xdb, 0x44, 0x25, 0x1d, 0xb7, 0x5e, 0xd7, 0x32, 0xe5, 0x1e, 0x3a, 0xc6,
+	0xfc, 0x17, 0x73, 0xb4, 0x38, 0x16, 0x58, 0xa9, 0x6d, 0x38, 0xd5, 0x86, 0x76, 0x81, 0xdc, 0x4d,
+	0x38, 0xa0, 0x3c, 0xb0, 0x59, 0xc2, 0x48, 0x3c, 0xd4, 0xd3, 0xe5, 0xbf, 0x97, 0xc2, 0x77, 0x39,
+	0x4b, 0xbe, 0x45, 0xa3, 0xd5, 0xb2, 0x2c, 0xb7, 0x59, 0x6b, 0x99, 0xae, 0xd3, 0x6c, 0x9b, 0x5d,
+	0x25, 0x43, 0x5a, 0x96, 0xdb, 0x30, 0xac, 0x9a, 0x84, 0x0b, 0x23, 0x58, 0xb2, 0x72, 0x4e, 0xc7,
+	0x94, 0x78, 0xf4, 0x93, 0xce, 0x27, 0xe1, 0x78, 0x76, 0x27, 0x78, 0xb6, 0x3c, 0xa5, 0x3f, 0x38,
+	0xc6, 0x5f, 0xbf, 0x53, 0xf9, 0xec, 0xfe, 0xd0, 0xb4, 0xba, 0x72, 0x49, 0xdb, 0xb8, 0xa4, 0xef,
+	0x7e, 0xfa, 0x7e, 0x43, 0xbf, 0xca, 0x67, 0xdd, 0x76, 0xed, 0x56, 0xf7, 0x79, 0xcf, 0x70, 0x1a,
+	0xd4, 0xf4, 0xc2, 0x6e, 0x58, 0x5b, 0xed, 0x86, 0xa9, 0x9d, 0xaf, 0x36, 0x9e, 0x7e, 0xf9, 0x82,
+	0x4f, 0x16, 0xbe, 0x76, 0x52, 0x8b, 0xf9, 0x8a, 0x9a, 0x39, 0xd0, 0x9e, 0x00, 0xa3, 0x73, 0x3e,
+	0xce, 0x81, 0x03, 0xec, 0x2a, 0x9c, 0x61, 0xdb, 0x86, 0xf5, 0x54, 0x13, 0x45, 0x39, 0xc0, 0x17,
+	0xe2, 0xfa, 0x0b, 0xf5, 0xd3, 0xb5, 0x45, 0xff, 0x6a, 0x27, 0xfd, 0xab, 0xbd, 0xe0, 0x5f, 0x6d,
+	0xc5, 0xbf, 0x0e, 0xd5, 0xfb, 0x01, 0x6a, 0x88, 0xb6, 0xeb, 0x89, 0x0e, 0x00, 0x43, 0xd0, 0xd3,
+	0x4a, 0x0f, 0x4e, 0xed, 0x34, 0x8b, 0x3a, 0x44, 0x59, 0xcf, 0x96, 0xfb, 0x71, 0xbb, 0xee, 0x56,
+	0xf6, 0x2c, 0xdb, 0x91, 0xfb, 0x71, 0xbb, 0x2e, 0xce, 0xe9, 0xe5, 0x7f, 0x4e, 0xd7, 0x13, 0xb1,
+	0x33, 0xca, 0xed, 0x83, 0x53, 0x37, 0xa9, 0x49, 0xe8, 0xd6, 0x8d, 0x66, 0xcb, 0x84, 0xd1, 0x70,
+	0x8b, 0x34, 0x1d, 0xb7, 0x62, 0xd4, 0x64, 0x5b, 0x47, 0x78, 0x1e, 0x81, 0xc9, 0x1f, 0xd3, 0x54,
+	0x29, 0x11, 0xb4, 0xd9, 0xb1, 0x1d, 0x6b, 0x0f, 0x51, 0x19, 0xda, 0x7f, 0x08, 0x85, 0x0e, 0x9d,
+	0x8d, 0xe9, 0x45, 0x7f, 0x4d, 0x8c, 0xbb, 0x46, 0x55, 0x8f, 0xa9, 0xf4, 0xd9, 0x04, 0x6e, 0x3d,
+	0x66, 0x13, 0xfd, 0x36, 0x81, 0xda, 0x88, 0xd9, 0x64, 0xdf, 0x4d, 0xe0, 0x72, 0x31, 0x1b, 0xf6,
+	0x22, 0xba, 0x3d, 0x81, 0xca, 0xeb, 0x1f, 0xb0, 0x9b, 0x88, 0xb2, 0x9f, 0x37, 0x9d, 0x6a, 0x43,
+	0x34, 0xc3, 0x08, 0xcf, 0xa8, 0xb2, 0x34, 0x93, 0xed, 0x30, 0x81, 0x2e, 0xc4, 0xa3, 0xca, 0xbe,
+	0x95, 0xc0, 0x6d, 0x52, 0xa7, 0x4d, 0x6a, 0x24, 0xbb, 0xa0, 0x44, 0xb0, 0x45, 0x7b, 0x86, 0xb9,
+	0xc4, 0xb7, 0x2a, 0xea, 0x9f, 0x13, 0x7d, 0xd5, 0x1f, 0x8d, 0xf9, 0x35, 0x55, 0xfe, 0xd7, 0xb8,
+	0xc0, 0x1f, 0x1b, 0xf5, 0xaa, 0xdb, 0xec, 0x54, 0xbb, 0xed, 0x9e, 0xe1, 0x34, 0x61, 0xd7, 0x13,
+	0x5e, 0x06, 0x08, 0xb3, 0x67, 0x5a, 0x70, 0x42, 0xfd, 0xf3, 0x34, 0xe6, 0x97, 0x83, 0xfe, 0x50,
+	0xbc, 0xa1, 0x44, 0x19, 0xb8, 0xe0, 0x15, 0xab, 0xca, 0x57, 0x84, 0xfa, 0x65, 0xb2, 0xcb, 0x21,
+	0xe0, 0xbc, 0xea, 0x16, 0xbb, 0xa9, 0x00, 0xca, 0x1e, 0xa5, 0x96, 0xa6, 0x26, 0xae, 0xc0, 0x24,
+	0xa6, 0x20, 0x36, 0x24, 0x05, 0x89, 0xf2, 0x44, 0x67, 0x06, 0x10, 0xa8, 0xe7, 0x1a, 0xc5, 0xa7,
+	0x20, 0x6d, 0x99, 0x1d, 0x79, 0x52, 0xe4, 0x30, 0x5e, 0x1a, 0xb8, 0x66, 0xbb, 0xe7, 0xbc, 0x94,
+	0xcd, 0x61, 0x05, 0xb1, 0xd7, 0x79, 0xda, 0xe9, 0x3e, 0xef, 0xc8, 0xdd, 0x45, 0xaa, 0xcf, 0x6d,
+	0xde, 0x84, 0x25, 0x8e, 0xe7, 0xd5, 0xb4, 0x5d, 0xbb, 0x65, 0xec, 0x9b, 0x1a, 0x9b, 0x9b, 0x2c,
+	0x3f, 0x1b, 0x8b, 0xaa, 0x50, 0x02, 0x79, 0x9b, 0x48, 0xdb, 0xd4, 0x3f, 0x64, 0x77, 0x09, 0x1c,
+	0xf7, 0x68, 0x69, 0x78, 0xd8, 0x0d, 0xc1, 0x85, 0xb5, 0xad, 0xf2, 0xef, 0x67, 0x30, 0xff, 0x80,
+	0xbd, 0xa9, 0x28, 0xe5, 0xe6, 0xa6, 0x91, 0x0c, 0xc5, 0xac, 0xa2, 0xd7, 0x28, 0x80, 0x30, 0xe9,
+	0x94, 0x30, 0xa8, 0xb1, 0xc4, 0xa0, 0xa2, 0x76, 0x51, 0x90, 0x28, 0x29, 0x33, 0x87, 0xe8, 0xee,
+	0x61, 0x6c, 0xc8, 0x6d, 0x58, 0x20, 0x0c, 0x6b, 0x77, 0x0f, 0x84, 0x69, 0x6b, 0x62, 0x09, 0x0c,
+	0xb1, 0x04, 0xeb, 0x8a, 0x8a, 0x4e, 0x17, 0x36, 0x9d, 0x0e, 0x98, 0x1a, 0x03, 0x5d, 0xf0, 0x63,
+	0x29, 0x9a, 0x13, 0xfe, 0xa0, 0x0c, 0x87, 0x35, 0x69, 0x9e, 0x22, 0x05, 0x30, 0x3c, 0xc8, 0xb9,
+	0x83, 0x76, 0xec, 0xa6, 0xed, 0xc0, 0xa8, 0x4c, 0xbf, 0xc5, 0x8a, 0x84, 0xde, 0xeb, 0xd8, 0x7b,
+	0x3d, 0x50, 0xd2, 0xac, 0xb9, 0x5d, 0xab, 0x66, 0x5a, 0x5a, 0x61, 0xce, 0x1e, 0x8e, 0xb1, 0xab,
+	0x6d, 0xce, 0x4d, 0x00, 0x4a, 0x0c, 0x3e, 0x65, 0x71, 0x38, 0x57, 0x11, 0x60, 0xc0, 0xed, 0x39,
+	0x03, 0xf2, 0xee, 0xb2, 0x98, 0xf5, 0xc5, 0xf2, 0x5f, 0xa4, 0x58, 0x51, 0x2c, 0x8f, 0x5a, 0x5c,
+	0x2a, 0x61, 0x55, 0x69, 0x56, 0x85, 0x3f, 0xf1, 0x1c, 0x26, 0x93, 0x20, 0x22, 0xec, 0xbd, 0x1e,
+	0x82, 0x53, 0x0a, 0x7d, 0xc2, 0xd7, 0x44, 0x1e, 0x8c, 0xe9, 0x65, 0xf5, 0x99, 0xa1, 0x4c, 0xb3,
+	0x88, 0xc2, 0xfe, 0x6f, 0x56, 0x68, 0xdf, 0x5c, 0xb2, 0xfc, 0x6b, 0x73, 0x03, 0xca, 0xe5, 0x5f,
+	0x17, 0x86, 0x6b, 0xc6, 0x8e, 0xb4, 0x21, 0x16, 0xb8, 0x29, 0x16, 0x38, 0x57, 0xfe, 0x17, 0xf4,
+	0xe1, 0x01, 0x4c, 0x1e, 0xfb, 0x5c, 0xaa, 0x6b, 0xb6, 0x97, 0xb9, 0x66, 0x5b, 0x75, 0xcd, 0x24,
+	0x0c, 0x96, 0x47, 0xc6, 0x3f, 0xc1, 0x6a, 0x2d, 0xd8, 0xee, 0x2c, 0x6a, 0x66, 0xcf, 0x21, 0x3b,
+	0xcf, 0x15, 0x64, 0x56, 0xf8, 0x10, 0x21, 0x9f, 0x37, 0x5b, 0xb5, 0xaa, 0x61, 0xd5, 0xa0, 0xac,
+	0x26, 0x9f, 0x23, 0x0c, 0x1e, 0x56, 0xd6, 0xe7, 0xa0, 0xfb, 0x46, 0x6b, 0xcf, 0xd4, 0x36, 0xe6,
+	0x94, 0xe7, 0xa2, 0x45, 0xc7, 0x48, 0x00, 0x7b, 0x96, 0x69, 0x99, 0xcf, 0xb4, 0xbc, 0x22, 0xa1,
+	0xb6, 0xd7, 0x23, 0xb9, 0x4c, 0xd8, 0xa9, 0x2d, 0xec, 0x54, 0x28, 0xff, 0x11, 0x39, 0x49, 0x5c,
+	0x2e, 0x2b, 0xb9, 0x17, 0x07, 0xac, 0xb7, 0xeb, 0xd2, 0x4b, 0x64, 0xf9, 0xc4, 0x81, 0x94, 0xe6,
+	0xf7, 0x5a, 0x2d, 0x99, 0x37, 0x39, 0x7c, 0xce, 0x45, 0x14, 0x31, 0xa2, 0x96, 0xce, 0x88, 0x82,
+	0xbc, 0x2d, 0xf3, 0xb7, 0x2c, 0xa3, 0xa5, 0x04, 0xaa, 0xcc, 0xd6, 0xe6, 0x11, 0xd5, 0x6e, 0xbb,
+	0x6d, 0x74, 0xc0, 0x4e, 0x38, 0x79, 0x89, 0xa8, 0xb7, 0x8c, 0x5d, 0x5b, 0xdb, 0x28, 0xff, 0x41,
+	0x06, 0xbf, 0x5c, 0x8b, 0x2b, 0x61, 0x75, 0x56, 0xa8, 0xe8, 0x2e, 0x30, 0xe1, 0x86, 0x6b, 0xbe,
+	0x68, 0xda, 0x8e, 0x2d, 0xdf, 0x55, 0x70, 0x8c, 0x28, 0x33, 0x31, 0xd6, 0x53, 0xe4, 0xcb, 0x1c,
+	0xf5, 0xdc, 0x6c, 0xee, 0x36, 0x1c, 0x35, 0xa8, 0x65, 0x18, 0x70, 0x3c, 0xa4, 0x88, 0x6e, 0x1d,
+	0x39, 0xe1, 0xac, 0x85, 0x3b, 0xa6, 0x8a, 0xaa, 0xec, 0x41, 0x9e, 0x85, 0x93, 0xc3, 0x5d, 0x76,
+	0x4b, 0xe0, 0xaa, 0x0d, 0xa3, 0xd9, 0x69, 0x76, 0x76, 0x13, 0x82, 0xd7, 0x28, 0xc9, 0xe0, 0xc0,
+	0x3c, 0xcb, 0xa8, 0xe8, 0x75, 0x51, 0x86, 0x03, 0xba, 0xd5, 0xed, 0xf6, 0xe4, 0x86, 0xb1, 0xab,
+	0x2c, 0x1a, 0x4d, 0x22, 0xa7, 0xa2, 0xf8, 0x68, 0x66, 0x4d, 0xe6, 0x32, 0xf4, 0x97, 0x5d, 0x69,
+	0x7b, 0x88, 0x0c, 0xd1, 0x5e, 0xdc, 0x9d, 0x37, 0x7c, 0x81, 0x9c, 0x40, 0x22, 0x70, 0x42, 0xda,
+	0x26, 0x2d, 0x88, 0x84, 0x73, 0x8d, 0xe5, 0xbb, 0xc5, 0xdd, 0x78, 0xb1, 0xb7, 0xcb, 0xbf, 0x4b,
+	0x8e, 0x27, 0xfe, 0x52, 0x70, 0x62, 0x89, 0x50, 0x9b, 0x9e, 0x10, 0x43, 0x4d, 0x5e, 0xd4, 0x46,
+	0x42, 0x1b, 0x18, 0x63, 0xb2, 0x96, 0xed, 0xc5, 0x6a, 0xf2, 0x17, 0xa5, 0x62, 0x51, 0x24, 0xdc,
+	0xa8, 0xed, 0x9b, 0x96, 0xd3, 0xb4, 0x4d, 0xe9, 0x7e, 0x3d, 0xc5, 0xfd, 0xca, 0x7f, 0x15, 0x9d,
+	0x46, 0xfe, 0x39, 0xed, 0x84, 0x46, 0xf4, 0x8e, 0x30, 0xe1, 0xdd, 0x32, 0x18, 0x9c, 0xb9, 0x91,
+	0xc5, 0xbb, 0x0c, 0x27, 0x16, 0x9f, 0x2e, 0xff, 0x10, 0xe7, 0x8b, 0xb7, 0xbf, 0xfc, 0xd9, 0x92,
+	0xf9, 0x3e, 0xeb, 0x26, 0xe7, 0x8b, 0x63, 0x4a, 0x28, 0x6e, 0x48, 0x42, 0x36, 0x07, 0x0b, 0xd9,
+	0x7f, 0x8d, 0xdd, 0x5e, 0xf8, 0xc3, 0xe2, 0x4b, 0xd4, 0xb7, 0xab, 0x89, 0x40, 0x11, 0x05, 0x90,
+	0x04, 0x63, 0xea, 0x43, 0xf9, 0x1c, 0x18, 0xeb, 0x7e, 0x6b, 0xfe, 0xee, 0x57, 0x42, 0x3c, 0x1d,
+	0xe0, 0xac, 0x7a, 0x15, 0xea, 0x6e, 0x6e, 0x19, 0x05, 0xc4, 0x3d, 0x36, 0x3e, 0xc2, 0x59, 0x34,
+	0x1a, 0xd4, 0x97, 0x5a, 0xba, 0xfc, 0x87, 0x69, 0xb4, 0x7b, 0x7c, 0xac, 0x58, 0x4c, 0x41, 0xed,
+	0x64, 0x0a, 0xc2, 0x08, 0xe6, 0x40, 0xac, 0x42, 0x29, 0x82, 0x53, 0xb4, 0xe2, 0x6d, 0x35, 0x82,
+	0xb1, 0x5f, 0x91, 0x56, 0x51, 0x22, 0x2e, 0x10, 0x25, 0x2a, 0x8a, 0xf6, 0xbc, 0x9b, 0x67, 0xc9,
+	0x6c, 0xed, 0x64, 0x7e, 0x11, 0x49, 0x5b, 0x82, 0x2d, 0xc3, 0x31, 0x65, 0x32, 0x6a, 0xc7, 0x31,
+	0x61, 0xf1, 0xb7, 0xfb, 0x73, 0xc4, 0x15, 0x90, 0x9c, 0xa3, 0xed, 0x22, 0x01, 0x75, 0x6b, 0xa6,
+	0x63, 0x34, 0x5b, 0x5a, 0x5e, 0x55, 0x95, 0x32, 0x06, 0xd7, 0xd4, 0xd6, 0x98, 0x3a, 0x75, 0x91,
+	0x4c, 0x8c, 0x4e, 0xcd, 0xd6, 0x0a, 0xe5, 0x7f, 0x99, 0x5a, 0xf2, 0x2d, 0x62, 0xb8, 0xcc, 0x89,
+	0xeb, 0x73, 0x4e, 0x4c, 0xef, 0xad, 0x05, 0x58, 0xee, 0xe0, 0x62, 0xc5, 0x62, 0x06, 0xc8, 0x0a,
+	0xf2, 0xb2, 0x44, 0x5d, 0xf1, 0x9a, 0xcc, 0xbc, 0x10, 0x59, 0x87, 0x64, 0x45, 0x2c, 0xd4, 0xa5,
+	0x3f, 0xad, 0x95, 0xff, 0x33, 0xed, 0xce, 0xc9, 0xbf, 0x54, 0x20, 0x8e, 0x7b, 0x70, 0xd2, 0xb6,
+	0xab, 0xf1, 0xf1, 0x8f, 0xdf, 0x1f, 0x79, 0x2e, 0xdf, 0x4d, 0xb7, 0x7b, 0xae, 0xb1, 0xbb, 0x6b,
+	0x99, 0xbb, 0x06, 0x3f, 0xa4, 0xd3, 0x89, 0x4f, 0xdc, 0x46, 0xc9, 0x08, 0x83, 0xf7, 0x92, 0x6f,
+	0x71, 0x25, 0x19, 0x86, 0xd1, 0x5a, 0x0c, 0xc0, 0x14, 0xb8, 0x1e, 0xf3, 0x89, 0xd3, 0xbe, 0x5d,
+	0xd5, 0x36, 0x84, 0xc1, 0x05, 0x54, 0x9c, 0x69, 0x64, 0xa7, 0xb7, 0xdd, 0x23, 0x37, 0xca, 0x8b,
+	0x23, 0x35, 0x01, 0x44, 0x32, 0x60, 0xb1, 0x08, 0x84, 0x4b, 0x11, 0x85, 0x18, 0x93, 0x3c, 0x30,
+	0xc9, 0x2b, 0x1a, 0x62, 0x12, 0x5c, 0x17, 0x71, 0x7c, 0x6a, 0xf7, 0x96, 0x1d, 0xcd, 0x77, 0x96,
+	0xfe, 0x85, 0x0a, 0x57, 0x7c, 0x6d, 0x8f, 0x8c, 0x75, 0x38, 0xcf, 0x2d, 0xbc, 0xe6, 0x15, 0xf0,
+	0x76, 0xd7, 0x32, 0xb5, 0x54, 0xb9, 0x45, 0xf1, 0x98, 0xfc, 0xab, 0x13, 0x24, 0x49, 0x68, 0x5c,
+	0xc7, 0xbb, 0x0d, 0x8a, 0x2c, 0x72, 0x7f, 0x89, 0x21, 0x69, 0x7f, 0x96, 0x41, 0xd5, 0x56, 0x7c,
+	0x8f, 0x2d, 0xfd, 0xa6, 0xe7, 0xa8, 0xa7, 0x68, 0x48, 0x4e, 0xb8, 0xf3, 0x2d, 0x60, 0xdc, 0x76,
+	0xd3, 0xb6, 0x65, 0x45, 0xca, 0xd1, 0x1d, 0xf3, 0x05, 0x9d, 0x39, 0x6d, 0x2d, 0x4d, 0x75, 0xf7,
+	0x3c, 0x02, 0xd9, 0x32, 0xe2, 0x3e, 0x02, 0x60, 0x93, 0x4d, 0xd1, 0x2c, 0xed, 0xf1, 0x8b, 0x28,
+	0x64, 0x5d, 0x53, 0x59, 0x93, 0x6d, 0xd3, 0x75, 0x95, 0x35, 0x81, 0x42, 0xd6, 0x0d, 0x19, 0x03,
+	0x3d, 0x87, 0x1a, 0x02, 0x39, 0x19, 0x8c, 0x30, 0x9a, 0x2c, 0x08, 0x99, 0xb8, 0x60, 0x12, 0x2b,
+	0x61, 0x9b, 0x0e, 0x96, 0x6f, 0xe2, 0x7c, 0xbd, 0x04, 0x87, 0xc3, 0x6c, 0xa9, 0xcc, 0xa8, 0x86,
+	0x64, 0xde, 0x56, 0x99, 0x93, 0x38, 0x64, 0xbe, 0xa8, 0xdf, 0x8c, 0x57, 0x22, 0xe1, 0x5f, 0x3f,
+	0x7b, 0x9f, 0xd1, 0xef, 0xc4, 0x6b, 0xa1, 0xe2, 0x90, 0x15, 0x1c, 0xf0, 0xf7, 0xe8, 0x4f, 0x74,
+	0x60, 0xc9, 0x95, 0xb8, 0x91, 0x41, 0x7d, 0xc1, 0x7a, 0x75, 0xe1, 0xf6, 0x0a, 0xc0, 0xb0, 0x7d,
+	0x48, 0x45, 0x95, 0x96, 0x12, 0xd5, 0x52, 0x8c, 0x69, 0x35, 0xf7, 0xcd, 0x8e, 0x69, 0xc7, 0xd7,
+	0x33, 0x76, 0x95, 0x62, 0x49, 0xcb, 0x2a, 0x0c, 0xb2, 0x82, 0xe2, 0x7d, 0x5b, 0x5b, 0xcb, 0x95,
+	0x5f, 0x63, 0x43, 0x20, 0xbe, 0xf2, 0x8e, 0xb7, 0xdc, 0xc5, 0x16, 0xaa, 0x36, 0xc8, 0x50, 0xcb,
+	0x67, 0x8e, 0xdb, 0x6e, 0x76, 0x30, 0xa3, 0xa7, 0x14, 0x98, 0xf1, 0x02, 0x61, 0x69, 0x8a, 0xc1,
+	0x67, 0x4b, 0x5a, 0x18, 0x3f, 0xc2, 0xd3, 0xf0, 0xdc, 0x9d, 0x67, 0xf2, 0xd3, 0xaa, 0x85, 0xfd,
+	0x94, 0x4e, 0xb7, 0xda, 0x30, 0x3a, 0xbb, 0xa6, 0x6c, 0xe6, 0x0b, 0x84, 0xf9, 0x6c, 0xcf, 0x68,
+	0xc9, 0x0b, 0x6a, 0x02, 0xda, 0x36, 0x6c, 0xdc, 0xbd, 0x92, 0xc4, 0x78, 0xa6, 0xcf, 0x54, 0xf6,
+	0xd8, 0x07, 0x7e, 0x70, 0xc8, 0x6f, 0x01, 0x0e, 0xfc, 0x60, 0xf8, 0x09, 0xfe, 0x2f, 0x32, 0xf2,
+	0x56, 0xe0, 0x83, 0xcf, 0x7e, 0xf8, 0xd9, 0xe1, 0x28, 0x3a, 0x3a, 0x3e, 0xf8, 0x64, 0xe0, 0x4f,
+	0xee, 0x0b, 0xb2, 0xfb, 0x48, 0xf6, 0x2b, 0xf4, 0x9f, 0xcd, 0x9c, 0x7c, 0x7e, 0xff, 0xd0, 0x57,
+	0xff, 0xcb, 0x99, 0x83, 0x75, 0x8e, 0xf9, 0xec, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x7c, 0x46,
+	0xe4, 0xda, 0x96, 0x66, 0x00, 0x00,
 }
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/openolt/openolt.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/openolt/openolt.pb.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-protos/v4/go/openolt/openolt.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/openolt/openolt.pb.go
index 2c8af68..b23fed0 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/openolt/openolt.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/openolt/openolt.pb.go
@@ -7,9 +7,9 @@
 	context "context"
 	fmt "fmt"
 	proto "github.com/golang/protobuf/proto"
-	common "github.com/opencord/voltha-protos/v4/go/common"
-	config "github.com/opencord/voltha-protos/v4/go/ext/config"
-	tech_profile "github.com/opencord/voltha-protos/v4/go/tech_profile"
+	common "github.com/opencord/voltha-protos/v5/go/common"
+	config "github.com/opencord/voltha-protos/v5/go/ext/config"
+	tech_profile "github.com/opencord/voltha-protos/v5/go/tech_profile"
 	_ "google.golang.org/genproto/googleapis/api/annotations"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
@@ -5247,8 +5247,8 @@
 	0x3e, 0x51, 0x7f, 0x99, 0xfb, 0xf4, 0xc5, 0xb7, 0xb0, 0xc2, 0x82, 0x2e, 0xda, 0x5a, 0x2c, 0xb0,
 	0xd7, 0xe4, 0xff, 0x2f, 0x10, 0x71, 0xbd, 0xa8, 0xbc, 0xc6, 0xb6, 0x28, 0x20, 0x5b, 0x7b, 0xed,
 	0x6f, 0x1e, 0x75, 0x1d, 0x7e, 0xd2, 0x3f, 0x16, 0xcb, 0xfe, 0x28, 0x32, 0x79, 0x24, 0x4d, 0x3e,
-	0x53, 0xff, 0x25, 0xc1, 0xd9, 0x93, 0x47, 0x5d, 0x16, 0xfd, 0xd7, 0x08, 0x87, 0xb9, 0xc3, 0xb1,
-	0xe3, 0x3c, 0x6a, 0x3e, 0xff, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x20, 0x58, 0x9a, 0x83, 0x3e,
+	0x53, 0xff, 0x25, 0xc1, 0xd9, 0xc6, 0xa3, 0x2e, 0x8b, 0xfe, 0x6b, 0x84, 0xc3, 0xdc, 0xe1, 0xd8,
+	0x71, 0x1e, 0x35, 0x9f, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x07, 0x3d, 0xbf, 0x02, 0x3e,
 	0x41, 0x00, 0x00,
 }
 
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/tech_profile/tech_profile.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/tech_profile/tech_profile.pb.go
similarity index 83%
rename from vendor/github.com/opencord/voltha-protos/v4/go/tech_profile/tech_profile.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/tech_profile/tech_profile.pb.go
index fc70ea8..85f717a 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/tech_profile/tech_profile.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/tech_profile/tech_profile.pb.go
@@ -1952,138 +1952,138 @@
 
 var fileDescriptor_d019a68bffe14cae = []byte{
 	// 2138 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0x4d, 0x6f, 0x1b, 0xb9,
-	0xf9, 0xb7, 0x24, 0x5b, 0x2f, 0x8f, 0x24, 0x7b, 0x4c, 0xc7, 0x1b, 0xc5, 0x71, 0x36, 0x8e, 0xf6,
-	0xbf, 0xff, 0x75, 0x5d, 0x34, 0xee, 0x3a, 0x4e, 0xf6, 0x90, 0x6d, 0x17, 0x52, 0x6c, 0x24, 0x6a,
-	0x37, 0x8e, 0x3d, 0x76, 0xeb, 0xa2, 0x87, 0x0e, 0x46, 0x33, 0x94, 0xcc, 0xee, 0x88, 0x1c, 0x93,
-	0x9c, 0x38, 0xde, 0x53, 0x51, 0xa0, 0x9f, 0xa2, 0xed, 0xa5, 0x40, 0xaf, 0xbd, 0xec, 0xa5, 0x68,
-	0x2f, 0x05, 0xfa, 0x2d, 0xfa, 0x09, 0x0a, 0xf4, 0x4b, 0x14, 0xe4, 0xcc, 0x68, 0x5e, 0xa4, 0x24,
-	0x76, 0xaa, 0x2c, 0xd0, 0xde, 0xc8, 0x87, 0x3f, 0x3e, 0x7c, 0xf8, 0xbc, 0xfc, 0x48, 0xce, 0xc0,
-	0xc6, 0x4b, 0xe6, 0xc9, 0x33, 0xdb, 0xf2, 0x39, 0x93, 0x4c, 0x6c, 0x4b, 0xec, 0x9c, 0xa9, 0xf6,
-	0x80, 0x78, 0xf8, 0xbe, 0x96, 0xa1, 0x46, 0x5a, 0xb6, 0xb6, 0x3e, 0x64, 0x6c, 0xe8, 0xe1, 0x6d,
-	0xdb, 0x27, 0xdb, 0x36, 0xa5, 0x4c, 0xda, 0x92, 0x30, 0x2a, 0x42, 0x6c, 0xfb, 0x57, 0x45, 0x58,
-	0x3a, 0x76, 0xce, 0xb0, 0x1b, 0x78, 0x98, 0x3f, 0x61, 0x74, 0x40, 0x86, 0xe8, 0x21, 0xd4, 0x5c,
-	0xc2, 0xb1, 0xa3, 0x70, 0xad, 0xc2, 0x46, 0x61, 0x73, 0x71, 0xe7, 0xe6, 0xfd, 0xcc, 0x3a, 0x7b,
-	0xf1, 0xb0, 0x99, 0x20, 0xd1, 0x17, 0xd0, 0xb4, 0x5d, 0x97, 0xa8, 0xb6, 0xed, 0x59, 0xfd, 0x8b,
-	0x56, 0x51, 0x4f, 0x5d, 0xcb, 0x4e, 0xed, 0x8c, 0x21, 0xdd, 0x53, 0xb3, 0x91, 0x4c, 0xe8, 0x5e,
-	0xa0, 0x35, 0xa8, 0xfa, 0x9c, 0x30, 0x4e, 0xe4, 0x65, 0xab, 0xb4, 0x51, 0xd8, 0xac, 0x98, 0xe3,
-	0x3e, 0xfa, 0x00, 0xca, 0x17, 0x98, 0x0c, 0xcf, 0x64, 0x6b, 0x5e, 0x8f, 0x44, 0x3d, 0xd4, 0x81,
-	0x86, 0x50, 0xe6, 0x5b, 0x3e, 0xf3, 0x88, 0x73, 0xd9, 0x5a, 0xd0, 0x6b, 0x7e, 0x98, 0x5d, 0x33,
-	0xda, 0x20, 0xa1, 0xc3, 0x43, 0x8d, 0x32, 0xeb, 0x7a, 0x4e, 0xd8, 0x69, 0xff, 0xb9, 0x00, 0xe8,
-	0x84, 0xdb, 0x83, 0x01, 0x71, 0x8e, 0xcf, 0x6c, 0x9f, 0xd0, 0x61, 0x8f, 0x0e, 0x18, 0x32, 0xa0,
-	0xe4, 0x10, 0xae, 0xf7, 0x5f, 0x31, 0x55, 0x53, 0x4b, 0xfa, 0x42, 0x6f, 0x4b, 0x49, 0xfa, 0x42,
-	0x49, 0x7c, 0xc2, 0x23, 0x63, 0x55, 0x53, 0x4b, 0xfa, 0x22, 0x32, 0x52, 0x35, 0x95, 0x64, 0x48,
-	0xb8, 0x36, 0xac, 0x62, 0xaa, 0x26, 0x7a, 0x06, 0x60, 0xbb, 0xae, 0xd5, 0xbf, 0xb0, 0x08, 0x75,
-	0x5b, 0x65, 0x6d, 0xf1, 0x56, 0xd6, 0xe2, 0x1e, 0x1d, 0x60, 0xce, 0xb1, 0x1b, 0x7b, 0xab, 0x7b,
-	0xda, 0xa3, 0x2e, 0x71, 0x74, 0xe8, 0xcc, 0xaa, 0xed, 0xba, 0xdd, 0x8b, 0x1e, 0x75, 0xdb, 0xbf,
-	0x2f, 0x82, 0x11, 0x9b, 0x1e, 0x07, 0xf1, 0x5d, 0xc3, 0x77, 0x0b, 0xaa, 0xb6, 0xe7, 0x31, 0xc7,
-	0x22, 0x6e, 0xb4, 0xc5, 0x8a, 0xee, 0xf7, 0x5c, 0xf4, 0x18, 0x6a, 0x22, 0x56, 0xaf, 0x37, 0x5b,
-	0xdf, 0xb9, 0x33, 0xd5, 0xc3, 0x71, 0x0a, 0x99, 0x09, 0x1e, 0x99, 0x70, 0x43, 0x86, 0x26, 0x5a,
-	0x22, 0x74, 0xaf, 0x45, 0xe8, 0x80, 0x69, 0x17, 0xd5, 0x77, 0x36, 0xb2, 0x7a, 0x26, 0xe3, 0x60,
-	0x22, 0x39, 0x19, 0x9b, 0xff, 0x87, 0xa5, 0xf4, 0x34, 0x65, 0x72, 0xe8, 0xdf, 0xa6, 0x12, 0x1f,
-	0x86, 0xd2, 0x9e, 0xdb, 0xfe, 0x4b, 0x01, 0x96, 0xf3, 0xfe, 0x11, 0xe8, 0x26, 0x54, 0x08, 0x95,
-	0x03, 0x35, 0x2b, 0x8c, 0x6e, 0x59, 0x75, 0x7b, 0x2e, 0x5a, 0x85, 0x32, 0xa3, 0x41, 0xe2, 0x80,
-	0x05, 0x46, 0x83, 0x50, 0x1c, 0x50, 0xa2, 0xc4, 0x61, 0x58, 0x17, 0x02, 0x4a, 0x7a, 0xae, 0x52,
-	0xe3, 0x33, 0x2e, 0x2d, 0xca, 0xa2, 0xc5, 0xcb, 0xaa, 0x7b, 0xc0, 0xd0, 0x3e, 0x2c, 0x8e, 0x77,
-	0xac, 0x56, 0x15, 0xad, 0xd2, 0x46, 0x69, 0xb3, 0x9e, 0xcf, 0xca, 0xbc, 0x61, 0x66, 0x53, 0xa6,
-	0x24, 0xa2, 0xfd, 0x08, 0x56, 0x4f, 0x6c, 0xe2, 0xed, 0x71, 0xe6, 0xef, 0x11, 0xe1, 0xd8, 0xdc,
-	0x8d, 0xea, 0xf3, 0x0e, 0xc0, 0x79, 0x80, 0x03, 0x6c, 0x09, 0xf2, 0x35, 0x8e, 0xb6, 0x50, 0xd3,
-	0x92, 0x63, 0xf2, 0x35, 0x6e, 0xff, 0xa6, 0x00, 0x86, 0x89, 0xdd, 0xec, 0x9c, 0x8f, 0xa0, 0x39,
-	0x22, 0xd4, 0x92, 0x67, 0x1c, 0x8b, 0x33, 0xe6, 0xc5, 0x3b, 0x6f, 0x8c, 0x08, 0x3d, 0x89, 0x65,
-	0x1a, 0x64, 0xbf, 0x4a, 0x81, 0x8a, 0x11, 0xc8, 0x7e, 0x95, 0x80, 0x3e, 0x81, 0x25, 0x05, 0xf2,
-	0x39, 0xeb, 0xdb, 0x7d, 0xe2, 0x25, 0xc5, 0xba, 0x38, 0xb2, 0x5f, 0x1d, 0x26, 0xd2, 0xf6, 0x37,
-	0x05, 0x58, 0x3e, 0x9d, 0x30, 0x64, 0x17, 0x16, 0x86, 0x1c, 0xe3, 0x30, 0x33, 0x27, 0x7c, 0x92,
-	0x87, 0x9b, 0x21, 0x18, 0x3d, 0x82, 0xf2, 0x25, 0xf6, 0x3c, 0x16, 0x92, 0xca, 0xdb, 0xa7, 0x45,
-	0x68, 0xf4, 0x7d, 0x28, 0x71, 0xec, 0x46, 0x39, 0xfb, 0xb6, 0x49, 0x0a, 0xda, 0xfe, 0x67, 0x11,
-	0x9a, 0x59, 0x8b, 0xbb, 0xb0, 0xe8, 0x86, 0x82, 0x98, 0x64, 0xc2, 0xa2, 0xba, 0x9d, 0x2f, 0x2a,
-	0x8d, 0x89, 0x18, 0xa6, 0xe9, 0xa6, 0xbb, 0xe8, 0x17, 0xd0, 0x92, 0x36, 0xf1, 0x2c, 0x97, 0x33,
-	0xdf, 0x8a, 0xb5, 0x39, 0x5a, 0x7f, 0xb4, 0xa3, 0x8f, 0x72, 0xc9, 0x31, 0x2d, 0xf2, 0xcf, 0xe6,
-	0xcc, 0x55, 0x39, 0x35, 0x25, 0x0e, 0x00, 0x71, 0xec, 0xe6, 0x35, 0x5f, 0x69, 0xdb, 0xcf, 0xe6,
-	0x4c, 0x83, 0xe7, 0xa3, 0x74, 0x04, 0x2b, 0x17, 0x53, 0x14, 0x86, 0x35, 0x7b, 0x37, 0xab, 0xf0,
-	0x74, 0x8a, 0xc6, 0xe5, 0x8b, 0xbc, 0xca, 0xae, 0x91, 0xb8, 0x31, 0xd4, 0xd6, 0xfe, 0x63, 0x09,
-	0x1a, 0x51, 0x11, 0x1c, 0xa9, 0xec, 0x7d, 0x57, 0xe6, 0xba, 0x03, 0x30, 0xc4, 0x23, 0x5d, 0x8b,
-	0xe3, 0xd2, 0xad, 0x45, 0x92, 0x9e, 0xab, 0x88, 0xcd, 0xef, 0x13, 0x69, 0x8d, 0x6c, 0x5f, 0x7b,
-	0xa4, 0x66, 0x56, 0x54, 0xff, 0xb9, 0xed, 0xa3, 0x8f, 0x61, 0xd1, 0xc6, 0xc2, 0xc2, 0xd4, 0xe1,
-	0x97, 0xbe, 0x5e, 0x55, 0xed, 0xb0, 0x6a, 0x36, 0x6d, 0x2c, 0xf6, 0xc7, 0xc2, 0x19, 0x1c, 0x32,
-	0x99, 0xb3, 0xad, 0xfc, 0xda, 0xb3, 0xad, 0x92, 0x39, 0xdb, 0x26, 0x13, 0xaf, 0x7a, 0xed, 0xc4,
-	0xeb, 0xe6, 0xbd, 0xde, 0xaa, 0xe9, 0x18, 0x4e, 0xd7, 0x11, 0x15, 0x42, 0xac, 0x23, 0xec, 0xb6,
-	0xff, 0x51, 0x80, 0x66, 0x3a, 0x4e, 0xef, 0x9f, 0x41, 0x3b, 0x09, 0x83, 0x6a, 0x5e, 0x13, 0xad,
-	0xb2, 0x66, 0xd0, 0xb5, 0xa9, 0x0c, 0xaa, 0x8d, 0x1a, 0xb3, 0x67, 0x64, 0xe2, 0x94, 0x23, 0xa2,
-	0x32, 0xed, 0x88, 0x18, 0xc0, 0x52, 0x8f, 0x0a, 0x69, 0x53, 0x07, 0x3f, 0x61, 0x54, 0x72, 0xe6,
-	0xa9, 0x13, 0x9b, 0xd1, 0x40, 0xef, 0xac, 0x66, 0xaa, 0xa6, 0x92, 0x04, 0x94, 0xe8, 0x3d, 0xd5,
-	0x4c, 0xd5, 0x44, 0xdb, 0x70, 0x43, 0xb1, 0xe0, 0x10, 0x8f, 0x2c, 0xdf, 0xbe, 0xf4, 0x98, 0xed,
-	0x86, 0x6c, 0x1c, 0x26, 0xd8, 0xf2, 0xc8, 0x7e, 0xf5, 0x14, 0x8f, 0x0e, 0xc3, 0x11, 0xcd, 0xca,
-	0xbf, 0x2e, 0x42, 0xfd, 0x68, 0xcc, 0xa2, 0x02, 0xdd, 0x83, 0xc6, 0x79, 0xc2, 0xb4, 0x9f, 0xea,
-	0xd5, 0x9a, 0x66, 0xfd, 0x7c, 0x0c, 0xf9, 0x34, 0x07, 0xd9, 0xd1, 0xcb, 0x67, 0x20, 0x3b, 0x39,
-	0xc8, 0x03, 0xbd, 0x7c, 0x06, 0xf2, 0x20, 0x07, 0xd9, 0xd5, 0x11, 0xc8, 0x40, 0x76, 0x73, 0x90,
-	0x87, 0x3a, 0x18, 0x19, 0xc8, 0xc3, 0x1c, 0xe4, 0x91, 0xce, 0xe1, 0x0c, 0xe4, 0x51, 0x0e, 0xf2,
-	0x99, 0x76, 0x77, 0x06, 0xf2, 0x59, 0xfb, 0x9b, 0x05, 0x58, 0x56, 0x7e, 0x61, 0x5c, 0x76, 0xa4,
-	0xe4, 0xa4, 0x1f, 0x48, 0x2c, 0x72, 0xf5, 0x5b, 0xc8, 0xd7, 0xef, 0x3a, 0x80, 0x72, 0xf5, 0x79,
-	0xe8, 0xe0, 0x30, 0x06, 0xd5, 0x91, 0xfd, 0xea, 0x48, 0xf9, 0xf5, 0xfa, 0xd5, 0x5d, 0xcb, 0x57,
-	0xf7, 0x8f, 0x61, 0x59, 0x8c, 0x6b, 0xf7, 0x7a, 0x25, 0x6e, 0x88, 0x9c, 0x44, 0xed, 0x25, 0xae,
-	0x6b, 0xeb, 0x3c, 0xaa, 0xf4, 0x5a, 0x2c, 0x39, 0x7a, 0xaf, 0xa5, 0xbe, 0xff, 0x9a, 0x52, 0x7f,
-	0xdb, 0xb1, 0x97, 0xad, 0x76, 0xf4, 0x14, 0x96, 0xb3, 0x6a, 0xac, 0x97, 0x3b, 0xad, 0xc5, 0xb7,
-	0x93, 0xc6, 0x52, 0x46, 0xcd, 0x4f, 0x75, 0x6e, 0x12, 0x61, 0x8d, 0x02, 0x4f, 0x12, 0xc7, 0x16,
-	0xb2, 0x05, 0xda, 0xf9, 0x75, 0x22, 0x9e, 0xc7, 0x22, 0xb4, 0x09, 0xc6, 0x78, 0x5c, 0xd7, 0x12,
-	0x71, 0x5b, 0xf5, 0xe8, 0x32, 0x11, 0xcb, 0x9f, 0xe2, 0x51, 0xcf, 0x45, 0x3f, 0x80, 0xdb, 0xee,
-	0x25, 0xb5, 0x47, 0xc4, 0xb1, 0x6c, 0xc7, 0xc1, 0x42, 0x28, 0xe3, 0x54, 0xb5, 0x5a, 0x1e, 0x11,
-	0xb2, 0xd5, 0xd0, 0xba, 0x5b, 0x11, 0xa4, 0xa3, 0x11, 0x51, 0x39, 0x7f, 0x49, 0x84, 0x44, 0x8f,
-	0x61, 0x4d, 0xa8, 0x87, 0xcf, 0xf4, 0xd9, 0x4d, 0x3d, 0xfb, 0x66, 0x88, 0x98, 0x98, 0xdc, 0xfe,
-	0x43, 0x11, 0x56, 0xc6, 0xb7, 0xb4, 0x54, 0xde, 0xce, 0xe8, 0xa2, 0xdd, 0x4c, 0x2e, 0xda, 0x13,
-	0x4f, 0xa8, 0xd2, 0x7f, 0xf0, 0x84, 0x9a, 0x7f, 0xed, 0x31, 0xb3, 0x90, 0xc9, 0xbd, 0x3d, 0x58,
-	0x3c, 0xb7, 0x32, 0xe7, 0x5b, 0xf9, 0x4a, 0xc9, 0xdf, 0x38, 0x3f, 0x4e, 0xbd, 0xa2, 0xfe, 0x56,
-	0x86, 0x95, 0xfd, 0xc3, 0x17, 0x07, 0x9a, 0x7e, 0x53, 0x4e, 0xca, 0x56, 0x6f, 0xe1, 0x0d, 0xd5,
-	0x5b, 0xcc, 0x56, 0x6f, 0x96, 0x15, 0x42, 0x62, 0x4b, 0xb1, 0xc2, 0x15, 0x8b, 0xfb, 0x1e, 0x34,
-	0xe2, 0x93, 0x44, 0x5e, 0xfa, 0x58, 0x6f, 0xbd, 0x66, 0xd6, 0x23, 0xd9, 0xc9, 0xa5, 0x8f, 0xd1,
-	0x2e, 0x7c, 0x10, 0x50, 0xa1, 0x76, 0x41, 0x24, 0x76, 0xad, 0x21, 0xb7, 0xa9, 0x0c, 0xad, 0x0d,
-	0x49, 0xee, 0x46, 0x6a, 0xf4, 0xa9, 0x1a, 0xd4, 0x96, 0x7f, 0x07, 0x0c, 0xca, 0x46, 0x44, 0xc5,
-	0x89, 0x50, 0x89, 0xf9, 0x4b, 0xdb, 0x8b, 0x18, 0x6f, 0x29, 0x92, 0xf7, 0x22, 0x31, 0xda, 0x81,
-	0x55, 0xc9, 0x3c, 0xcc, 0x6d, 0x19, 0xba, 0xd8, 0xb3, 0x7e, 0x49, 0xa4, 0xc4, 0x5c, 0xd7, 0x78,
-	0xd3, 0x5c, 0x19, 0x0f, 0x1e, 0x32, 0xcf, 0xfb, 0x91, 0x1e, 0x42, 0x3f, 0x84, 0xdb, 0x1c, 0x9f,
-	0x07, 0x58, 0x48, 0x4b, 0x72, 0x9b, 0x8a, 0x11, 0x11, 0x82, 0x30, 0x1a, 0x47, 0xa8, 0xa6, 0x67,
-	0xde, 0x8a, 0x20, 0x27, 0x29, 0x44, 0x44, 0x06, 0xeb, 0x00, 0x34, 0x18, 0x29, 0xb7, 0x63, 0x29,
-	0x74, 0xe9, 0x35, 0xcd, 0x2a, 0x0d, 0x46, 0x47, 0xc7, 0x58, 0x0a, 0xf4, 0x79, 0x86, 0xaa, 0x85,
-	0xae, 0xb9, 0xfa, 0xce, 0xad, 0x6c, 0xc0, 0x53, 0xa7, 0x55, 0x9a, 0xc5, 0xc5, 0x74, 0xc2, 0x6c,
-	0xcc, 0x84, 0x30, 0x9b, 0x61, 0x98, 0xa7, 0x11, 0xe6, 0xa2, 0x1e, 0x7a, 0x3d, 0x61, 0x2e, 0xcd,
-	0x80, 0x30, 0x8d, 0x99, 0x11, 0xe6, 0xf2, 0xf5, 0x09, 0xb3, 0xfd, 0xa7, 0x79, 0xa8, 0x9f, 0x24,
-	0x97, 0x13, 0x84, 0x60, 0x9e, 0xda, 0xa3, 0xb8, 0x68, 0x74, 0x1b, 0xb5, 0xa0, 0xf2, 0x12, 0x73,
-	0x15, 0xe8, 0x98, 0x3b, 0xa2, 0xae, 0xca, 0xf4, 0xf8, 0xae, 0xa3, 0x33, 0x3d, 0x3c, 0x0c, 0xeb,
-	0x91, 0x4c, 0x67, 0x7a, 0x1b, 0x9a, 0x2a, 0x29, 0xf4, 0xa5, 0x85, 0x71, 0x29, 0xe2, 0xbb, 0x00,
-	0x0d, 0x46, 0xd1, 0xa9, 0x2c, 0xd0, 0x33, 0x30, 0x48, 0x74, 0x1f, 0x8a, 0x49, 0x52, 0x17, 0xcd,
-	0xc4, 0x93, 0x3f, 0x77, 0x6b, 0x32, 0x97, 0x48, 0xee, 0x1a, 0xb5, 0x07, 0x8d, 0x40, 0x58, 0xc9,
-	0x87, 0x83, 0xb2, 0xd6, 0x72, 0xef, 0x35, 0x1f, 0x0e, 0x12, 0xca, 0x30, 0xeb, 0x81, 0x48, 0xbe,
-	0x66, 0xec, 0x41, 0xc3, 0x4d, 0x6b, 0xa9, 0x5c, 0x59, 0x8b, 0x9b, 0xd2, 0x32, 0x84, 0x8d, 0xc0,
-	0x17, 0x92, 0x63, 0x3b, 0xd9, 0xbe, 0x65, 0xc7, 0xe0, 0xf0, 0x14, 0xa8, 0xea, 0x2b, 0x66, 0xee,
-	0x71, 0x33, 0x71, 0x5b, 0x31, 0xd7, 0x63, 0x45, 0xf9, 0x21, 0x7d, 0xd0, 0x7c, 0x05, 0x6d, 0x97,
-	0x5d, 0xd0, 0xb7, 0x2c, 0x55, 0xbb, 0xda, 0x52, 0x1f, 0x26, 0xaa, 0xa6, 0x2d, 0xd6, 0xfe, 0x7b,
-	0x09, 0x96, 0xf6, 0x7d, 0x46, 0xff, 0x87, 0x92, 0x46, 0x19, 0x64, 0x3b, 0x5f, 0xd9, 0xc3, 0xc8,
-	0xa0, 0x72, 0x64, 0x50, 0x28, 0xd3, 0x06, 0xb9, 0x70, 0x67, 0x1c, 0xcb, 0xf0, 0x3b, 0x48, 0xce,
-	0xbb, 0x15, 0xed, 0xdd, 0x5c, 0x8a, 0x4c, 0x39, 0x9b, 0xcc, 0xb5, 0x58, 0x4f, 0x76, 0x40, 0x07,
-	0xf2, 0x0c, 0xee, 0xa6, 0x02, 0x39, 0x75, 0x9d, 0xea, 0x55, 0xd7, 0x59, 0x4f, 0x34, 0x4d, 0xae,
-	0xd4, 0xfe, 0xd7, 0x3c, 0xac, 0xa4, 0x22, 0x18, 0xbb, 0xe8, 0x9a, 0x91, 0x7c, 0x00, 0xab, 0x22,
-	0xe8, 0x0b, 0x87, 0x93, 0x3e, 0xe6, 0x16, 0x71, 0x31, 0x95, 0x64, 0x40, 0xa2, 0xef, 0x75, 0x35,
-	0xf3, 0x46, 0x32, 0xd8, 0x1b, 0x8f, 0x4d, 0x84, 0x7f, 0xfe, 0x0a, 0xe1, 0x5f, 0xb8, 0x5a, 0xf8,
-	0xcb, 0x33, 0xe1, 0x8c, 0xca, 0x4c, 0x38, 0xa3, 0xfa, 0xde, 0x38, 0xa3, 0xf6, 0xed, 0x71, 0x06,
-	0xcc, 0x86, 0x33, 0x7e, 0x3b, 0x0f, 0x37, 0x73, 0x9c, 0xf1, 0x5f, 0x98, 0x71, 0xe9, 0x3b, 0x74,
-	0x39, 0x7b, 0x87, 0x9e, 0x96, 0x8c, 0x95, 0x99, 0x70, 0x51, 0xf5, 0x1d, 0xb8, 0xa8, 0xf6, 0x2d,
-	0x71, 0x11, 0xcc, 0x86, 0x8b, 0xfe, 0xaa, 0xbf, 0x1d, 0x0b, 0x16, 0x70, 0x27, 0x49, 0x8b, 0x15,
-	0x58, 0x90, 0x7e, 0xfc, 0x34, 0x6f, 0x9a, 0xf3, 0xd2, 0xef, 0xb9, 0x13, 0x81, 0x2c, 0x4e, 0x06,
-	0xf2, 0x9d, 0x12, 0x24, 0x1d, 0xd9, 0xf9, 0x6c, 0x64, 0xef, 0x42, 0x3d, 0x79, 0x11, 0xa8, 0xb4,
-	0x28, 0x6d, 0x36, 0x4d, 0x18, 0x3f, 0x09, 0xc4, 0xd6, 0xe7, 0x50, 0x1b, 0xbf, 0xb8, 0x50, 0x03,
-	0xaa, 0x3f, 0x39, 0x3c, 0x3e, 0x31, 0xf7, 0x3b, 0xcf, 0x8d, 0x39, 0xb4, 0x08, 0xb0, 0xf7, 0xe2,
-	0xf4, 0x20, 0xea, 0x17, 0xd0, 0x32, 0x34, 0xbb, 0xbd, 0xbd, 0x9e, 0xb9, 0xff, 0xe4, 0xa4, 0xf7,
-	0xe2, 0xa0, 0xf3, 0xa5, 0x51, 0xdc, 0x7a, 0x0c, 0x46, 0xfe, 0xbe, 0x8a, 0x2a, 0x50, 0x3a, 0x35,
-	0x4d, 0x63, 0x0e, 0x21, 0x58, 0x3c, 0x96, 0x9c, 0x38, 0xf2, 0x30, 0xba, 0x9a, 0x1a, 0x05, 0x04,
-	0x50, 0x7e, 0x76, 0xd9, 0xe7, 0xc4, 0x35, 0x8a, 0x5b, 0x14, 0x1a, 0xe9, 0x67, 0x19, 0x5a, 0x85,
-	0xe5, 0x74, 0xdf, 0x3a, 0x60, 0x14, 0x1b, 0x73, 0x68, 0x05, 0x96, 0xb2, 0xe2, 0x8e, 0x51, 0x40,
-	0xb7, 0xe1, 0x66, 0x46, 0xd8, 0xc5, 0x42, 0xee, 0x0f, 0x06, 0x8c, 0x4b, 0xa3, 0x38, 0xa1, 0xa8,
-	0x13, 0x48, 0x66, 0x94, 0xb6, 0xbe, 0x18, 0x7f, 0xa5, 0x8e, 0x2c, 0x6d, 0x40, 0x35, 0xfe, 0x66,
-	0x6c, 0xcc, 0xa1, 0x26, 0xd4, 0x4e, 0xc7, 0xdd, 0x82, 0xda, 0x86, 0x89, 0x5d, 0xa3, 0x88, 0xaa,
-	0x30, 0x7f, 0xaa, 0x5a, 0xa5, 0xad, 0xdf, 0x15, 0x60, 0xfd, 0x4d, 0x7f, 0x99, 0xd0, 0xc7, 0x70,
-	0xef, 0x4d, 0xe3, 0xf1, 0x8e, 0x36, 0xe1, 0xff, 0xde, 0x08, 0xeb, 0x08, 0x11, 0x70, 0xec, 0x1a,
-	0x05, 0xf4, 0x5d, 0xf8, 0xe4, 0x8d, 0xc8, 0xf4, 0xb6, 0xbb, 0x3f, 0x83, 0x0d, 0xc6, 0x87, 0xf7,
-	0x99, 0x8f, 0xa9, 0xc3, 0xb8, 0x7b, 0x3f, 0xfc, 0xe9, 0x99, 0x49, 0xef, 0x9f, 0xef, 0x0e, 0x89,
-	0x3c, 0x0b, 0xfa, 0xf7, 0x1d, 0x36, 0xda, 0x8e, 0x81, 0xdb, 0x21, 0xf0, 0x7b, 0xd1, 0xdf, 0xd1,
-	0x97, 0xbb, 0xdb, 0x43, 0x96, 0xf9, 0x47, 0xda, 0x2f, 0xeb, 0xa1, 0x07, 0xff, 0x0e, 0x00, 0x00,
-	0xff, 0xff, 0xc7, 0x6b, 0x83, 0x2e, 0x48, 0x1d, 0x00, 0x00,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0x4d, 0x73, 0x1c, 0x39,
+	0xf9, 0xf7, 0xcc, 0xd8, 0xf3, 0xf2, 0xcc, 0x8c, 0xdd, 0x96, 0xe3, 0xcd, 0xc4, 0x71, 0x36, 0xce,
+	0xec, 0x7f, 0xff, 0x6b, 0x4c, 0x11, 0xb3, 0xce, 0xcb, 0x1e, 0xb2, 0xb0, 0x65, 0xc7, 0xae, 0x64,
+	0x60, 0xe3, 0xd8, 0x6d, 0x83, 0x29, 0x0e, 0x74, 0xf5, 0xb4, 0x34, 0x63, 0xb1, 0x3d, 0x52, 0x5b,
+	0x52, 0xc7, 0xf1, 0x9e, 0x28, 0xaa, 0xf8, 0x14, 0xc0, 0x85, 0x2a, 0xae, 0x5c, 0xf6, 0x42, 0xc1,
+	0x85, 0x2a, 0xbe, 0x05, 0x9f, 0x80, 0x2a, 0xbe, 0x04, 0x25, 0x75, 0xf7, 0xf4, 0xcb, 0x4c, 0x12,
+	0x27, 0x38, 0x5b, 0x05, 0x37, 0xe9, 0xd1, 0x4f, 0x8f, 0x1e, 0x3d, 0x2f, 0x3f, 0x49, 0xdd, 0xb0,
+	0xf6, 0x82, 0xfb, 0xea, 0xd4, 0x75, 0x02, 0xc1, 0x15, 0x97, 0x9b, 0x8a, 0x78, 0xa7, 0xba, 0x3d,
+	0xa0, 0x3e, 0xb9, 0x6b, 0x64, 0xa8, 0x95, 0x95, 0xad, 0xac, 0x0e, 0x39, 0x1f, 0xfa, 0x64, 0xd3,
+	0x0d, 0xe8, 0xa6, 0xcb, 0x18, 0x57, 0xae, 0xa2, 0x9c, 0xc9, 0x08, 0xdb, 0xfd, 0x55, 0x19, 0x16,
+	0x8e, 0xbc, 0x53, 0x82, 0x43, 0x9f, 0x88, 0xc7, 0x9c, 0x0d, 0xe8, 0x10, 0x3d, 0x80, 0x06, 0xa6,
+	0x82, 0x78, 0x1a, 0xd7, 0x29, 0xad, 0x95, 0xd6, 0xe7, 0xb7, 0xae, 0xdf, 0xcd, 0xad, 0xb3, 0x9b,
+	0x0c, 0xdb, 0x29, 0x12, 0x7d, 0x01, 0x6d, 0x17, 0x63, 0xaa, 0xdb, 0xae, 0xef, 0xf4, 0xcf, 0x3b,
+	0x65, 0x33, 0x75, 0x25, 0x3f, 0x75, 0x7b, 0x0c, 0xd9, 0x39, 0xb1, 0x5b, 0xe9, 0x84, 0x9d, 0x73,
+	0xb4, 0x02, 0xf5, 0x40, 0x50, 0x2e, 0xa8, 0xba, 0xe8, 0x54, 0xd6, 0x4a, 0xeb, 0x35, 0x7b, 0xdc,
+	0x47, 0x1f, 0x40, 0xf5, 0x9c, 0xd0, 0xe1, 0xa9, 0xea, 0xcc, 0x9a, 0x91, 0xb8, 0x87, 0xb6, 0xa1,
+	0x25, 0xb5, 0xf9, 0x4e, 0xc0, 0x7d, 0xea, 0x5d, 0x74, 0xe6, 0xcc, 0x9a, 0x1f, 0xe6, 0xd7, 0x8c,
+	0x37, 0x48, 0xd9, 0xf0, 0xc0, 0xa0, 0xec, 0xa6, 0x99, 0x13, 0x75, 0xba, 0x7f, 0x2e, 0x01, 0x3a,
+	0x16, 0xee, 0x60, 0x40, 0xbd, 0xa3, 0x53, 0x37, 0xa0, 0x6c, 0xd8, 0x63, 0x03, 0x8e, 0x2c, 0xa8,
+	0x78, 0x54, 0x98, 0xfd, 0xd7, 0x6c, 0xdd, 0x34, 0x92, 0xbe, 0x34, 0xdb, 0xd2, 0x92, 0xbe, 0xd4,
+	0x92, 0x80, 0x8a, 0xd8, 0x58, 0xdd, 0x34, 0x92, 0xbe, 0x8c, 0x8d, 0xd4, 0x4d, 0x2d, 0x19, 0x52,
+	0x61, 0x0c, 0xab, 0xd9, 0xba, 0x89, 0x9e, 0x02, 0xb8, 0x18, 0x3b, 0xfd, 0x73, 0x87, 0x32, 0xdc,
+	0xa9, 0x1a, 0x8b, 0x37, 0xf2, 0x16, 0xf7, 0xd8, 0x80, 0x08, 0x41, 0x70, 0xe2, 0xad, 0x9d, 0x93,
+	0x1e, 0xc3, 0xd4, 0x33, 0xa1, 0xb3, 0xeb, 0x2e, 0xc6, 0x3b, 0xe7, 0x3d, 0x86, 0xbb, 0xbf, 0x2f,
+	0x83, 0x95, 0x98, 0x9e, 0x04, 0xf1, 0x5d, 0xc3, 0x77, 0x03, 0xea, 0xae, 0xef, 0x73, 0xcf, 0xa1,
+	0x38, 0xde, 0x62, 0xcd, 0xf4, 0x7b, 0x18, 0x3d, 0x82, 0x86, 0x4c, 0xd4, 0x9b, 0xcd, 0x36, 0xb7,
+	0x6e, 0x4d, 0xf5, 0x70, 0x92, 0x42, 0x76, 0x8a, 0x47, 0x36, 0x5c, 0x53, 0x91, 0x89, 0x8e, 0x8c,
+	0xdc, 0xeb, 0x50, 0x36, 0xe0, 0xc6, 0x45, 0xcd, 0xad, 0xb5, 0xbc, 0x9e, 0xc9, 0x38, 0xd8, 0x48,
+	0x4d, 0xc6, 0xe6, 0xff, 0x61, 0x21, 0x3b, 0x4d, 0x9b, 0x1c, 0xf9, 0xb7, 0xad, 0xc5, 0x07, 0x91,
+	0xb4, 0x87, 0xbb, 0x7f, 0x29, 0xc1, 0x62, 0xd1, 0x3f, 0x12, 0x5d, 0x87, 0x1a, 0x65, 0x6a, 0xa0,
+	0x67, 0x45, 0xd1, 0xad, 0xea, 0x6e, 0x0f, 0xa3, 0x65, 0xa8, 0x72, 0x16, 0xa6, 0x0e, 0x98, 0xe3,
+	0x2c, 0x8c, 0xc4, 0x21, 0xa3, 0x5a, 0x1c, 0x85, 0x75, 0x2e, 0x64, 0xb4, 0x87, 0xb5, 0x9a, 0x80,
+	0x0b, 0xe5, 0x30, 0x1e, 0x2f, 0x5e, 0xd5, 0xdd, 0x7d, 0x8e, 0xf6, 0x60, 0x7e, 0xbc, 0x63, 0xbd,
+	0xaa, 0xec, 0x54, 0xd6, 0x2a, 0xeb, 0xcd, 0x62, 0x56, 0x16, 0x0d, 0xb3, 0xdb, 0x2a, 0x23, 0x91,
+	0xdd, 0x87, 0xb0, 0x7c, 0xec, 0x52, 0x7f, 0x57, 0xf0, 0x60, 0x97, 0x4a, 0xcf, 0x15, 0x38, 0xae,
+	0xcf, 0x5b, 0x00, 0x67, 0x21, 0x09, 0x89, 0x23, 0xe9, 0xd7, 0x24, 0xde, 0x42, 0xc3, 0x48, 0x8e,
+	0xe8, 0xd7, 0xa4, 0xfb, 0x9b, 0x12, 0x58, 0x36, 0xc1, 0xf9, 0x39, 0x1f, 0x41, 0x7b, 0x44, 0x99,
+	0xa3, 0x4e, 0x05, 0x91, 0xa7, 0xdc, 0x4f, 0x76, 0xde, 0x1a, 0x51, 0x76, 0x9c, 0xc8, 0x0c, 0xc8,
+	0x7d, 0x99, 0x01, 0x95, 0x63, 0x90, 0xfb, 0x32, 0x05, 0x7d, 0x02, 0x0b, 0x1a, 0x14, 0x08, 0xde,
+	0x77, 0xfb, 0xd4, 0x4f, 0x8b, 0x75, 0x7e, 0xe4, 0xbe, 0x3c, 0x48, 0xa5, 0xdd, 0x6f, 0x4a, 0xb0,
+	0x78, 0x32, 0x61, 0xc8, 0x7d, 0x98, 0x1b, 0x0a, 0x42, 0xa2, 0xcc, 0x9c, 0xf0, 0x49, 0x11, 0x6e,
+	0x47, 0x60, 0xf4, 0x10, 0xaa, 0x17, 0xc4, 0xf7, 0x79, 0x44, 0x2a, 0x6f, 0x9e, 0x16, 0xa3, 0xd1,
+	0xf7, 0xa1, 0x22, 0x08, 0x8e, 0x73, 0xf6, 0x4d, 0x93, 0x34, 0xb4, 0xfb, 0xcf, 0x32, 0xb4, 0xf3,
+	0x16, 0xef, 0xc0, 0x3c, 0x8e, 0x04, 0x09, 0xc9, 0x44, 0x45, 0x75, 0xb3, 0x58, 0x54, 0x06, 0x13,
+	0x33, 0x4c, 0x1b, 0x67, 0xbb, 0xe8, 0x17, 0xd0, 0x51, 0x2e, 0xf5, 0x1d, 0x2c, 0x78, 0xe0, 0x24,
+	0xda, 0x3c, 0xa3, 0x3f, 0xde, 0xd1, 0x47, 0x85, 0xe4, 0x98, 0x16, 0xf9, 0xa7, 0x33, 0xf6, 0xb2,
+	0x9a, 0x9a, 0x12, 0xfb, 0x80, 0x04, 0xc1, 0x45, 0xcd, 0x97, 0xda, 0xf6, 0xd3, 0x19, 0xdb, 0x12,
+	0xc5, 0x28, 0x1d, 0xc2, 0xd2, 0xf9, 0x14, 0x85, 0x51, 0xcd, 0xde, 0xce, 0x2b, 0x3c, 0x99, 0xa2,
+	0x71, 0xf1, 0xbc, 0xa8, 0x72, 0xc7, 0x4a, 0xdd, 0x18, 0x69, 0xeb, 0xfe, 0xb1, 0x02, 0xad, 0xb8,
+	0x08, 0x0e, 0x75, 0xf6, 0xbe, 0x2b, 0x73, 0xdd, 0x02, 0x18, 0x92, 0x91, 0xa9, 0xc5, 0x71, 0xe9,
+	0x36, 0x62, 0x49, 0x0f, 0x6b, 0x62, 0x0b, 0xfa, 0x54, 0x39, 0x23, 0x37, 0x30, 0x1e, 0x69, 0xd8,
+	0x35, 0xdd, 0x7f, 0xe6, 0x06, 0xe8, 0x63, 0x98, 0x77, 0x89, 0x74, 0x08, 0xf3, 0xc4, 0x45, 0x60,
+	0x56, 0xd5, 0x3b, 0xac, 0xdb, 0x6d, 0x97, 0xc8, 0xbd, 0xb1, 0xf0, 0x0a, 0x0e, 0x99, 0xdc, 0xd9,
+	0x56, 0x7d, 0xe5, 0xd9, 0x56, 0xcb, 0x9d, 0x6d, 0x93, 0x89, 0x57, 0x7f, 0xeb, 0xc4, 0xdb, 0x29,
+	0x7a, 0xbd, 0xd3, 0x30, 0x31, 0x9c, 0xae, 0x23, 0x2e, 0x84, 0x44, 0x47, 0xd4, 0xed, 0xfe, 0xa3,
+	0x04, 0xed, 0x6c, 0x9c, 0xde, 0x3f, 0x83, 0x6e, 0xa7, 0x0c, 0x6a, 0x78, 0x4d, 0x76, 0xaa, 0x86,
+	0x41, 0x57, 0xa6, 0x32, 0xa8, 0x31, 0x6a, 0xcc, 0x9e, 0xb1, 0x89, 0x53, 0x8e, 0x88, 0xda, 0xb4,
+	0x23, 0x62, 0x00, 0x0b, 0x3d, 0x26, 0x95, 0xcb, 0x3c, 0xf2, 0x98, 0x33, 0x25, 0xb8, 0xaf, 0x4f,
+	0x6c, 0xce, 0x42, 0xb3, 0xb3, 0x86, 0xad, 0x9b, 0x5a, 0x12, 0x32, 0x6a, 0xf6, 0xd4, 0xb0, 0x75,
+	0x13, 0x6d, 0xc2, 0x35, 0xcd, 0x82, 0x43, 0x32, 0x72, 0x02, 0xf7, 0xc2, 0xe7, 0x2e, 0x8e, 0xd8,
+	0x38, 0x4a, 0xb0, 0xc5, 0x91, 0xfb, 0xf2, 0x09, 0x19, 0x1d, 0x44, 0x23, 0x86, 0x95, 0x7f, 0x5d,
+	0x86, 0xe6, 0xe1, 0x98, 0x45, 0x25, 0xba, 0x03, 0xad, 0xb3, 0x94, 0x69, 0x3f, 0x35, 0xab, 0xb5,
+	0xed, 0xe6, 0xd9, 0x18, 0xf2, 0x69, 0x01, 0xb2, 0x65, 0x96, 0xcf, 0x41, 0xb6, 0x0a, 0x90, 0x7b,
+	0x66, 0xf9, 0x1c, 0xe4, 0x5e, 0x01, 0x72, 0xdf, 0x44, 0x20, 0x07, 0xb9, 0x5f, 0x80, 0x3c, 0x30,
+	0xc1, 0xc8, 0x41, 0x1e, 0x14, 0x20, 0x0f, 0x4d, 0x0e, 0xe7, 0x20, 0x0f, 0x0b, 0x90, 0xcf, 0x8c,
+	0xbb, 0x73, 0x90, 0xcf, 0xba, 0xdf, 0xcc, 0xc1, 0xa2, 0xf6, 0x0b, 0x17, 0x6a, 0x5b, 0x29, 0x41,
+	0xfb, 0xa1, 0x22, 0xb2, 0x50, 0xbf, 0xa5, 0x62, 0xfd, 0xae, 0x02, 0x68, 0x57, 0x9f, 0x45, 0x0e,
+	0x8e, 0x62, 0x50, 0x1f, 0xb9, 0x2f, 0x0f, 0xb5, 0x5f, 0xdf, 0xbe, 0xba, 0x1b, 0xc5, 0xea, 0xfe,
+	0x31, 0x2c, 0xca, 0x71, 0xed, 0xbe, 0x5d, 0x89, 0x5b, 0xb2, 0x20, 0xd1, 0x7b, 0x49, 0xea, 0xda,
+	0x39, 0x8b, 0x2b, 0xbd, 0x91, 0x48, 0x0e, 0xdf, 0x6b, 0xa9, 0xef, 0xbd, 0xa2, 0xd4, 0xdf, 0x74,
+	0xec, 0xe5, 0xab, 0x1d, 0x3d, 0x81, 0xc5, 0xbc, 0x1a, 0xe7, 0xc5, 0x56, 0x67, 0xfe, 0xcd, 0xa4,
+	0xb1, 0x90, 0x53, 0xf3, 0x53, 0x93, 0x9b, 0x54, 0x3a, 0xa3, 0xd0, 0x57, 0xd4, 0x73, 0xa5, 0xea,
+	0x80, 0x71, 0x7e, 0x93, 0xca, 0x67, 0x89, 0x08, 0xad, 0x83, 0x35, 0x1e, 0x37, 0xb5, 0x44, 0x71,
+	0xa7, 0x19, 0x5f, 0x26, 0x12, 0xf9, 0x13, 0x32, 0xea, 0x61, 0xf4, 0x03, 0xb8, 0x89, 0x2f, 0x98,
+	0x3b, 0xa2, 0x9e, 0xe3, 0x7a, 0x1e, 0x91, 0x52, 0x1b, 0xa7, 0xab, 0xd5, 0xf1, 0xa9, 0x54, 0x9d,
+	0x96, 0xd1, 0xdd, 0x89, 0x21, 0xdb, 0x06, 0x11, 0x97, 0xf3, 0x97, 0x54, 0x2a, 0xf4, 0x08, 0x56,
+	0xa4, 0x7e, 0xf8, 0x4c, 0x9f, 0xdd, 0x36, 0xb3, 0xaf, 0x47, 0x88, 0x89, 0xc9, 0xdd, 0x3f, 0x94,
+	0x61, 0x69, 0x7c, 0x4b, 0xcb, 0xe4, 0xed, 0x15, 0x5d, 0xb4, 0xdb, 0xe9, 0x45, 0x7b, 0xe2, 0x09,
+	0x55, 0xf9, 0x0f, 0x9e, 0x50, 0xb3, 0xaf, 0x3c, 0x66, 0xe6, 0x72, 0xb9, 0xb7, 0x0b, 0xf3, 0x67,
+	0x4e, 0xee, 0x7c, 0xab, 0x5e, 0x2a, 0xf9, 0x5b, 0x67, 0x47, 0x99, 0x57, 0xd4, 0xdf, 0xaa, 0xb0,
+	0xb4, 0x77, 0xf0, 0x7c, 0xdf, 0xd0, 0x6f, 0xc6, 0x49, 0xf9, 0xea, 0x2d, 0xbd, 0xa6, 0x7a, 0xcb,
+	0xf9, 0xea, 0xcd, 0xb3, 0x42, 0x44, 0x6c, 0x19, 0x56, 0xb8, 0x64, 0x71, 0xdf, 0x81, 0x56, 0x72,
+	0x92, 0xa8, 0x8b, 0x80, 0x98, 0xad, 0x37, 0xec, 0x66, 0x2c, 0x3b, 0xbe, 0x08, 0x08, 0xba, 0x0f,
+	0x1f, 0x84, 0x4c, 0xea, 0x5d, 0x50, 0x45, 0xb0, 0x33, 0x14, 0x2e, 0x53, 0x91, 0xb5, 0x11, 0xc9,
+	0x5d, 0xcb, 0x8c, 0x3e, 0xd1, 0x83, 0xc6, 0xf2, 0xef, 0x80, 0xc5, 0xf8, 0x88, 0xea, 0x38, 0x51,
+	0xa6, 0x88, 0x78, 0xe1, 0xfa, 0x31, 0xe3, 0x2d, 0xc4, 0xf2, 0x5e, 0x2c, 0x46, 0x5b, 0xb0, 0xac,
+	0xb8, 0x4f, 0x84, 0xab, 0x22, 0x17, 0xfb, 0xce, 0x2f, 0xa9, 0x52, 0x44, 0x98, 0x1a, 0x6f, 0xdb,
+	0x4b, 0xe3, 0xc1, 0x03, 0xee, 0xfb, 0x3f, 0x32, 0x43, 0xe8, 0x87, 0x70, 0x53, 0x90, 0xb3, 0x90,
+	0x48, 0xe5, 0x28, 0xe1, 0x32, 0x39, 0xa2, 0x52, 0x52, 0xce, 0x92, 0x08, 0x35, 0xcc, 0xcc, 0x1b,
+	0x31, 0xe4, 0x38, 0x83, 0x88, 0xc9, 0x60, 0x15, 0x80, 0x85, 0x23, 0xed, 0x76, 0xa2, 0xa4, 0x29,
+	0xbd, 0xb6, 0x5d, 0x67, 0xe1, 0xe8, 0xf0, 0x88, 0x28, 0x89, 0x3e, 0xcf, 0x51, 0xb5, 0x34, 0x35,
+	0xd7, 0xdc, 0xba, 0x91, 0x0f, 0x78, 0xe6, 0xb4, 0xca, 0xb2, 0xb8, 0x9c, 0x4e, 0x98, 0xad, 0x2b,
+	0x21, 0xcc, 0x76, 0x14, 0xe6, 0x69, 0x84, 0x39, 0x6f, 0x86, 0x5e, 0x4d, 0x98, 0x0b, 0x57, 0x40,
+	0x98, 0xd6, 0x95, 0x11, 0xe6, 0xe2, 0xdb, 0x13, 0x66, 0xf7, 0x4f, 0xb3, 0xd0, 0x3c, 0x4e, 0x2f,
+	0x27, 0x08, 0xc1, 0x2c, 0x73, 0x47, 0x49, 0xd1, 0x98, 0x36, 0xea, 0x40, 0xed, 0x05, 0x11, 0x3a,
+	0xd0, 0x09, 0x77, 0xc4, 0x5d, 0x9d, 0xe9, 0xc9, 0x5d, 0xc7, 0x64, 0x7a, 0x74, 0x18, 0x36, 0x63,
+	0x99, 0xc9, 0xf4, 0x2e, 0xb4, 0x75, 0x52, 0x98, 0x4b, 0x0b, 0x17, 0x4a, 0x26, 0x77, 0x01, 0x16,
+	0x8e, 0xe2, 0x53, 0x59, 0xa2, 0xa7, 0x60, 0xd1, 0xf8, 0x3e, 0x94, 0x90, 0xa4, 0x29, 0x9a, 0x89,
+	0x27, 0x7f, 0xe1, 0xd6, 0x64, 0x2f, 0xd0, 0xc2, 0x35, 0x6a, 0x17, 0x5a, 0xa1, 0x74, 0xd2, 0x0f,
+	0x07, 0x55, 0xa3, 0xe5, 0xce, 0x2b, 0x3e, 0x1c, 0xa4, 0x94, 0x61, 0x37, 0x43, 0x99, 0x7e, 0xcd,
+	0xd8, 0x85, 0x16, 0xce, 0x6a, 0xa9, 0x5d, 0x5a, 0x0b, 0xce, 0x68, 0x19, 0xc2, 0x5a, 0x18, 0x48,
+	0x25, 0x88, 0x9b, 0x6e, 0xdf, 0x71, 0x13, 0x70, 0x74, 0x0a, 0xd4, 0xcd, 0x15, 0xb3, 0xf0, 0xb8,
+	0x99, 0xb8, 0xad, 0xd8, 0xab, 0x89, 0xa2, 0xe2, 0x90, 0x39, 0x68, 0xbe, 0x82, 0x2e, 0xe6, 0xe7,
+	0xec, 0x0d, 0x4b, 0x35, 0x2e, 0xb7, 0xd4, 0x87, 0xa9, 0xaa, 0x69, 0x8b, 0x75, 0xff, 0x5e, 0x81,
+	0x85, 0xbd, 0x80, 0xb3, 0xff, 0xa1, 0xa4, 0xd1, 0x06, 0xb9, 0xde, 0x57, 0xee, 0x30, 0x36, 0xa8,
+	0x1a, 0x1b, 0x14, 0xc9, 0x8c, 0x41, 0x18, 0x6e, 0x8d, 0x63, 0x19, 0x7d, 0x07, 0x29, 0x78, 0xb7,
+	0x66, 0xbc, 0x5b, 0x48, 0x91, 0x29, 0x67, 0x93, 0xbd, 0x92, 0xe8, 0xc9, 0x0f, 0x98, 0x40, 0x9e,
+	0xc2, 0xed, 0x4c, 0x20, 0xa7, 0xae, 0x53, 0xbf, 0xec, 0x3a, 0xab, 0xa9, 0xa6, 0xc9, 0x95, 0xba,
+	0xff, 0x9a, 0x85, 0xa5, 0x4c, 0x04, 0x13, 0x17, 0xbd, 0x65, 0x24, 0xef, 0xc1, 0xb2, 0x0c, 0xfb,
+	0xd2, 0x13, 0xb4, 0x4f, 0x84, 0x43, 0x31, 0x61, 0x8a, 0x0e, 0x68, 0xfc, 0xbd, 0xae, 0x61, 0x5f,
+	0x4b, 0x07, 0x7b, 0xe3, 0xb1, 0x89, 0xf0, 0xcf, 0x5e, 0x22, 0xfc, 0x73, 0x97, 0x0b, 0x7f, 0xf5,
+	0x4a, 0x38, 0xa3, 0x76, 0x25, 0x9c, 0x51, 0x7f, 0x6f, 0x9c, 0xd1, 0xf8, 0xf6, 0x38, 0x03, 0xae,
+	0x86, 0x33, 0x7e, 0x3b, 0x0b, 0xd7, 0x0b, 0x9c, 0xf1, 0x5f, 0x98, 0x71, 0xd9, 0x3b, 0x74, 0x35,
+	0x7f, 0x87, 0x9e, 0x96, 0x8c, 0xb5, 0x2b, 0xe1, 0xa2, 0xfa, 0x3b, 0x70, 0x51, 0xe3, 0x5b, 0xe2,
+	0x22, 0xb8, 0x1a, 0x2e, 0xfa, 0xab, 0xf9, 0x76, 0x2c, 0x79, 0x28, 0xbc, 0x34, 0x2d, 0x96, 0x60,
+	0x4e, 0x05, 0xc9, 0xd3, 0xbc, 0x6d, 0xcf, 0xaa, 0xa0, 0x87, 0x27, 0x02, 0x59, 0x9e, 0x0c, 0xe4,
+	0x3b, 0x25, 0x48, 0x36, 0xb2, 0xb3, 0xf9, 0xc8, 0xde, 0x86, 0x66, 0xfa, 0x22, 0xd0, 0x69, 0x51,
+	0x59, 0x6f, 0xdb, 0x30, 0x7e, 0x12, 0xc8, 0x8d, 0xcf, 0xa1, 0x31, 0x7e, 0x71, 0xa1, 0x16, 0xd4,
+	0x7f, 0x72, 0x70, 0x74, 0x6c, 0xef, 0x6d, 0x3f, 0xb3, 0x66, 0xd0, 0x3c, 0xc0, 0xee, 0xf3, 0x93,
+	0xfd, 0xb8, 0x5f, 0x42, 0x8b, 0xd0, 0xde, 0xe9, 0xed, 0xf6, 0xec, 0xbd, 0xc7, 0xc7, 0xbd, 0xe7,
+	0xfb, 0xdb, 0x5f, 0x5a, 0xe5, 0x8d, 0x47, 0x60, 0x15, 0xef, 0xab, 0xa8, 0x06, 0x95, 0x13, 0xdb,
+	0xb6, 0x66, 0x10, 0x82, 0xf9, 0x23, 0x25, 0xa8, 0xa7, 0x0e, 0xe2, 0xab, 0xa9, 0x55, 0x42, 0x00,
+	0xd5, 0xa7, 0x17, 0x7d, 0x41, 0xb1, 0x55, 0xde, 0x60, 0xd0, 0xca, 0x3e, 0xcb, 0xd0, 0x32, 0x2c,
+	0x66, 0xfb, 0xce, 0x3e, 0x67, 0xc4, 0x9a, 0x41, 0x4b, 0xb0, 0x90, 0x17, 0x6f, 0x5b, 0x25, 0x74,
+	0x13, 0xae, 0xe7, 0x84, 0x3b, 0x44, 0xaa, 0xbd, 0xc1, 0x80, 0x0b, 0x65, 0x95, 0x27, 0x14, 0x6d,
+	0x87, 0x8a, 0x5b, 0x95, 0x8d, 0x2f, 0xc6, 0x5f, 0xa9, 0x63, 0x4b, 0x5b, 0x50, 0x4f, 0xbe, 0x19,
+	0x5b, 0x33, 0xa8, 0x0d, 0x8d, 0x93, 0x71, 0xb7, 0xa4, 0xb7, 0x61, 0x13, 0x6c, 0x95, 0x51, 0x1d,
+	0x66, 0x4f, 0x74, 0xab, 0xb2, 0xf1, 0xbb, 0x12, 0xac, 0xbe, 0xee, 0x2f, 0x13, 0xfa, 0x18, 0xee,
+	0xbc, 0x6e, 0x3c, 0xd9, 0xd1, 0x3a, 0xfc, 0xdf, 0x6b, 0x61, 0xdb, 0x52, 0x86, 0x82, 0x60, 0xab,
+	0x84, 0xbe, 0x0b, 0x9f, 0xbc, 0x16, 0x99, 0xdd, 0xf6, 0xce, 0xcf, 0x60, 0x8d, 0x8b, 0xe1, 0x5d,
+	0x1e, 0x10, 0xe6, 0x71, 0x81, 0xef, 0x46, 0x3f, 0x3d, 0x73, 0xe9, 0xfd, 0xf3, 0xfb, 0x43, 0xaa,
+	0x4e, 0xc3, 0xfe, 0x5d, 0x8f, 0x8f, 0x36, 0x13, 0xe0, 0x66, 0x04, 0xfc, 0x5e, 0xfc, 0x77, 0xf4,
+	0xc5, 0x83, 0xcd, 0x21, 0xcf, 0xfd, 0x23, 0xed, 0x57, 0xcd, 0xd0, 0xbd, 0x7f, 0x07, 0x00, 0x00,
+	0xff, 0xff, 0xc9, 0xfb, 0x08, 0x8b, 0x48, 0x1d, 0x00, 0x00,
 }
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/adapter.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/adapter.pb.go
similarity index 66%
rename from vendor/github.com/opencord/voltha-protos/v4/go/voltha/adapter.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/voltha/adapter.pb.go
index bb868a4..85aeb0a 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/adapter.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/adapter.pb.go
@@ -7,8 +7,7 @@
 	fmt "fmt"
 	proto "github.com/golang/protobuf/proto"
 	any "github.com/golang/protobuf/ptypes/any"
-	timestamp "github.com/golang/protobuf/ptypes/timestamp"
-	_ "github.com/opencord/voltha-protos/v4/go/common"
+	_ "github.com/opencord/voltha-protos/v5/go/common"
 	math "math"
 )
 
@@ -76,10 +75,10 @@
 	AdditionalDescription *any.Any `protobuf:"bytes,64,opt,name=additional_description,json=additionalDescription,proto3" json:"additional_description,omitempty"`
 	LogicalDeviceIds      []string `protobuf:"bytes,4,rep,name=logical_device_ids,json=logicalDeviceIds,proto3" json:"logical_device_ids,omitempty"`
 	// timestamp when the adapter last sent a message to the core
-	LastCommunication *timestamp.Timestamp `protobuf:"bytes,5,opt,name=last_communication,json=lastCommunication,proto3" json:"last_communication,omitempty"`
-	CurrentReplica    int32                `protobuf:"varint,6,opt,name=currentReplica,proto3" json:"currentReplica,omitempty"`
-	TotalReplicas     int32                `protobuf:"varint,7,opt,name=totalReplicas,proto3" json:"totalReplicas,omitempty"`
-	Endpoint          string               `protobuf:"bytes,8,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
+	LastCommunication int64  `protobuf:"varint,5,opt,name=last_communication,json=lastCommunication,proto3" json:"last_communication,omitempty"`
+	CurrentReplica    int32  `protobuf:"varint,6,opt,name=currentReplica,proto3" json:"currentReplica,omitempty"`
+	TotalReplicas     int32  `protobuf:"varint,7,opt,name=totalReplicas,proto3" json:"totalReplicas,omitempty"`
+	Endpoint          string `protobuf:"bytes,8,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
 	// all replicas of the same adapter will have the same type
 	// it is used to associate a device to an adapter
 	Type                 string   `protobuf:"bytes,9,opt,name=type,proto3" json:"type,omitempty"`
@@ -155,11 +154,11 @@
 	return nil
 }
 
-func (m *Adapter) GetLastCommunication() *timestamp.Timestamp {
+func (m *Adapter) GetLastCommunication() int64 {
 	if m != nil {
 		return m.LastCommunication
 	}
-	return nil
+	return 0
 }
 
 func (m *Adapter) GetCurrentReplica() int32 {
@@ -238,34 +237,33 @@
 func init() { proto.RegisterFile("voltha_protos/adapter.proto", fileDescriptor_7e998ce153307274) }
 
 var fileDescriptor_7e998ce153307274 = []byte{
-	// 455 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0xdd, 0x6e, 0xd3, 0x30,
-	0x14, 0x56, 0xda, 0xf5, 0xcf, 0x53, 0xa1, 0x33, 0x14, 0x99, 0xa2, 0x69, 0x51, 0x05, 0x28, 0x17,
-	0x2c, 0x11, 0x83, 0x07, 0xa0, 0xdd, 0x6e, 0x76, 0x6b, 0x4d, 0x5c, 0x70, 0x53, 0xb9, 0xb6, 0x97,
-	0x59, 0x4a, 0x7c, 0xa2, 0xd8, 0x8d, 0xd4, 0x87, 0xe4, 0x05, 0x78, 0x02, 0x9e, 0x80, 0x6b, 0x54,
-	0xdb, 0xa1, 0x3f, 0x48, 0xbb, 0x4a, 0xce, 0xf7, 0x73, 0x3e, 0x9f, 0x63, 0xa3, 0x77, 0x0d, 0x14,
-	0xf6, 0x89, 0xad, 0xaa, 0x1a, 0x2c, 0x98, 0x8c, 0x09, 0x56, 0x59, 0x59, 0xa7, 0xae, 0xc4, 0x7d,
-	0x4f, 0xce, 0xde, 0xe6, 0x00, 0x79, 0x21, 0x33, 0x87, 0xae, 0x37, 0x8f, 0x19, 0xd3, 0x5b, 0x2f,
-	0x99, 0x91, 0x63, 0x7f, 0x29, 0x2d, 0x0b, 0xcc, 0xd5, 0xa9, 0xc9, 0xaa, 0x52, 0x1a, 0xcb, 0xca,
-	0xca, 0x0b, 0xe6, 0x14, 0x8d, 0x17, 0x3e, 0xee, 0x16, 0xf4, 0xa3, 0xca, 0xf1, 0x02, 0x5d, 0x30,
-	0x21, 0x94, 0x55, 0xa0, 0x59, 0xb1, 0xe2, 0x0e, 0x24, 0xdf, 0xe2, 0x28, 0x39, 0xbf, 0x79, 0x9d,
-	0xfa, 0x6e, 0x69, 0xdb, 0x2d, 0x5d, 0xe8, 0x2d, 0x9d, 0xec, 0xe5, 0xbe, 0xc5, 0xfc, 0x57, 0x17,
-	0x0d, 0x42, 0x53, 0x3c, 0x45, 0x1d, 0x25, 0x48, 0x14, 0x47, 0xc9, 0x68, 0xd9, 0xfb, 0xfd, 0xe7,
-	0xe7, 0x65, 0x44, 0x3b, 0x4a, 0xe0, 0x4b, 0xd4, 0x6f, 0xa4, 0x16, 0x50, 0x93, 0xce, 0x21, 0x15,
-	0x40, 0x7c, 0x85, 0x06, 0x8d, 0xac, 0x8d, 0x02, 0x4d, 0xba, 0x87, 0x7c, 0x8b, 0xe2, 0x6b, 0xd4,
-	0x0f, 0x47, 0x9b, 0xb8, 0xa3, 0x4d, 0x53, 0xbf, 0x82, 0xf4, 0x68, 0x18, 0x1a, 0x44, 0x98, 0xa2,
-	0x37, 0x07, 0x43, 0x09, 0x69, 0x78, 0xad, 0xaa, 0x5d, 0xf5, 0xdc, 0x64, 0x6d, 0xe8, 0x74, 0x6f,
-	0xbd, 0xdb, 0x3b, 0xf1, 0x27, 0x84, 0x0b, 0xc8, 0x15, 0x77, 0x0d, 0x1b, 0xc5, 0xe5, 0x4a, 0x09,
-	0x43, 0xce, 0xe2, 0x6e, 0x32, 0xa2, 0x93, 0xc0, 0xdc, 0x39, 0xe2, 0x5e, 0x18, 0x7c, 0x8f, 0x70,
-	0xc1, 0x8c, 0x5d, 0x71, 0x28, 0xcb, 0x8d, 0x56, 0x9c, 0xb9, 0xf4, 0x9e, 0x4b, 0x9f, 0xfd, 0x97,
-	0xfe, 0xd0, 0xde, 0x12, 0xbd, 0xd8, 0xb9, 0x6e, 0x0f, 0x4d, 0xf8, 0x23, 0x7a, 0xc1, 0x37, 0x75,
-	0x2d, 0xb5, 0xa5, 0xb2, 0x2a, 0x14, 0x67, 0xa4, 0x1f, 0x47, 0x49, 0x8f, 0x9e, 0xa0, 0xf8, 0x3d,
-	0x1a, 0x5b, 0xb0, 0xac, 0x08, 0xb5, 0x21, 0x03, 0x27, 0x3b, 0x06, 0xf1, 0x0c, 0x0d, 0xa5, 0x16,
-	0x15, 0x28, 0x6d, 0xc9, 0x70, 0xb7, 0x6b, 0xfa, 0xaf, 0xc6, 0x18, 0x9d, 0xd9, 0x6d, 0x25, 0xc9,
-	0xc8, 0xe1, 0xee, 0x7f, 0xfe, 0x19, 0x0d, 0xc3, 0x8e, 0x0d, 0xfe, 0x80, 0x7a, 0xca, 0xca, 0xd2,
-	0x90, 0x28, 0xee, 0x26, 0xe7, 0x37, 0x2f, 0x4f, 0x2e, 0x81, 0x7a, 0x76, 0xf9, 0x80, 0x5e, 0x41,
-	0x9d, 0xa7, 0x50, 0x49, 0xcd, 0xa1, 0x16, 0x41, 0xb5, 0x1c, 0x7f, 0x77, 0xdf, 0x20, 0xfe, 0x91,
-	0xe6, 0xca, 0x3e, 0x6d, 0xd6, 0x29, 0x87, 0x32, 0x6b, 0xa5, 0x99, 0x97, 0x5e, 0x87, 0x87, 0xdd,
-	0x7c, 0xcd, 0x72, 0x08, 0xd8, 0xba, 0xef, 0xc0, 0x2f, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x07,
-	0xa4, 0x0d, 0x2b, 0x3d, 0x03, 0x00, 0x00,
+	// 439 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x30,
+	0x14, 0x45, 0x95, 0x66, 0x9a, 0xb6, 0x6f, 0x54, 0xe8, 0x18, 0x8a, 0x4c, 0xd1, 0x88, 0xa8, 0x02,
+	0x94, 0x05, 0x4d, 0xc4, 0x20, 0xf6, 0xb4, 0x33, 0x1b, 0xb6, 0x16, 0x62, 0xc1, 0xa6, 0x72, 0x6d,
+	0x4f, 0xc6, 0x52, 0xe2, 0x17, 0x25, 0x6e, 0xa4, 0xfe, 0x1c, 0x7f, 0xc0, 0x7f, 0xf0, 0x05, 0xac,
+	0x51, 0x1d, 0x97, 0xb6, 0xb3, 0x98, 0x55, 0x7b, 0xef, 0x3d, 0xbe, 0xf6, 0x7b, 0x0a, 0xbc, 0x69,
+	0xb1, 0xb0, 0x0f, 0x7c, 0x5d, 0xd5, 0x68, 0xb1, 0xc9, 0xb8, 0xe4, 0x95, 0x55, 0x75, 0xea, 0x24,
+	0x89, 0xba, 0x70, 0x46, 0xcf, 0xa1, 0x52, 0x59, 0xde, 0x11, 0xb3, 0xd7, 0x39, 0x62, 0x5e, 0xa8,
+	0xcc, 0xa9, 0xcd, 0xf6, 0x3e, 0xe3, 0x66, 0xd7, 0x45, 0x73, 0x06, 0xe3, 0x65, 0xd7, 0x76, 0x8b,
+	0xe6, 0x5e, 0xe7, 0x64, 0x09, 0x57, 0x5c, 0x4a, 0x6d, 0x35, 0x1a, 0x5e, 0xac, 0x85, 0x33, 0xe9,
+	0xd7, 0x38, 0x48, 0x2e, 0x6f, 0x5e, 0xa6, 0x5d, 0x4f, 0x7a, 0xe8, 0x49, 0x97, 0x66, 0xc7, 0x26,
+	0x47, 0xbc, 0xab, 0x98, 0xff, 0x0a, 0x61, 0xe0, 0x4b, 0xc9, 0x14, 0x7a, 0x5a, 0xd2, 0x20, 0x0e,
+	0x92, 0xd1, 0xaa, 0xff, 0xe7, 0xef, 0xef, 0xeb, 0x80, 0xf5, 0xb4, 0x24, 0xd7, 0x10, 0xb5, 0xca,
+	0x48, 0xac, 0x69, 0xef, 0x34, 0xf2, 0x26, 0x79, 0x0b, 0x83, 0x56, 0xd5, 0x8d, 0x46, 0x43, 0xc3,
+	0xd3, 0xfc, 0xe0, 0x92, 0x05, 0x44, 0xfe, 0x69, 0x13, 0xf7, 0xb4, 0x69, 0xda, 0x0d, 0x9f, 0x9e,
+	0x0d, 0xc3, 0x3c, 0x44, 0x18, 0xbc, 0x3a, 0x19, 0x4a, 0xaa, 0x46, 0xd4, 0xba, 0xda, 0xab, 0xa7,
+	0x26, 0x3b, 0x5c, 0x3a, 0x3d, 0x1e, 0xbd, 0x3b, 0x9e, 0x24, 0x1f, 0x81, 0x14, 0x98, 0x6b, 0xe1,
+	0x0a, 0x5b, 0x2d, 0xd4, 0x5a, 0xcb, 0x86, 0x5e, 0xc4, 0x61, 0x32, 0x62, 0x13, 0x9f, 0xdc, 0xb9,
+	0xe0, 0x9b, 0x6c, 0xc8, 0x02, 0x48, 0xc1, 0x1b, 0xbb, 0x16, 0x58, 0x96, 0x5b, 0xa3, 0x05, 0x77,
+	0xb7, 0xf7, 0xe3, 0x20, 0x09, 0xd9, 0xd5, 0x3e, 0xb9, 0x3d, 0x0d, 0xc8, 0x07, 0x78, 0x26, 0xb6,
+	0x75, 0xad, 0x8c, 0x65, 0xaa, 0x2a, 0xb4, 0xe0, 0x34, 0x8a, 0x83, 0xa4, 0xcf, 0x1e, 0xb9, 0xe4,
+	0x1d, 0x8c, 0x2d, 0x5a, 0x5e, 0x78, 0xdd, 0xd0, 0x81, 0xc3, 0xce, 0x4d, 0x32, 0x83, 0xa1, 0x32,
+	0xb2, 0x42, 0x6d, 0x2c, 0x1d, 0xee, 0xf7, 0xc9, 0xfe, 0x6b, 0x42, 0xe0, 0xc2, 0xee, 0x2a, 0x45,
+	0x47, 0xce, 0x77, 0xff, 0xe7, 0x9f, 0x60, 0xe8, 0xf7, 0xd8, 0x90, 0xf7, 0xd0, 0xd7, 0x56, 0x95,
+	0x0d, 0x0d, 0xe2, 0x30, 0xb9, 0xbc, 0x79, 0xfe, 0x68, 0xd1, 0xac, 0x4b, 0x57, 0xdf, 0xe1, 0x05,
+	0xd6, 0x79, 0x8a, 0x95, 0x32, 0x02, 0x6b, 0xe9, 0xa9, 0xd5, 0xf8, 0x87, 0xfb, 0xf5, 0xf0, 0xcf,
+	0x34, 0xd7, 0xf6, 0x61, 0xbb, 0x49, 0x05, 0x96, 0xd9, 0x01, 0xcd, 0x3a, 0x74, 0xe1, 0x3f, 0xdb,
+	0xf6, 0x4b, 0x96, 0xa3, 0xf7, 0x36, 0x91, 0x33, 0x3f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xe1,
+	0x88, 0x17, 0x78, 0x00, 0x03, 0x00, 0x00,
 }
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/device.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/device.pb.go
similarity index 78%
rename from vendor/github.com/opencord/voltha-protos/v4/go/voltha/device.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/voltha/device.pb.go
index 957af8f..7da41c9 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/device.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/device.pb.go
@@ -7,8 +7,8 @@
 	fmt "fmt"
 	proto "github.com/golang/protobuf/proto"
 	any "github.com/golang/protobuf/ptypes/any"
-	common "github.com/opencord/voltha-protos/v4/go/common"
-	openflow_13 "github.com/opencord/voltha-protos/v4/go/openflow_13"
+	common "github.com/opencord/voltha-protos/v5/go/common"
+	openflow_13 "github.com/opencord/voltha-protos/v5/go/openflow_13"
 	math "math"
 )
 
@@ -386,14 +386,17 @@
 	VendorId  string   `protobuf:"bytes,5,opt,name=vendor_id,json=vendorId,proto3" json:"vendor_id,omitempty"`
 	VendorIds []string `protobuf:"bytes,6,rep,name=vendor_ids,json=vendorIds,proto3" json:"vendor_ids,omitempty"`
 	// Name of the adapter that handles device type
+	// Deprecated and replaced by adapterType
 	Adapter string `protobuf:"bytes,2,opt,name=adapter,proto3" json:"adapter,omitempty"`
 	// Capabilities
-	AcceptsBulkFlowUpdate           bool     `protobuf:"varint,3,opt,name=accepts_bulk_flow_update,json=acceptsBulkFlowUpdate,proto3" json:"accepts_bulk_flow_update,omitempty"`
-	AcceptsAddRemoveFlowUpdates     bool     `protobuf:"varint,4,opt,name=accepts_add_remove_flow_updates,json=acceptsAddRemoveFlowUpdates,proto3" json:"accepts_add_remove_flow_updates,omitempty"`
-	AcceptsDirectLogicalFlowsUpdate bool     `protobuf:"varint,7,opt,name=accepts_direct_logical_flows_update,json=acceptsDirectLogicalFlowsUpdate,proto3" json:"accepts_direct_logical_flows_update,omitempty"`
-	XXX_NoUnkeyedLiteral            struct{} `json:"-"`
-	XXX_unrecognized                []byte   `json:"-"`
-	XXX_sizecache                   int32    `json:"-"`
+	AcceptsBulkFlowUpdate           bool `protobuf:"varint,3,opt,name=accepts_bulk_flow_update,json=acceptsBulkFlowUpdate,proto3" json:"accepts_bulk_flow_update,omitempty"`
+	AcceptsAddRemoveFlowUpdates     bool `protobuf:"varint,4,opt,name=accepts_add_remove_flow_updates,json=acceptsAddRemoveFlowUpdates,proto3" json:"accepts_add_remove_flow_updates,omitempty"`
+	AcceptsDirectLogicalFlowsUpdate bool `protobuf:"varint,7,opt,name=accepts_direct_logical_flows_update,json=acceptsDirectLogicalFlowsUpdate,proto3" json:"accepts_direct_logical_flows_update,omitempty"`
+	// Type of adapter that can handle this device type
+	AdapterType          string   `protobuf:"bytes,8,opt,name=adapter_type,json=adapterType,proto3" json:"adapter_type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *DeviceType) Reset()         { *m = DeviceType{} }
@@ -470,6 +473,13 @@
 	return false
 }
 
+func (m *DeviceType) GetAdapterType() string {
+	if m != nil {
+		return m.AdapterType
+	}
+	return ""
+}
+
 // A plurality of device types
 type DeviceTypes struct {
 	Items                []*DeviceType `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
@@ -1588,7 +1598,10 @@
 	SerialNumber string  `protobuf:"bytes,10,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
 	VendorId     string  `protobuf:"bytes,24,opt,name=vendor_id,json=vendorId,proto3" json:"vendor_id,omitempty"`
 	// Adapter that takes care of device
+	// Deprecated and replaced by adapter_endpoint
 	Adapter string `protobuf:"bytes,11,opt,name=adapter,proto3" json:"adapter,omitempty"`
+	// Indicates how to reach the adapter instance that manages this device
+	AdapterEndpoint string `protobuf:"bytes,25,opt,name=adapter_endpoint,json=adapterEndpoint,proto3" json:"adapter_endpoint,omitempty"`
 	// Device contact on vlan (if 0, no vlan)
 	Vlan uint32 `protobuf:"varint,12,opt,name=vlan,proto3" json:"vlan,omitempty"`
 	// Device contact MAC address (format: "xx:xx:xx:xx:xx:xx")
@@ -1731,6 +1744,13 @@
 	return ""
 }
 
+func (m *Device) GetAdapterEndpoint() string {
+	if m != nil {
+		return m.AdapterEndpoint
+	}
+	return ""
+}
+
 func (m *Device) GetVlan() uint32 {
 	if m != nil {
 		return m.Vlan
@@ -1875,6 +1895,7 @@
 	ChannelTermination   string   `protobuf:"bytes,5,opt,name=channel_termination,json=channelTermination,proto3" json:"channel_termination,omitempty"`
 	OnuId                uint32   `protobuf:"varint,6,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
 	OnuSessionId         uint32   `protobuf:"varint,7,opt,name=onu_session_id,json=onuSessionId,proto3" json:"onu_session_id,omitempty"`
+	AdapterEndpoint      string   `protobuf:"bytes,8,opt,name=adapter_endpoint,json=adapterEndpoint,proto3" json:"adapter_endpoint,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized     []byte   `json:"-"`
 	XXX_sizecache        int32    `json:"-"`
@@ -1954,6 +1975,13 @@
 	return 0
 }
 
+func (m *Device_ProxyAddress) GetAdapterEndpoint() string {
+	if m != nil {
+		return m.AdapterEndpoint
+	}
+	return ""
+}
+
 type Devices struct {
 	Items                []*Device `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
 	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
@@ -2140,174 +2168,177 @@
 func init() { proto.RegisterFile("voltha_protos/device.proto", fileDescriptor_200940f73d155856) }
 
 var fileDescriptor_200940f73d155856 = []byte{
-	// 2701 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0xcb, 0x72, 0x1b, 0xc7,
-	0xd5, 0x16, 0x40, 0x62, 0x80, 0x39, 0xb8, 0x70, 0xd4, 0x22, 0xa5, 0x11, 0x69, 0xfe, 0xd2, 0x0f,
-	0xd9, 0xbf, 0x69, 0xe9, 0x17, 0xa9, 0x48, 0x2e, 0xdb, 0x59, 0xb8, 0x4a, 0x20, 0x30, 0x94, 0xa6,
-	0x42, 0x0d, 0x98, 0x06, 0x40, 0xc7, 0xd9, 0x4c, 0x0d, 0x31, 0x4d, 0x72, 0x4a, 0x83, 0x19, 0xa8,
-	0x67, 0x40, 0x52, 0xde, 0xa5, 0x5c, 0xc9, 0x2a, 0x55, 0x59, 0x64, 0x95, 0x3c, 0x41, 0x96, 0xd9,
-	0x79, 0x99, 0xbc, 0x80, 0xab, 0xf2, 0x0c, 0xc9, 0x26, 0x0f, 0x90, 0xf2, 0x3a, 0xd5, 0xb7, 0xb9,
-	0x90, 0x92, 0xec, 0x54, 0xa5, 0x52, 0xd9, 0x90, 0xd3, 0xdf, 0xb9, 0x74, 0xf7, 0xe9, 0x3e, 0x5f,
-	0x9f, 0x6e, 0xc0, 0xfa, 0x59, 0x1c, 0xa6, 0xa7, 0x9e, 0x3b, 0xa7, 0x71, 0x1a, 0x27, 0x3b, 0x3e,
-	0x39, 0x0b, 0xa6, 0x64, 0x9b, 0xb7, 0x90, 0x26, 0x64, 0xeb, 0xb7, 0x4f, 0xe2, 0xf8, 0x24, 0x24,
-	0x3b, 0x1c, 0x3d, 0x5a, 0x1c, 0xef, 0x78, 0xd1, 0x6b, 0xa1, 0xb2, 0x7e, 0xc9, 0x7c, 0x1a, 0xcf,
-	0x66, 0x71, 0x24, 0x65, 0x66, 0x59, 0x36, 0x23, 0xa9, 0x27, 0x25, 0x77, 0xca, 0x92, 0x78, 0x4e,
-	0xa2, 0xe3, 0x30, 0x3e, 0x77, 0x7f, 0xf4, 0x44, 0x28, 0x74, 0xff, 0x54, 0x05, 0x18, 0xf0, 0xa1,
-	0x8c, 0x5f, 0xcf, 0x09, 0xea, 0x40, 0x35, 0xf0, 0xcd, 0xca, 0xdd, 0xca, 0x96, 0x8e, 0xab, 0x81,
-	0x8f, 0x36, 0x40, 0x3f, 0x23, 0x91, 0x1f, 0x53, 0x37, 0xf0, 0xcd, 0x1a, 0x87, 0x1b, 0x02, 0xb0,
-	0x7d, 0xb4, 0x09, 0x90, 0x09, 0x13, 0x53, 0xbb, 0xbb, 0xb4, 0xa5, 0x63, 0x5d, 0x49, 0x13, 0x64,
-	0x42, 0xdd, 0xf3, 0xbd, 0x79, 0x4a, 0xa8, 0x59, 0xe5, 0x96, 0xaa, 0x89, 0x3e, 0x05, 0xd3, 0x9b,
-	0x4e, 0xc9, 0x3c, 0x4d, 0xdc, 0xa3, 0x45, 0xf8, 0xd2, 0xe5, 0x43, 0x5a, 0xcc, 0x7d, 0x2f, 0x25,
-	0xe6, 0xd2, 0xdd, 0xca, 0x56, 0x03, 0xaf, 0x49, 0xf9, 0xee, 0x22, 0x7c, 0xb9, 0x17, 0xc6, 0xe7,
-	0x13, 0x2e, 0x44, 0x03, 0xb8, 0xa3, 0x0c, 0x3d, 0xdf, 0x77, 0x29, 0x99, 0xc5, 0x67, 0xa4, 0x68,
-	0x9e, 0x98, 0xcb, 0xdc, 0x7e, 0x43, 0xaa, 0xf5, 0x7c, 0x1f, 0x73, 0xa5, 0xdc, 0x49, 0x82, 0xf6,
-	0xe1, 0x9e, 0xf2, 0xe2, 0x07, 0x94, 0x4c, 0x53, 0x37, 0x8c, 0x4f, 0x82, 0xa9, 0x17, 0x72, 0x4f,
-	0x89, 0x1a, 0x49, 0x9d, 0x7b, 0x52, 0x1d, 0x0e, 0xb8, 0xe6, 0xbe, 0x50, 0x64, 0xde, 0x12, 0xe1,
-	0xae, 0xfb, 0x29, 0x34, 0xf3, 0x00, 0x26, 0x68, 0x0b, 0x6a, 0x41, 0x4a, 0x66, 0x89, 0x59, 0xb9,
-	0xbb, 0xb4, 0xd5, 0x7c, 0x8c, 0xb6, 0xc5, 0x0a, 0x6c, 0xe7, 0x3a, 0x58, 0x28, 0x74, 0xff, 0x5c,
-	0x81, 0xc6, 0xc1, 0xac, 0x1f, 0x47, 0xc7, 0xc1, 0x09, 0x42, 0xb0, 0x1c, 0x79, 0x33, 0x22, 0x43,
-	0xcf, 0xbf, 0xd1, 0x03, 0x58, 0x4e, 0x5f, 0xcf, 0x09, 0x8f, 0x5e, 0xe7, 0xf1, 0x2d, 0xe5, 0x49,
-	0xd9, 0x6c, 0x1f, 0xcc, 0xb8, 0x3b, 0xae, 0xc4, 0xa2, 0x4d, 0x22, 0xef, 0x28, 0x24, 0xbe, 0x0c,
-	0xa1, 0x6a, 0xa2, 0x3b, 0xd0, 0x4c, 0xbc, 0xd9, 0x3c, 0x24, 0xee, 0x31, 0x25, 0xaf, 0x78, 0x80,
-	0xda, 0x18, 0x04, 0xb4, 0x47, 0xc9, 0xab, 0xee, 0x67, 0xa0, 0x09, 0x57, 0xa8, 0x09, 0xf5, 0xfe,
-	0x70, 0xe2, 0x8c, 0x2d, 0x6c, 0x5c, 0x43, 0x3a, 0xd4, 0x9e, 0xf5, 0x26, 0xcf, 0x2c, 0xa3, 0xc2,
-	0x3e, 0x47, 0xe3, 0xde, 0xd8, 0x32, 0xaa, 0x42, 0xc5, 0x19, 0x5b, 0x3f, 0x1b, 0x1b, 0x4b, 0xdd,
-	0xdf, 0x56, 0xa0, 0x7d, 0x30, 0x7b, 0x46, 0xe3, 0xc5, 0x5c, 0xce, 0x63, 0x13, 0xe0, 0x84, 0x35,
-	0xdd, 0xc2, 0x6c, 0x74, 0x8e, 0x38, 0x6c, 0x4a, 0x99, 0x98, 0x0f, 0xa5, 0xca, 0x87, 0x22, 0xc4,
-	0x6c, 0x24, 0xef, 0x98, 0xc4, 0x7d, 0xa8, 0xcf, 0x48, 0x4a, 0x83, 0x29, 0x5b, 0x61, 0x16, 0x58,
-	0xe3, 0x72, 0x38, 0xb0, 0x52, 0xe8, 0xfe, 0xa2, 0x0a, 0xba, 0x42, 0x93, 0x2b, 0x5b, 0xfa, 0x7f,
-	0xa1, 0xe5, 0x93, 0x63, 0x6f, 0x11, 0xa6, 0xc5, 0x41, 0x34, 0x25, 0xc6, 0x87, 0x71, 0x07, 0xea,
-	0x7c, 0x4c, 0x6a, 0x18, 0xbb, 0xb5, 0xbf, 0x7f, 0xf7, 0xed, 0x66, 0x05, 0x2b, 0x14, 0xdd, 0x87,
-	0x36, 0xb3, 0x75, 0xe3, 0x33, 0x42, 0x69, 0xe0, 0x13, 0xb1, 0xeb, 0x94, 0x5a, 0x8b, 0xc9, 0x86,
-	0x52, 0x84, 0x1e, 0x82, 0xc6, 0xcd, 0x12, 0xb3, 0xc6, 0x07, 0xbe, 0x96, 0x0f, 0xbc, 0x10, 0x38,
-	0x2c, 0x95, 0x8a, 0x13, 0xd5, 0xbe, 0x67, 0xa2, 0xe8, 0x36, 0x34, 0x66, 0xde, 0x85, 0x9b, 0xbc,
-	0x24, 0xe7, 0x7c, 0xb7, 0xb6, 0x71, 0x7d, 0xe6, 0x5d, 0x8c, 0x5e, 0x92, 0xf3, 0xee, 0x6f, 0xaa,
-	0x50, 0xb3, 0x67, 0xde, 0x09, 0x79, 0xe3, 0xce, 0x32, 0xa1, 0x7e, 0x46, 0x68, 0x12, 0xc4, 0x91,
-	0x4a, 0x4d, 0xd9, 0x64, 0xda, 0xa7, 0x5e, 0x72, 0xca, 0xe7, 0xdd, 0xc6, 0xfc, 0x1b, 0x7d, 0x04,
-	0x46, 0x10, 0x25, 0xa9, 0x17, 0x86, 0x2e, 0xdb, 0xf1, 0x69, 0x30, 0x13, 0x13, 0xd6, 0xf1, 0x8a,
-	0xc4, 0x07, 0x12, 0x66, 0x7c, 0x11, 0x24, 0xae, 0x37, 0x4d, 0x83, 0x33, 0xc2, 0xf9, 0xa2, 0x81,
-	0x1b, 0x41, 0xd2, 0xe3, 0x6d, 0x16, 0xf9, 0x20, 0x71, 0x19, 0x73, 0x05, 0x69, 0x4a, 0x7c, 0x53,
-	0xe3, 0xf2, 0x66, 0x90, 0xf4, 0x15, 0xc4, 0x66, 0x14, 0x24, 0xee, 0x99, 0x17, 0x06, 0xbe, 0xcc,
-	0xbf, 0x7a, 0x90, 0x1c, 0xb2, 0x26, 0x32, 0x60, 0x69, 0x41, 0x43, 0xb3, 0xc1, 0x3b, 0x66, 0x9f,
-	0xe8, 0x26, 0x68, 0x82, 0x6d, 0x4c, 0x9d, 0x83, 0xb2, 0x85, 0x56, 0xa1, 0x36, 0xa5, 0xd3, 0x27,
-	0x8f, 0x4d, 0xe0, 0x93, 0x10, 0x8d, 0xee, 0xdf, 0xea, 0xd0, 0xe6, 0x11, 0x19, 0xc4, 0xe7, 0x51,
-	0x18, 0x7b, 0xfe, 0x95, 0x9d, 0xa1, 0x22, 0x55, 0x2d, 0x44, 0x4a, 0xf6, 0xba, 0x94, 0xf7, 0x6a,
-	0xc0, 0xd2, 0x94, 0x4e, 0x65, 0x1a, 0xb1, 0x4f, 0x34, 0x84, 0x8e, 0x2f, 0x7d, 0xba, 0x49, 0xca,
-	0xa8, 0xa3, 0xc6, 0x33, 0x76, 0x4b, 0xad, 0x5c, 0xa9, 0xdb, 0x72, 0x6b, 0xc4, 0xf4, 0x71, 0xdb,
-	0x2f, 0x36, 0xd1, 0x3d, 0x68, 0x07, 0x4c, 0xc9, 0x55, 0x8b, 0xa4, 0xf1, 0xee, 0x5b, 0x1c, 0x3c,
-	0x94, 0x2b, 0xf5, 0x11, 0x18, 0xca, 0x8a, 0xf8, 0xee, 0xd1, 0x6b, 0x46, 0x7e, 0x62, 0x13, 0xac,
-	0xe4, 0xf8, 0x2e, 0x83, 0xd1, 0x73, 0xd0, 0x28, 0xf1, 0x92, 0x38, 0xe2, 0xd1, 0xeb, 0x3c, 0x7e,
-	0xf4, 0x03, 0x06, 0xb6, 0xe7, 0x05, 0xe1, 0x82, 0x12, 0xcc, 0xed, 0xb0, 0xb4, 0x47, 0x1f, 0xc2,
-	0x8a, 0xe7, 0xfb, 0x41, 0x1a, 0xc4, 0x91, 0x17, 0xba, 0x41, 0x74, 0x1c, 0xcb, 0xd8, 0x77, 0x72,
-	0xd8, 0x8e, 0x8e, 0x63, 0x41, 0x3a, 0x67, 0xc4, 0x9d, 0xf2, 0x2d, 0xcb, 0x57, 0xa2, 0xc1, 0x48,
-	0xe7, 0x8c, 0x48, 0xa2, 0xd8, 0x00, 0x3d, 0x8c, 0x19, 0xe7, 0xfa, 0x01, 0x35, 0x9b, 0xe2, 0x64,
-	0xe1, 0xc0, 0x20, 0xa0, 0xc8, 0x86, 0xa6, 0x08, 0x80, 0x08, 0x67, 0xeb, 0x7b, 0xc3, 0xc9, 0x77,
-	0x98, 0x97, 0x12, 0x11, 0x4e, 0xe0, 0xc6, 0x22, 0x96, 0x1b, 0xa0, 0x1f, 0x07, 0x21, 0x71, 0x93,
-	0xe0, 0x2b, 0x62, 0xb6, 0x79, 0x7c, 0x1a, 0x0c, 0x18, 0x05, 0x5f, 0x91, 0xee, 0x37, 0x15, 0x40,
-	0x57, 0x97, 0x03, 0xad, 0x82, 0x31, 0x18, 0x7e, 0xe1, 0xec, 0x0f, 0x7b, 0x03, 0x77, 0xe2, 0xfc,
-	0xc4, 0x19, 0x7e, 0xe1, 0x18, 0xd7, 0xd0, 0x4d, 0x40, 0x19, 0x3a, 0x9a, 0xf4, 0xfb, 0x96, 0x35,
-	0xb0, 0x06, 0x46, 0xa5, 0x84, 0x63, 0xeb, 0xa7, 0x13, 0x6b, 0x34, 0xb6, 0x06, 0x46, 0xb5, 0xe4,
-	0x65, 0x34, 0xee, 0x61, 0x86, 0x2e, 0xa1, 0x1b, 0xb0, 0x92, 0xa1, 0x7b, 0x3d, 0x7b, 0xdf, 0x1a,
-	0x18, 0xcb, 0xc8, 0x84, 0xd5, 0x42, 0x87, 0xa3, 0xc9, 0xc1, 0xc1, 0x90, 0xab, 0xd7, 0x4a, 0xce,
-	0xfb, 0x3d, 0xa7, 0x6f, 0xed, 0x33, 0x0b, 0xad, 0xfb, 0xab, 0x0a, 0xac, 0xbf, 0x7d, 0xbd, 0x50,
-	0x0b, 0x1a, 0xce, 0xd0, 0xb5, 0x30, 0x1e, 0x32, 0x26, 0x5f, 0x81, 0xa6, 0xed, 0x1c, 0xf6, 0xf6,
-	0xed, 0x81, 0x3b, 0xc1, 0xfb, 0x46, 0x85, 0x01, 0x03, 0xeb, 0xd0, 0xee, 0x5b, 0xee, 0xee, 0x64,
-	0xf4, 0xa5, 0x51, 0x65, 0xdd, 0xd8, 0xce, 0x68, 0xb2, 0xb7, 0x67, 0xf7, 0x6d, 0xcb, 0x19, 0xbb,
-	0xa3, 0x83, 0x5e, 0xdf, 0x32, 0x96, 0xd0, 0x75, 0x68, 0xcb, 0x00, 0x48, 0x67, 0xcb, 0xa8, 0x0d,
-	0x7a, 0x3e, 0x90, 0x5a, 0xf7, 0xd7, 0x2a, 0x84, 0xa5, 0x25, 0x60, 0x86, 0xf6, 0x8b, 0xde, 0x33,
-	0xab, 0x10, 0x3f, 0x04, 0x1d, 0x01, 0xd9, 0x4e, 0xaf, 0x3f, 0xb6, 0x0f, 0xd9, 0xc1, 0xb2, 0x0a,
-	0x86, 0xc0, 0x38, 0xd2, 0x1b, 0xdb, 0xce, 0x33, 0xa3, 0x8a, 0x0c, 0x68, 0x15, 0x50, 0x4b, 0x44,
-	0x4d, 0x20, 0xd8, 0x3a, 0xb4, 0x30, 0x57, 0x5b, 0xce, 0x1d, 0x0a, 0x90, 0x0f, 0xe7, 0x73, 0xe8,
-	0x94, 0xc2, 0x92, 0xa0, 0x07, 0xea, 0x40, 0xae, 0x96, 0xe9, 0xb7, 0xa4, 0xa6, 0xce, 0xe4, 0x87,
-	0xa0, 0x71, 0x3c, 0x41, 0xf7, 0xa0, 0xc6, 0x77, 0x91, 0x3c, 0xc7, 0xdb, 0x25, 0x33, 0x2c, 0x64,
-	0xdd, 0x3f, 0x56, 0xa0, 0x31, 0x8c, 0x16, 0x82, 0x68, 0x0b, 0xa4, 0x5a, 0x29, 0x93, 0xea, 0xff,
-	0x00, 0x28, 0x92, 0x23, 0x3e, 0xa7, 0x97, 0x06, 0x2e, 0x20, 0x68, 0x1d, 0x32, 0x92, 0x94, 0xe7,
-	0x5e, 0x4e, 0x9a, 0x26, 0x28, 0x06, 0x94, 0xa5, 0x4d, 0x46, 0x88, 0x77, 0xa1, 0x39, 0xa7, 0xb1,
-	0xbf, 0x98, 0xa6, 0xfd, 0xd8, 0x27, 0xb2, 0x3a, 0x2b, 0x42, 0x19, 0x99, 0x0b, 0xfa, 0xe0, 0xdf,
-	0xdd, 0x27, 0xa0, 0xab, 0x11, 0x27, 0xe8, 0xff, 0xca, 0xc5, 0x4a, 0x76, 0xd4, 0x28, 0x0d, 0x15,
-	0x96, 0x29, 0x18, 0xa2, 0x7e, 0xb1, 0x4b, 0x89, 0x25, 0x6a, 0x58, 0x37, 0x23, 0xd1, 0x86, 0x00,
-	0x6c, 0x1f, 0x3d, 0x86, 0x42, 0x0e, 0xf2, 0x19, 0x17, 0x4a, 0xa1, 0xdc, 0x49, 0x31, 0x53, 0x19,
-	0x41, 0x43, 0xc1, 0xff, 0xdb, 0xc3, 0xb9, 0x7f, 0x85, 0x6f, 0x45, 0x85, 0xf4, 0xc1, 0xd5, 0x0e,
-	0x7e, 0x00, 0xd9, 0x7e, 0x9e, 0x91, 0xe3, 0xd2, 0xbb, 0xbd, 0xbc, 0x99, 0x11, 0x9f, 0x97, 0xa9,
-	0x6a, 0x99, 0xfb, 0xf8, 0xf0, 0x6d, 0x3e, 0x64, 0x92, 0x04, 0x71, 0x74, 0x75, 0xfe, 0x7f, 0xf9,
-	0xaf, 0x27, 0xa3, 0x5b, 0x70, 0xe3, 0x32, 0x19, 0xb1, 0x4c, 0xd4, 0xde, 0xc2, 0x52, 0xf5, 0xee,
-	0x3f, 0xd4, 0x94, 0xfe, 0x63, 0xec, 0x64, 0xc2, 0x6a, 0x36, 0x00, 0x77, 0xe8, 0xa8, 0x18, 0x18,
-	0x35, 0xb4, 0x0e, 0x37, 0x4b, 0x92, 0xa1, 0x33, 0x71, 0x45, 0x51, 0xab, 0x31, 0xd9, 0xa1, 0xe5,
-	0x0c, 0x86, 0xd8, 0x95, 0x1d, 0xbf, 0xb0, 0x47, 0x2f, 0x7a, 0xe3, 0xfe, 0x73, 0xa3, 0xce, 0x26,
-	0x3d, 0x7c, 0xd1, 0xb7, 0xdd, 0x31, 0xee, 0x39, 0xa3, 0x3d, 0x0b, 0xcb, 0xae, 0x1a, 0xac, 0x2b,
-	0x45, 0x3f, 0x7b, 0x93, 0x91, 0x35, 0x70, 0x77, 0xbf, 0x64, 0x4e, 0x0d, 0xbd, 0xfb, 0xbb, 0x2a,
-	0xac, 0xbe, 0x69, 0xb9, 0xff, 0xdd, 0xac, 0x98, 0xe9, 0xf5, 0x87, 0x2f, 0x5e, 0xd8, 0x63, 0x49,
-	0x8b, 0x19, 0x57, 0x4a, 0x94, 0x2f, 0xdd, 0x26, 0xdc, 0x2e, 0xbb, 0x1c, 0x3a, 0x6e, 0x6f, 0x77,
-	0x28, 0xa8, 0x54, 0x43, 0xef, 0x81, 0xf9, 0x66, 0x31, 0x5b, 0x46, 0x74, 0x1b, 0xd6, 0x8a, 0x1e,
-	0x73, 0xc3, 0x42, 0x10, 0x8a, 0x22, 0x6b, 0x60, 0xe8, 0x68, 0x0d, 0xae, 0x0b, 0x89, 0xda, 0x19,
-	0xcc, 0x00, 0xba, 0xdf, 0xd4, 0x60, 0xf9, 0x20, 0xa6, 0x29, 0xba, 0x05, 0xf5, 0x79, 0x4c, 0x53,
-	0x37, 0x8a, 0x79, 0x7e, 0xb7, 0xb1, 0xc6, 0x9a, 0x4e, 0xcc, 0xca, 0xb7, 0xd0, 0x3b, 0x22, 0xa1,
-	0xac, 0xc3, 0x44, 0x03, 0x7d, 0x24, 0x2f, 0x43, 0x22, 0x49, 0xf3, 0x22, 0x3a, 0xa6, 0x29, 0xff,
-	0x53, 0xb8, 0x0a, 0xfd, 0x18, 0x9a, 0x9e, 0x3f, 0x0b, 0xa2, 0x52, 0x31, 0x66, 0x6e, 0xcb, 0x2b,
-	0x73, 0x8f, 0x89, 0x44, 0x4a, 0xf2, 0x1b, 0x1b, 0x06, 0x2f, 0x43, 0x98, 0x69, 0x3c, 0x27, 0x94,
-	0x5b, 0x2e, 0x12, 0x4e, 0x9c, 0x05, 0xd3, 0xe1, 0x9c, 0xd0, 0x11, 0x97, 0x28, 0xd3, 0x38, 0x43,
-	0xca, 0x7c, 0x58, 0xbf, 0xc4, 0x87, 0x0f, 0xa0, 0x36, 0x27, 0x84, 0x26, 0x66, 0xe3, 0xd2, 0x1d,
-	0x80, 0x0f, 0x9f, 0x10, 0xca, 0x3e, 0xb0, 0xd0, 0x61, 0x97, 0x24, 0x7a, 0xe1, 0xce, 0xbd, 0xe9,
-	0x4b, 0x92, 0x26, 0xbc, 0xbe, 0xd2, 0xb0, 0x4e, 0x2f, 0x0e, 0x04, 0xc0, 0x6a, 0x64, 0x7a, 0x21,
-	0x0b, 0x3e, 0xe0, 0xc2, 0x3a, 0xbd, 0x10, 0x85, 0xde, 0x06, 0xe8, 0xf4, 0xc2, 0x25, 0x94, 0xc6,
-	0x34, 0xe1, 0x45, 0x95, 0x86, 0x1b, 0xf4, 0xc2, 0xe2, 0x6d, 0xe6, 0x36, 0xcd, 0xdd, 0xb6, 0x84,
-	0xdb, 0xb4, 0xe8, 0x36, 0x55, 0x6e, 0xdb, 0xc2, 0x6d, 0x9a, 0xbb, 0x4d, 0x33, 0xb7, 0x1d, 0xe1,
-	0x36, 0x55, 0x6e, 0x1f, 0x41, 0x23, 0x3e, 0x9e, 0xbb, 0x6c, 0xf1, 0xcc, 0x15, 0x4e, 0xf4, 0x6b,
-	0xdb, 0xc5, 0x77, 0x06, 0x25, 0xc4, 0xf5, 0xf8, 0x78, 0xce, 0xa6, 0xb9, 0xfe, 0x14, 0x1a, 0x6a,
-	0xca, 0xef, 0x3e, 0x45, 0x0a, 0x5b, 0xa4, 0x5a, 0xdc, 0x22, 0xdd, 0x04, 0x1a, 0x6a, 0xcd, 0xd9,
-	0x85, 0x34, 0xcf, 0x26, 0x03, 0x5a, 0xd6, 0xf8, 0xb9, 0x85, 0x1d, 0x6b, 0xec, 0x3a, 0x8e, 0x6d,
-	0x54, 0x4a, 0xc8, 0xc4, 0xb1, 0xc5, 0x0d, 0xf6, 0x80, 0xe5, 0xff, 0xfe, 0xd8, 0x58, 0xca, 0x1a,
-	0xce, 0x44, 0x94, 0x36, 0x87, 0x16, 0x53, 0x64, 0xb2, 0x5a, 0xa1, 0xe9, 0x4c, 0x0c, 0xad, 0xfb,
-	0x00, 0x6a, 0xac, 0xd3, 0x04, 0x75, 0xcb, 0xa7, 0x66, 0xab, 0xb8, 0x98, 0xea, 0xc4, 0xfc, 0x7d,
-	0x13, 0x34, 0x71, 0x64, 0xa2, 0xb5, 0xfc, 0x9a, 0xa1, 0x6e, 0x88, 0xec, 0xb6, 0x71, 0xbb, 0x70,
-	0xbb, 0xcf, 0x04, 0x62, 0x03, 0xdf, 0x86, 0x65, 0x1a, 0xc7, 0x69, 0xf9, 0xf2, 0xc9, 0x21, 0xd4,
-	0x05, 0x7d, 0xee, 0x51, 0x12, 0xa5, 0xae, 0x2c, 0x08, 0x32, 0xd3, 0x86, 0xc0, 0xf9, 0x66, 0xeb,
-	0x48, 0x1d, 0x15, 0xbd, 0x55, 0x16, 0xbd, 0xec, 0x7a, 0x2a, 0x84, 0x07, 0x22, 0xdb, 0x36, 0xb3,
-	0x4b, 0x54, 0xad, 0xe8, 0x4d, 0xdd, 0xa5, 0x36, 0xa0, 0x36, 0x8b, 0x7d, 0x12, 0x8a, 0x1a, 0x42,
-	0x49, 0x05, 0x86, 0x1e, 0x81, 0x71, 0xea, 0x51, 0xff, 0xdc, 0xa3, 0xf9, 0x55, 0xa5, 0x5e, 0xd4,
-	0x5b, 0x51, 0x62, 0x75, 0x69, 0x79, 0x04, 0xc6, 0x71, 0x40, 0x67, 0x25, 0x8b, 0x46, 0xc9, 0x42,
-	0x89, 0x95, 0xc5, 0x43, 0xd0, 0xf8, 0x19, 0x29, 0x12, 0xa1, 0xf9, 0xb8, 0x53, 0x3a, 0x5a, 0x93,
-	0x6c, 0xbc, 0x42, 0x89, 0xdd, 0xcc, 0x13, 0x42, 0x03, 0x2f, 0x74, 0xa3, 0xc5, 0xec, 0x88, 0x50,
-	0x9e, 0x21, 0x99, 0xf7, 0x96, 0x90, 0x39, 0x5c, 0xc4, 0x62, 0x99, 0x3f, 0x6e, 0x99, 0xa5, 0x58,
-	0x66, 0x6f, 0x5c, 0x77, 0xf2, 0x47, 0xac, 0x66, 0x51, 0x23, 0x7b, 0xcb, 0x42, 0xb0, 0x7c, 0x16,
-	0x7a, 0x11, 0xcf, 0xa7, 0x36, 0xe6, 0xdf, 0xec, 0xf2, 0x33, 0xf3, 0xa6, 0xae, 0xe7, 0xfb, 0x94,
-	0x24, 0x22, 0x9b, 0x74, 0x0c, 0x33, 0x6f, 0xda, 0x13, 0x08, 0xba, 0x07, 0xad, 0x60, 0x7e, 0xf6,
-	0x71, 0xa6, 0xc1, 0x72, 0x4a, 0x7f, 0x7e, 0x0d, 0x37, 0x19, 0x5a, 0x56, 0xfa, 0x24, 0x53, 0x5a,
-	0x29, 0x28, 0x7d, 0xa2, 0x94, 0xde, 0x87, 0xf6, 0x69, 0x9c, 0xa4, 0xae, 0x17, 0xf9, 0x22, 0x05,
-	0xd7, 0x94, 0x16, 0x83, 0x7b, 0x91, 0xcf, 0xb3, 0x6c, 0x13, 0x80, 0x5c, 0xa4, 0xd4, 0x73, 0x3d,
-	0x7a, 0x92, 0x98, 0xb7, 0xc4, 0xab, 0x0c, 0x47, 0x7a, 0xf4, 0x24, 0x41, 0x4f, 0xa1, 0x3d, 0xa7,
-	0xf1, 0xc5, 0xeb, 0xac, 0xab, 0x1b, 0x3c, 0xd4, 0x1b, 0xe5, 0xb7, 0xab, 0xed, 0x03, 0xa6, 0x23,
-	0x3b, 0xc6, 0xad, 0x79, 0xa1, 0x75, 0x99, 0x72, 0x8d, 0x7f, 0x81, 0x72, 0x9f, 0x96, 0x29, 0xf7,
-	0xfa, 0xbb, 0x29, 0x57, 0xc5, 0xbf, 0xc8, 0xbc, 0x9b, 0x59, 0x05, 0x77, 0xb3, 0xb4, 0x85, 0x65,
-	0x85, 0x66, 0x43, 0x67, 0x1a, 0x47, 0x11, 0x99, 0xa6, 0xaa, 0x0f, 0xc4, 0xfb, 0xd8, 0x50, 0x7d,
-	0xf4, 0x85, 0xf4, 0x4d, 0xdd, 0xb4, 0xa7, 0x45, 0x19, 0xfa, 0x7f, 0xd0, 0xa6, 0x8b, 0x24, 0x8d,
-	0x67, 0xe6, 0x53, 0x1e, 0xa1, 0xd5, 0x6d, 0xf1, 0x60, 0xbb, 0xad, 0x1e, 0x6c, 0xb7, 0x7b, 0xd1,
-	0x6b, 0x2c, 0x75, 0xd0, 0x67, 0x00, 0xf3, 0x99, 0xbc, 0x01, 0x27, 0xe6, 0xd7, 0x15, 0x6e, 0x72,
-	0xfd, 0xf2, 0x73, 0x4e, 0xb2, 0x5b, 0xfb, 0xeb, 0x77, 0xdf, 0x6e, 0x5e, 0xc3, 0xfa, 0x3c, 0x7b,
-	0xb3, 0xda, 0x87, 0x15, 0x51, 0x54, 0xaa, 0x52, 0x35, 0x31, 0x7f, 0x59, 0x79, 0xc7, 0xf5, 0x65,
-	0xb7, 0xc9, 0x5c, 0x68, 0xe2, 0xfd, 0x02, 0x77, 0x82, 0xd2, 0x0d, 0x68, 0xfd, 0xeb, 0x2a, 0xb4,
-	0x8a, 0x6b, 0xf7, 0x6e, 0xd2, 0xbd, 0x03, 0x4d, 0x29, 0xcc, 0xe9, 0x09, 0x83, 0x9f, 0xbf, 0x11,
-	0x6f, 0x02, 0x4c, 0x4f, 0xbd, 0x28, 0x22, 0x21, 0x33, 0x17, 0x0f, 0x45, 0xba, 0x44, 0x6c, 0x1f,
-	0x6d, 0x81, 0xa1, 0xc4, 0xe2, 0xa9, 0x4f, 0x12, 0x55, 0x1b, 0x77, 0x24, 0xce, 0x9f, 0xbd, 0x6c,
-	0x1f, 0xed, 0xc0, 0x0d, 0xa5, 0x99, 0x12, 0x3a, 0x0b, 0x22, 0x5e, 0x29, 0xc9, 0x8b, 0x0c, 0x92,
-	0xa2, 0x71, 0x2e, 0x41, 0x6b, 0xa0, 0xc5, 0xd1, 0x82, 0x39, 0xd4, 0xc4, 0xcb, 0x4e, 0x1c, 0x2d,
-	0x6c, 0x1f, 0xbd, 0x0f, 0x1d, 0x06, 0x27, 0x24, 0x61, 0x8c, 0xa1, 0x8e, 0xdf, 0x36, 0x6e, 0xc5,
-	0xd1, 0x62, 0x24, 0x40, 0xdb, 0xdf, 0xd5, 0x59, 0x26, 0xf3, 0xf9, 0x77, 0x77, 0xa0, 0x2e, 0xb6,
-	0x34, 0xcb, 0x9f, 0x12, 0x97, 0x77, 0xca, 0x5b, 0x5e, 0xb1, 0xf9, 0x1f, 0x96, 0x60, 0x75, 0x14,
-	0xcc, 0x16, 0xa1, 0x97, 0x92, 0x5e, 0xe8, 0xd1, 0x19, 0x26, 0xaf, 0x16, 0x24, 0x49, 0xaf, 0x3c,
-	0x21, 0xbd, 0x07, 0x7a, 0x10, 0xf9, 0xc1, 0xd4, 0x4b, 0x63, 0xf5, 0xea, 0x9d, 0x03, 0xec, 0x3c,
-	0x0b, 0xa2, 0xf4, 0x58, 0x85, 0x4d, 0xc7, 0x1a, 0x6b, 0x8a, 0x19, 0x70, 0xaa, 0x66, 0x11, 0x17,
-	0x2f, 0xa7, 0xe2, 0x7d, 0xad, 0x35, 0x97, 0xa7, 0x1c, 0x7f, 0x3c, 0xed, 0x42, 0x9b, 0xcd, 0x33,
-	0x5f, 0x3a, 0x79, 0xe5, 0x8b, 0xa3, 0xc5, 0x40, 0xad, 0xde, 0x13, 0xb8, 0x19, 0x44, 0x8c, 0x59,
-	0x89, 0x7b, 0x14, 0xa4, 0xe2, 0xcc, 0x76, 0x29, 0xcb, 0x49, 0x16, 0xb2, 0x1a, 0xbe, 0x21, 0xa5,
-	0xbb, 0x41, 0xca, 0xcf, 0x6f, 0x2c, 0xae, 0x18, 0x35, 0x9f, 0x06, 0xc7, 0x29, 0x8f, 0x5b, 0x0d,
-	0x8b, 0x06, 0x1b, 0x6d, 0x44, 0xce, 0x5d, 0xf2, 0xca, 0xe7, 0x14, 0x5d, 0xc3, 0x5a, 0x44, 0xce,
-	0xad, 0x57, 0x3e, 0xba, 0x0f, 0xd7, 0x45, 0xbc, 0x8b, 0x3c, 0x2b, 0x9e, 0x81, 0x56, 0x78, 0xc8,
-	0x0b, 0x1c, 0xfb, 0x1c, 0x74, 0x96, 0xa9, 0x62, 0x65, 0x81, 0xe7, 0xdd, 0x7d, 0x15, 0xe3, 0x37,
-	0x45, 0x94, 0x27, 0x3c, 0xd7, 0xe6, 0x05, 0x5d, 0x6e, 0xdc, 0xfd, 0x00, 0xda, 0x25, 0x19, 0xd2,
-	0xa1, 0x86, 0x7b, 0xf6, 0xc8, 0x12, 0x4f, 0xd5, 0xfd, 0x7d, 0xab, 0x87, 0x8d, 0xca, 0xee, 0x08,
-	0x6e, 0xc4, 0xf4, 0x84, 0x57, 0x20, 0xd3, 0x98, 0xfa, 0xb2, 0xaf, 0xdd, 0xd6, 0x21, 0xff, 0x2f,
-	0xe2, 0xf4, 0xf3, 0xed, 0x93, 0x20, 0x3d, 0x5d, 0x1c, 0x31, 0x02, 0xd8, 0x51, 0x9a, 0x3b, 0x42,
-	0xf3, 0xa1, 0xfc, 0xa9, 0xe4, 0xec, 0xe3, 0x9d, 0x93, 0x58, 0x62, 0x47, 0x1a, 0x07, 0x9f, 0xfc,
-	0x33, 0x00, 0x00, 0xff, 0xff, 0x07, 0x84, 0x3f, 0x04, 0xc4, 0x19, 0x00, 0x00,
+	// 2743 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4d, 0x73, 0xdb, 0xc6,
+	0xf9, 0x37, 0x29, 0x11, 0x24, 0x1e, 0xbe, 0x08, 0x5e, 0x4b, 0x36, 0x2c, 0x45, 0x7f, 0x3b, 0x74,
+	0xf2, 0x8f, 0x6c, 0xd7, 0x92, 0x6b, 0xb7, 0x49, 0x7a, 0xc8, 0x8c, 0x29, 0x12, 0xb2, 0x31, 0x95,
+	0x41, 0x75, 0x49, 0x2a, 0x4d, 0x2f, 0x18, 0x88, 0x58, 0x49, 0x18, 0x83, 0x00, 0xbd, 0x00, 0x29,
+	0x39, 0xb7, 0x4e, 0xa7, 0x3d, 0x75, 0xa6, 0x87, 0x9e, 0xfa, 0x0d, 0x7a, 0xea, 0xf4, 0x96, 0x6b,
+	0xbf, 0x40, 0x66, 0xfa, 0x19, 0xd2, 0x4b, 0x8f, 0x3d, 0x74, 0x72, 0xee, 0xec, 0x1b, 0x08, 0x48,
+	0xb2, 0x93, 0xce, 0x74, 0x3a, 0xbd, 0x48, 0xd8, 0xdf, 0xf3, 0xb2, 0xbb, 0xcf, 0xee, 0xf3, 0xdb,
+	0x67, 0x97, 0xb0, 0x3e, 0x8f, 0xc3, 0xf4, 0xd4, 0x73, 0xa7, 0x34, 0x4e, 0xe3, 0x64, 0xc7, 0x27,
+	0xf3, 0x60, 0x4c, 0xb6, 0x79, 0x0b, 0x69, 0x42, 0xb6, 0x7e, 0xfb, 0x24, 0x8e, 0x4f, 0x42, 0xb2,
+	0xc3, 0xd1, 0xa3, 0xd9, 0xf1, 0x8e, 0x17, 0xbd, 0x11, 0x2a, 0xeb, 0x17, 0xcc, 0xc7, 0xf1, 0x64,
+	0x12, 0x47, 0x52, 0x66, 0x16, 0x65, 0x13, 0x92, 0x7a, 0x52, 0x72, 0xa7, 0x28, 0x89, 0xa7, 0x24,
+	0x3a, 0x0e, 0xe3, 0x33, 0xf7, 0x87, 0x4f, 0x85, 0x42, 0xfb, 0x9b, 0x32, 0x40, 0x8f, 0x0f, 0x65,
+	0xf8, 0x66, 0x4a, 0x50, 0x0b, 0xca, 0x81, 0x6f, 0x96, 0xee, 0x96, 0xb6, 0x74, 0x5c, 0x0e, 0x7c,
+	0xb4, 0x01, 0xfa, 0x9c, 0x44, 0x7e, 0x4c, 0xdd, 0xc0, 0x37, 0x2b, 0x1c, 0xae, 0x09, 0xc0, 0xf6,
+	0xd1, 0x26, 0x40, 0x26, 0x4c, 0x4c, 0xed, 0xee, 0xd2, 0x96, 0x8e, 0x75, 0x25, 0x4d, 0x90, 0x09,
+	0x55, 0xcf, 0xf7, 0xa6, 0x29, 0xa1, 0x66, 0x99, 0x5b, 0xaa, 0x26, 0xfa, 0x04, 0x4c, 0x6f, 0x3c,
+	0x26, 0xd3, 0x34, 0x71, 0x8f, 0x66, 0xe1, 0x2b, 0x97, 0x0f, 0x69, 0x36, 0xf5, 0xbd, 0x94, 0x98,
+	0x4b, 0x77, 0x4b, 0x5b, 0x35, 0xbc, 0x26, 0xe5, 0xbb, 0xb3, 0xf0, 0xd5, 0x5e, 0x18, 0x9f, 0x8d,
+	0xb8, 0x10, 0xf5, 0xe0, 0x8e, 0x32, 0xf4, 0x7c, 0xdf, 0xa5, 0x64, 0x12, 0xcf, 0x49, 0xde, 0x3c,
+	0x31, 0x97, 0xb9, 0xfd, 0x86, 0x54, 0xeb, 0xf8, 0x3e, 0xe6, 0x4a, 0x0b, 0x27, 0x09, 0xda, 0x87,
+	0x7b, 0xca, 0x8b, 0x1f, 0x50, 0x32, 0x4e, 0xdd, 0x30, 0x3e, 0x09, 0xc6, 0x5e, 0xc8, 0x3d, 0x25,
+	0x6a, 0x24, 0x55, 0xee, 0x49, 0x75, 0xd8, 0xe3, 0x9a, 0xfb, 0x42, 0x91, 0x79, 0x4b, 0xe4, 0x98,
+	0xde, 0x87, 0x86, 0x9c, 0x97, 0x9b, 0xbe, 0x99, 0x12, 0xb3, 0xc6, 0xe7, 0x5a, 0x97, 0x18, 0x8b,
+	0x6a, 0xfb, 0x13, 0xa8, 0x2f, 0x62, 0x9c, 0xa0, 0x2d, 0xa8, 0x04, 0x29, 0x99, 0x24, 0x66, 0xe9,
+	0xee, 0xd2, 0x56, 0xfd, 0x09, 0xda, 0x16, 0x8b, 0xb4, 0xbd, 0xd0, 0xc1, 0x42, 0xa1, 0xfd, 0x97,
+	0x12, 0xd4, 0x0e, 0x26, 0xdd, 0x38, 0x3a, 0x0e, 0x4e, 0x10, 0x82, 0xe5, 0xc8, 0x9b, 0x10, 0xb9,
+	0x3a, 0xfc, 0x1b, 0x3d, 0x84, 0x65, 0xde, 0x29, 0x0b, 0x70, 0xeb, 0xc9, 0x2d, 0xe5, 0x49, 0xd9,
+	0x6c, 0x1f, 0x4c, 0xb8, 0x3b, 0xae, 0xc4, 0x16, 0x84, 0x44, 0xde, 0x51, 0x48, 0x7c, 0x19, 0x65,
+	0xd5, 0x44, 0x77, 0xa0, 0x9e, 0x78, 0x93, 0x69, 0x48, 0xdc, 0x63, 0x4a, 0x5e, 0xf3, 0x18, 0x36,
+	0x31, 0x08, 0x68, 0x8f, 0x92, 0xd7, 0xed, 0x4f, 0x41, 0x13, 0xae, 0x50, 0x1d, 0xaa, 0xdd, 0xfe,
+	0xc8, 0x19, 0x5a, 0xd8, 0xb8, 0x86, 0x74, 0xa8, 0x3c, 0xef, 0x8c, 0x9e, 0x5b, 0x46, 0x89, 0x7d,
+	0x0e, 0x86, 0x9d, 0xa1, 0x65, 0x94, 0x85, 0x8a, 0x33, 0xb4, 0x7e, 0x3e, 0x34, 0x96, 0xda, 0xbf,
+	0x2f, 0x41, 0xf3, 0x60, 0xf2, 0x9c, 0xc6, 0xb3, 0xa9, 0x9c, 0xc7, 0x26, 0xc0, 0x09, 0x6b, 0xba,
+	0xb9, 0xd9, 0xe8, 0x1c, 0x71, 0xd8, 0x94, 0x32, 0x31, 0x1f, 0x4a, 0x99, 0x0f, 0x45, 0x88, 0xd9,
+	0x48, 0xde, 0x31, 0x89, 0x07, 0x50, 0x9d, 0x90, 0x94, 0x06, 0x63, 0xb6, 0x09, 0x58, 0x60, 0x8d,
+	0x8b, 0xe1, 0xc0, 0x4a, 0xa1, 0xfd, 0xcb, 0x32, 0xe8, 0x0a, 0x4d, 0x2e, 0xed, 0xfa, 0xf7, 0xa1,
+	0xe1, 0x93, 0x63, 0x6f, 0x16, 0xa6, 0xf9, 0x41, 0xd4, 0x25, 0xc6, 0x87, 0x71, 0x07, 0xaa, 0x7c,
+	0x4c, 0x6a, 0x18, 0xbb, 0x95, 0xbf, 0x7f, 0xfb, 0xf5, 0x66, 0x09, 0x2b, 0x14, 0x3d, 0x80, 0x26,
+	0xb3, 0x75, 0xe3, 0x39, 0xa1, 0x34, 0xf0, 0x89, 0xd8, 0x98, 0x4a, 0xad, 0xc1, 0x64, 0x7d, 0x29,
+	0x42, 0x8f, 0x40, 0xe3, 0x66, 0x89, 0x59, 0xe1, 0x03, 0x5f, 0x5b, 0x0c, 0x3c, 0x17, 0x38, 0x2c,
+	0x95, 0xf2, 0x13, 0xd5, 0xbe, 0x63, 0xa2, 0xe8, 0x36, 0xd4, 0x26, 0xde, 0xb9, 0x9b, 0xbc, 0x22,
+	0x67, 0x7c, 0x43, 0x37, 0x71, 0x75, 0xe2, 0x9d, 0x0f, 0x5e, 0x91, 0xb3, 0xf6, 0xef, 0xca, 0x50,
+	0xb1, 0x27, 0xde, 0x09, 0xb9, 0x72, 0x67, 0x99, 0x50, 0x9d, 0x13, 0x9a, 0x04, 0x71, 0xa4, 0xb2,
+	0x57, 0x36, 0x99, 0xf6, 0xa9, 0x97, 0x9c, 0xf2, 0x79, 0x37, 0x31, 0xff, 0x46, 0xf7, 0xc1, 0x08,
+	0xa2, 0x24, 0xf5, 0xc2, 0xd0, 0x65, 0x49, 0x91, 0x06, 0x13, 0x31, 0x61, 0x1d, 0xaf, 0x48, 0xbc,
+	0x27, 0x61, 0x46, 0x29, 0x41, 0xe2, 0x7a, 0xe3, 0x34, 0x98, 0x13, 0x4e, 0x29, 0x35, 0x5c, 0x0b,
+	0x92, 0x0e, 0x6f, 0xb3, 0xc8, 0x07, 0x89, 0xcb, 0xc8, 0x2d, 0x48, 0x53, 0xe2, 0x9b, 0x1a, 0x97,
+	0xd7, 0x83, 0xa4, 0xab, 0x20, 0x36, 0xa3, 0x20, 0x71, 0xe7, 0x5e, 0x18, 0xf8, 0x32, 0x45, 0xab,
+	0x41, 0x72, 0xc8, 0x9a, 0xc8, 0x80, 0xa5, 0x19, 0x0d, 0x65, 0x06, 0xb2, 0x4f, 0x74, 0x13, 0x34,
+	0x41, 0x48, 0xa6, 0xce, 0x41, 0xd9, 0x42, 0xab, 0x50, 0x19, 0xd3, 0xf1, 0xd3, 0x27, 0x26, 0xf0,
+	0x49, 0x88, 0x46, 0xfb, 0x6f, 0x55, 0x68, 0xf2, 0x88, 0xf4, 0xe2, 0xb3, 0x28, 0x8c, 0x3d, 0xff,
+	0xd2, 0xce, 0x50, 0x91, 0x2a, 0xe7, 0x22, 0x25, 0x7b, 0x5d, 0x5a, 0xf4, 0x6a, 0xc0, 0xd2, 0x98,
+	0x8e, 0x65, 0x1a, 0xb1, 0x4f, 0xd4, 0x87, 0x96, 0x2f, 0x7d, 0xba, 0x49, 0xca, 0xd8, 0xa5, 0xc2,
+	0x33, 0x76, 0x4b, 0xad, 0x5c, 0xa1, 0xdb, 0x62, 0x6b, 0xc0, 0xf4, 0x71, 0xd3, 0xcf, 0x37, 0xd1,
+	0x3d, 0x68, 0x06, 0x4c, 0xc9, 0x55, 0x8b, 0xa4, 0xf1, 0xee, 0x1b, 0x1c, 0x3c, 0x94, 0x2b, 0x75,
+	0x1f, 0x0c, 0x65, 0x45, 0x7c, 0xf7, 0xe8, 0x0d, 0xe3, 0x47, 0xb1, 0x09, 0x56, 0x16, 0xf8, 0x2e,
+	0x83, 0xd1, 0x0b, 0xd0, 0x28, 0xf1, 0x92, 0x38, 0xe2, 0xd1, 0x6b, 0x3d, 0x79, 0xfc, 0x3d, 0x06,
+	0xb6, 0xe7, 0x05, 0xe1, 0x8c, 0x12, 0xcc, 0xed, 0xb0, 0xb4, 0x47, 0x1f, 0xc1, 0x8a, 0xe7, 0xfb,
+	0x41, 0x1a, 0xc4, 0x91, 0x17, 0xba, 0x41, 0x74, 0x1c, 0xcb, 0xd8, 0xb7, 0x16, 0xb0, 0x1d, 0x1d,
+	0xc7, 0x82, 0x74, 0xe6, 0xc4, 0x1d, 0xf3, 0x2d, 0xcb, 0x57, 0xa2, 0xc6, 0x48, 0x67, 0x4e, 0x24,
+	0x51, 0x6c, 0x80, 0x1e, 0xc6, 0x8c, 0x96, 0xfd, 0x80, 0x9a, 0x75, 0x71, 0xf8, 0x70, 0xa0, 0x17,
+	0x50, 0x64, 0x43, 0x5d, 0x04, 0x40, 0x84, 0xb3, 0xf1, 0x9d, 0xe1, 0xe4, 0x3b, 0xcc, 0x4b, 0x89,
+	0x08, 0x27, 0x70, 0x63, 0x11, 0xcb, 0x0d, 0xd0, 0x8f, 0x83, 0x90, 0xb8, 0x49, 0xf0, 0x25, 0x31,
+	0x9b, 0x3c, 0x3e, 0x35, 0x06, 0x0c, 0x82, 0x2f, 0x49, 0xfb, 0xab, 0x12, 0xa0, 0xcb, 0xcb, 0x81,
+	0x56, 0xc1, 0xe8, 0xf5, 0x3f, 0x77, 0xf6, 0xfb, 0x9d, 0x9e, 0x3b, 0x72, 0x7e, 0xea, 0xf4, 0x3f,
+	0x77, 0x8c, 0x6b, 0xe8, 0x26, 0xa0, 0x0c, 0x1d, 0x8c, 0xba, 0x5d, 0xcb, 0xea, 0x59, 0x3d, 0xa3,
+	0x54, 0xc0, 0xb1, 0xf5, 0xb3, 0x91, 0x35, 0x18, 0x5a, 0x3d, 0xa3, 0x5c, 0xf0, 0x32, 0x18, 0x76,
+	0x30, 0x43, 0x97, 0xd0, 0x0d, 0x58, 0xc9, 0xd0, 0xbd, 0x8e, 0xbd, 0x6f, 0xf5, 0x8c, 0x65, 0x64,
+	0xc2, 0x6a, 0xae, 0xc3, 0xc1, 0xe8, 0xe0, 0xa0, 0xcf, 0xd5, 0x2b, 0x05, 0xe7, 0xdd, 0x8e, 0xd3,
+	0xb5, 0xf6, 0x99, 0x85, 0xd6, 0xfe, 0x4d, 0x09, 0xd6, 0xdf, 0xbe, 0x5e, 0xa8, 0x01, 0x35, 0xa7,
+	0xef, 0x5a, 0x18, 0xf7, 0x19, 0x93, 0xaf, 0x40, 0xdd, 0x76, 0x0e, 0x3b, 0xfb, 0x76, 0xcf, 0x1d,
+	0xe1, 0x7d, 0xa3, 0xc4, 0x80, 0x9e, 0x75, 0x68, 0x77, 0x2d, 0x77, 0x77, 0x34, 0xf8, 0xc2, 0x28,
+	0xb3, 0x6e, 0x6c, 0x67, 0x30, 0xda, 0xdb, 0xb3, 0xbb, 0xb6, 0xe5, 0x0c, 0xdd, 0xc1, 0x41, 0xa7,
+	0x6b, 0x19, 0x4b, 0xe8, 0x3a, 0x34, 0x65, 0x00, 0xa4, 0xb3, 0x65, 0xd4, 0x04, 0x7d, 0x31, 0x90,
+	0x4a, 0xfb, 0xb7, 0x2a, 0x84, 0x85, 0x25, 0x60, 0x86, 0xf6, 0xcb, 0xce, 0x73, 0x2b, 0x17, 0x3f,
+	0x04, 0x2d, 0x01, 0xd9, 0x4e, 0xa7, 0x3b, 0xb4, 0x0f, 0xd9, 0xc1, 0xb2, 0x0a, 0x86, 0xc0, 0x38,
+	0xd2, 0x19, 0xda, 0xce, 0x73, 0xa3, 0x8c, 0x0c, 0x68, 0xe4, 0x50, 0x4b, 0x44, 0x4d, 0x20, 0xd8,
+	0x3a, 0xb4, 0x30, 0x57, 0x5b, 0x5e, 0x38, 0x14, 0x20, 0x1f, 0xce, 0x67, 0xd0, 0x2a, 0x84, 0x25,
+	0x41, 0x0f, 0xd5, 0x81, 0x5c, 0x2e, 0xd2, 0x6f, 0x41, 0x4d, 0x9d, 0xc9, 0x8f, 0x40, 0xe3, 0x78,
+	0x82, 0xee, 0x41, 0x85, 0xef, 0x22, 0x79, 0x8e, 0x37, 0x0b, 0x66, 0x58, 0xc8, 0xda, 0x7f, 0x2e,
+	0x41, 0xad, 0x1f, 0xcd, 0x04, 0xd1, 0xe6, 0x48, 0xb5, 0x54, 0x24, 0xd5, 0xff, 0x03, 0x50, 0x24,
+	0x47, 0x7c, 0x4e, 0x2f, 0x35, 0x9c, 0x43, 0xd0, 0x3a, 0x64, 0x24, 0x29, 0xcf, 0xbd, 0x05, 0x69,
+	0x9a, 0xa0, 0x18, 0x50, 0x56, 0x3f, 0x19, 0x21, 0xde, 0x85, 0xfa, 0x94, 0xc6, 0xfe, 0x6c, 0x9c,
+	0x76, 0x63, 0x9f, 0xc8, 0x02, 0x2e, 0x0f, 0x65, 0x64, 0x2e, 0xe8, 0x83, 0x7f, 0xb7, 0x9f, 0x82,
+	0xae, 0x46, 0x9c, 0xa0, 0xff, 0x2f, 0x16, 0x2b, 0xd9, 0x51, 0xa3, 0x34, 0x54, 0x58, 0xc6, 0x60,
+	0x88, 0xfa, 0xc5, 0x2e, 0x24, 0x96, 0x28, 0x73, 0xdd, 0x8c, 0x44, 0x6b, 0x02, 0xb0, 0x7d, 0xf4,
+	0x04, 0x72, 0x39, 0xc8, 0x67, 0x9c, 0x2b, 0x85, 0x16, 0x4e, 0xf2, 0x99, 0xca, 0x08, 0x1a, 0x72,
+	0xfe, 0xdf, 0x1e, 0xce, 0xfd, 0x4b, 0x7c, 0x2b, 0x2a, 0xa4, 0x0f, 0x2f, 0x77, 0xf0, 0x3d, 0xc8,
+	0xf6, 0xb3, 0x8c, 0x1c, 0x97, 0xde, 0xed, 0xe5, 0x6a, 0x46, 0x7c, 0x51, 0xa4, 0xaa, 0x65, 0xee,
+	0xe3, 0xa3, 0xb7, 0xf9, 0x90, 0x49, 0x12, 0xc4, 0xd1, 0xe5, 0xf9, 0xff, 0xf5, 0x7f, 0x9e, 0x8c,
+	0x6e, 0xc1, 0x8d, 0x8b, 0x64, 0xc4, 0x32, 0x51, 0x7b, 0x0b, 0x4b, 0x55, 0xdb, 0xff, 0x54, 0x53,
+	0xfa, 0xaf, 0xb1, 0x93, 0x09, 0xab, 0xd9, 0x00, 0xdc, 0xbe, 0xa3, 0x62, 0x60, 0x54, 0xd0, 0x3a,
+	0xdc, 0x2c, 0x48, 0xfa, 0xce, 0xc8, 0x15, 0x45, 0xad, 0xc6, 0x64, 0x87, 0x96, 0xd3, 0xeb, 0x63,
+	0x57, 0x76, 0xfc, 0xd2, 0x1e, 0xbc, 0xec, 0x0c, 0xbb, 0x2f, 0x8c, 0x2a, 0x9b, 0x74, 0xff, 0x65,
+	0xd7, 0x76, 0x87, 0xb8, 0xe3, 0x0c, 0xf6, 0x2c, 0x2c, 0xbb, 0xaa, 0xb1, 0xae, 0x14, 0xfd, 0xec,
+	0x8d, 0x06, 0x56, 0xcf, 0xdd, 0xfd, 0x82, 0x39, 0x35, 0xf4, 0xf6, 0x1f, 0xca, 0xb0, 0x7a, 0xd5,
+	0x72, 0xff, 0xa7, 0x59, 0x31, 0xd3, 0xeb, 0xf6, 0x5f, 0xbe, 0xb4, 0x87, 0x92, 0x16, 0x33, 0xae,
+	0x94, 0x28, 0x5f, 0xba, 0x4d, 0xb8, 0x5d, 0x74, 0xd9, 0x77, 0xdc, 0xce, 0x6e, 0x5f, 0x50, 0xa9,
+	0x86, 0xde, 0x03, 0xf3, 0x6a, 0x31, 0x5b, 0x46, 0x74, 0x1b, 0xd6, 0xf2, 0x1e, 0x17, 0x86, 0xb9,
+	0x20, 0xe4, 0x45, 0x56, 0xcf, 0xd0, 0xd1, 0x1a, 0x5c, 0x17, 0x12, 0xb5, 0x33, 0x98, 0x01, 0xb4,
+	0xbf, 0xaa, 0xc0, 0xf2, 0x41, 0x4c, 0x53, 0x74, 0x0b, 0xaa, 0xd3, 0x98, 0xa6, 0x6e, 0x14, 0xf3,
+	0xfc, 0x6e, 0x62, 0x8d, 0x35, 0x9d, 0x98, 0x95, 0x6f, 0xa1, 0x77, 0x44, 0x42, 0x59, 0x87, 0x89,
+	0x06, 0xba, 0x2f, 0x2f, 0x43, 0x22, 0x49, 0x17, 0x45, 0x74, 0x4c, 0x53, 0xfe, 0x27, 0x77, 0x15,
+	0xfa, 0x09, 0xd4, 0x3d, 0x7f, 0x12, 0x44, 0x85, 0x62, 0xcc, 0xdc, 0x96, 0xb7, 0xea, 0x0e, 0x13,
+	0x89, 0x94, 0xe4, 0x37, 0x36, 0x0c, 0x5e, 0x86, 0x30, 0xd3, 0x78, 0x4a, 0x28, 0xb7, 0x9c, 0x25,
+	0x9c, 0x38, 0x73, 0xa6, 0xfd, 0x29, 0xa1, 0x03, 0x2e, 0x51, 0xa6, 0x71, 0x86, 0x14, 0xf9, 0xb0,
+	0x7a, 0x81, 0x0f, 0x1f, 0x42, 0x65, 0x4a, 0x08, 0x4d, 0xcc, 0xda, 0x85, 0x3b, 0x00, 0x1f, 0x3e,
+	0x21, 0x94, 0x7d, 0x60, 0xa1, 0xc3, 0x2e, 0x49, 0xf4, 0xdc, 0x9d, 0x7a, 0xe3, 0x57, 0x24, 0x4d,
+	0x78, 0x7d, 0xa5, 0x61, 0x9d, 0x9e, 0x1f, 0x08, 0x80, 0xd5, 0xc8, 0xf4, 0x5c, 0x16, 0x7c, 0xc0,
+	0x85, 0x55, 0x7a, 0x2e, 0x0a, 0xbd, 0x0d, 0xd0, 0xe9, 0xb9, 0x4b, 0x28, 0x8d, 0x69, 0xc2, 0x8b,
+	0x2a, 0x0d, 0xd7, 0xe8, 0xb9, 0xc5, 0xdb, 0xcc, 0x6d, 0xba, 0x70, 0xdb, 0x10, 0x6e, 0xd3, 0xbc,
+	0xdb, 0x54, 0xb9, 0x6d, 0x0a, 0xb7, 0xe9, 0xc2, 0x6d, 0x9a, 0xb9, 0x6d, 0x09, 0xb7, 0xa9, 0x72,
+	0xfb, 0x18, 0x6a, 0xf1, 0xf1, 0xd4, 0x65, 0x8b, 0x67, 0xae, 0x70, 0xa2, 0x5f, 0xdb, 0xce, 0x3f,
+	0x45, 0x28, 0x21, 0xae, 0xc6, 0xc7, 0x53, 0x36, 0xcd, 0xf5, 0x67, 0x50, 0x53, 0x53, 0x7e, 0xf7,
+	0x29, 0x92, 0xdb, 0x22, 0xe5, 0xfc, 0x16, 0x69, 0x27, 0x50, 0x53, 0x6b, 0xce, 0x2e, 0xa4, 0x8b,
+	0x6c, 0x32, 0xa0, 0x61, 0x0d, 0x5f, 0x58, 0xd8, 0xb1, 0x86, 0xae, 0xe3, 0xd8, 0x46, 0xa9, 0x80,
+	0x8c, 0x1c, 0x5b, 0xdc, 0x60, 0x0f, 0x58, 0xfe, 0xef, 0x0f, 0x8d, 0xa5, 0xac, 0xe1, 0x8c, 0x44,
+	0x69, 0x73, 0x68, 0x31, 0x45, 0x26, 0xab, 0xe4, 0x9a, 0xce, 0xc8, 0xd0, 0xda, 0x0f, 0xa1, 0xc2,
+	0x3a, 0x4d, 0x50, 0xbb, 0x78, 0x6a, 0x36, 0xf2, 0x8b, 0xa9, 0x4e, 0xcc, 0x7f, 0xd4, 0x41, 0x13,
+	0x47, 0x26, 0x5a, 0x5b, 0x5c, 0x33, 0xd4, 0x0d, 0x91, 0xdd, 0x36, 0x6e, 0xe7, 0x6e, 0xf7, 0x99,
+	0x40, 0x6c, 0xe0, 0xdb, 0xb0, 0x4c, 0xe3, 0x38, 0x2d, 0x5e, 0x3e, 0x39, 0x84, 0xda, 0xa0, 0x4f,
+	0x3d, 0x4a, 0xa2, 0xd4, 0x95, 0x05, 0x41, 0x66, 0x5a, 0x13, 0x38, 0xdf, 0x6c, 0x2d, 0xa9, 0xa3,
+	0xa2, 0xb7, 0xca, 0xa2, 0x97, 0x5d, 0x4f, 0x85, 0xf0, 0x40, 0x64, 0xdb, 0x66, 0x76, 0x89, 0xaa,
+	0xe4, 0xbd, 0xa9, 0xbb, 0xd4, 0x06, 0x54, 0x26, 0xb1, 0x4f, 0x42, 0x51, 0x43, 0x28, 0xa9, 0xc0,
+	0xd0, 0x63, 0x30, 0x4e, 0x3d, 0xea, 0x9f, 0x79, 0x74, 0x71, 0x55, 0xa9, 0xe6, 0xf5, 0x56, 0x94,
+	0x58, 0x5d, 0x5a, 0x1e, 0x83, 0x71, 0x1c, 0xd0, 0x49, 0xc1, 0xa2, 0x56, 0xb0, 0x50, 0x62, 0x65,
+	0xf1, 0x08, 0x34, 0x7e, 0x46, 0x8a, 0x44, 0xa8, 0x3f, 0x69, 0x15, 0x8e, 0xd6, 0x24, 0x1b, 0xaf,
+	0x50, 0x62, 0x37, 0xf3, 0x84, 0xd0, 0xc0, 0x0b, 0xdd, 0x68, 0x36, 0x39, 0x22, 0x94, 0x67, 0x48,
+	0xe6, 0xbd, 0x21, 0x64, 0x0e, 0x17, 0xb1, 0x58, 0x2e, 0xde, 0xbf, 0xcc, 0x42, 0x2c, 0xb3, 0x67,
+	0xb0, 0x3b, 0x8b, 0x77, 0xae, 0x7a, 0x5e, 0x23, 0x7b, 0xee, 0xba, 0x0f, 0x86, 0x7a, 0x21, 0x22,
+	0x91, 0x3f, 0x8d, 0x83, 0x28, 0x35, 0x6f, 0x8b, 0xcb, 0xb1, 0xc4, 0x2d, 0x09, 0xb3, 0x72, 0x6c,
+	0x1e, 0x7a, 0x11, 0x4f, 0xbd, 0x26, 0xe6, 0xdf, 0xec, 0x9e, 0x34, 0xf1, 0xc6, 0xae, 0xe7, 0xfb,
+	0x94, 0x24, 0x22, 0xf1, 0x74, 0x0c, 0x13, 0x6f, 0xdc, 0x11, 0x08, 0xba, 0x07, 0x8d, 0x60, 0x3a,
+	0xff, 0x51, 0xa6, 0xc1, 0xd2, 0x4f, 0x7f, 0x71, 0x0d, 0xd7, 0x19, 0x5a, 0x54, 0xfa, 0x38, 0x53,
+	0x5a, 0xc9, 0x29, 0x7d, 0xac, 0x94, 0x3e, 0x80, 0xe6, 0x69, 0x9c, 0xa4, 0xae, 0x17, 0xf9, 0x22,
+	0x5b, 0xd7, 0x94, 0x16, 0x83, 0x3b, 0x91, 0xcf, 0x13, 0x72, 0x13, 0x80, 0x9c, 0xa7, 0xd4, 0x73,
+	0x3d, 0x7a, 0x92, 0x98, 0xb7, 0xc4, 0x03, 0x0e, 0x47, 0x3a, 0xf4, 0x24, 0x41, 0xcf, 0xa0, 0x39,
+	0xa5, 0xf1, 0xf9, 0x9b, 0xac, 0xab, 0x1b, 0x7c, 0x55, 0x36, 0x8a, 0xcf, 0x5c, 0xdb, 0x07, 0x4c,
+	0x47, 0x76, 0x8c, 0x1b, 0xd3, 0x5c, 0xeb, 0x22, 0x3b, 0x1b, 0xff, 0x06, 0x3b, 0x3f, 0x2b, 0xb2,
+	0xf3, 0xf5, 0x77, 0xb3, 0xb3, 0x5a, 0xaa, 0x3c, 0x49, 0x6f, 0x66, 0xc5, 0xde, 0xcd, 0xc2, 0x6e,
+	0x97, 0xc5, 0x9c, 0x0d, 0xad, 0x71, 0x1c, 0x45, 0x64, 0x9c, 0xaa, 0x3e, 0x10, 0xef, 0x63, 0x43,
+	0xf5, 0xd1, 0x15, 0xd2, 0xab, 0xba, 0x69, 0x8e, 0xf3, 0x32, 0xf4, 0x03, 0xd0, 0xc6, 0xb3, 0x24,
+	0x8d, 0x27, 0xe6, 0x33, 0x1e, 0xa1, 0xd5, 0x6d, 0xf1, 0xfc, 0xbb, 0xad, 0x9e, 0x7f, 0xb7, 0x3b,
+	0xd1, 0x1b, 0x2c, 0x75, 0xd0, 0xa7, 0x00, 0xd3, 0x89, 0xbc, 0x2c, 0x27, 0xe6, 0xaf, 0x4a, 0xdc,
+	0xe4, 0xfa, 0xc5, 0x97, 0x9f, 0x64, 0xb7, 0xf2, 0xcd, 0xb7, 0x5f, 0x6f, 0x5e, 0xc3, 0xfa, 0x34,
+	0x7b, 0xde, 0xda, 0x87, 0x15, 0x51, 0x7f, 0xaa, 0xaa, 0x36, 0x31, 0x7f, 0x5d, 0x7a, 0xc7, 0x4d,
+	0x67, 0xb7, 0xce, 0x5c, 0x68, 0xe2, 0xa9, 0x03, 0xb7, 0x82, 0xc2, 0x65, 0x69, 0xfd, 0x4f, 0x65,
+	0x68, 0xe4, 0xd7, 0xee, 0xdd, 0xfc, 0x7c, 0x07, 0xea, 0x52, 0xb8, 0x60, 0x32, 0x0c, 0xfe, 0xe2,
+	0xc5, 0x79, 0x13, 0x60, 0x7c, 0xea, 0x45, 0x11, 0x09, 0x99, 0xb9, 0x78, 0x53, 0xd2, 0x25, 0x62,
+	0xfb, 0x68, 0x0b, 0x0c, 0x25, 0x16, 0xaf, 0x82, 0x92, 0xd3, 0x9a, 0xb8, 0x25, 0x71, 0xfe, 0x42,
+	0x66, 0xfb, 0x68, 0x07, 0x6e, 0x28, 0xcd, 0x94, 0xd0, 0x49, 0x10, 0xf1, 0xa2, 0x4a, 0xde, 0x79,
+	0x90, 0x14, 0x0d, 0x17, 0x12, 0xb4, 0x06, 0x5a, 0x1c, 0xcd, 0x98, 0x43, 0x4d, 0x3c, 0x02, 0xc5,
+	0xd1, 0xcc, 0xf6, 0xd1, 0x07, 0xd0, 0x62, 0x70, 0x42, 0x12, 0x46, 0x2e, 0xea, 0xa4, 0x6e, 0xe2,
+	0x46, 0x1c, 0xcd, 0x06, 0x02, 0xb4, 0xfd, 0x2b, 0x73, 0xba, 0x76, 0x65, 0x4e, 0xef, 0xea, 0x8c,
+	0x1f, 0x78, 0xa8, 0xda, 0x3b, 0x50, 0x15, 0xbb, 0x9f, 0xa5, 0x5a, 0xe1, 0x84, 0x68, 0x15, 0xb3,
+	0x43, 0x9d, 0x11, 0x7f, 0x5c, 0x82, 0xd5, 0x41, 0x30, 0x99, 0x85, 0x5e, 0x4a, 0x3a, 0xa1, 0x47,
+	0x27, 0x98, 0xbc, 0x9e, 0x91, 0x24, 0xbd, 0xf4, 0x30, 0xf5, 0x1e, 0xe8, 0x41, 0xe4, 0x07, 0x63,
+	0x2f, 0x8d, 0xd5, 0x73, 0xfb, 0x02, 0x60, 0xa7, 0x64, 0x10, 0xa5, 0xc7, 0x2a, 0xc2, 0x3a, 0xd6,
+	0x58, 0x53, 0x4c, 0x96, 0x1f, 0x00, 0x6c, 0x71, 0xc4, 0x7b, 0xac, 0x78, 0xb5, 0x6b, 0x4c, 0xe5,
+	0xd9, 0xc9, 0x9f, 0x64, 0xdb, 0xd0, 0x64, 0x21, 0x59, 0xac, 0xb2, 0xbc, 0x48, 0xc6, 0xd1, 0xac,
+	0xa7, 0x16, 0xfa, 0x29, 0xdc, 0x0c, 0x22, 0xc6, 0xd7, 0xc4, 0x3d, 0x0a, 0x52, 0x51, 0x09, 0xb8,
+	0x94, 0xa5, 0x2f, 0x8b, 0x6e, 0x05, 0xdf, 0x90, 0xd2, 0xdd, 0x20, 0xe5, 0x55, 0x01, 0x16, 0x17,
+	0x97, 0x8a, 0x4f, 0x83, 0xe3, 0x94, 0x87, 0xb8, 0x82, 0x45, 0x83, 0x8d, 0x36, 0x22, 0x67, 0x2e,
+	0x79, 0xed, 0xf3, 0x90, 0x56, 0xb0, 0x16, 0x91, 0x33, 0xeb, 0xb5, 0x8f, 0x1e, 0xc0, 0x75, 0xb1,
+	0x34, 0x79, 0xf6, 0x16, 0x8f, 0x4b, 0x2b, 0x7c, 0x75, 0x72, 0xcc, 0xfd, 0x02, 0x74, 0x96, 0xd4,
+	0x62, 0x13, 0x00, 0x4f, 0xd1, 0x07, 0x2a, 0xc6, 0x57, 0x45, 0x94, 0x73, 0x03, 0xd7, 0xe6, 0x65,
+	0xe2, 0xc2, 0xb8, 0xfd, 0x21, 0x34, 0x0b, 0x32, 0xa4, 0x43, 0x05, 0x77, 0xec, 0x81, 0x25, 0x1e,
+	0xc0, 0xbb, 0xfb, 0x56, 0x07, 0x1b, 0xa5, 0xdd, 0x01, 0xdc, 0x88, 0xe9, 0x09, 0xaf, 0x6b, 0xc6,
+	0x31, 0xf5, 0x65, 0x5f, 0xbb, 0x8d, 0x43, 0xfe, 0x5f, 0xc4, 0xe9, 0x17, 0xdb, 0x27, 0x41, 0x7a,
+	0x3a, 0x3b, 0x62, 0x5c, 0xb1, 0xa3, 0x34, 0x77, 0x84, 0xe6, 0x23, 0xf9, 0x1b, 0xcd, 0xfc, 0xc7,
+	0x3b, 0x27, 0xb1, 0xc4, 0x8e, 0x34, 0x0e, 0x3e, 0xfd, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b,
+	0x77, 0x4a, 0x8d, 0x3d, 0x1a, 0x00, 0x00,
 }
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/events.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/events.pb.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-protos/v4/go/voltha/events.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/voltha/events.pb.go
index 2dc1826..b61c610 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/events.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/events.pb.go
@@ -7,7 +7,7 @@
 	fmt "fmt"
 	proto "github.com/golang/protobuf/proto"
 	timestamp "github.com/golang/protobuf/ptypes/timestamp"
-	common "github.com/opencord/voltha-protos/v4/go/common"
+	common "github.com/opencord/voltha-protos/v5/go/common"
 	_ "google.golang.org/genproto/googleapis/api/annotations"
 	math "math"
 )
@@ -1155,7 +1155,7 @@
 	// 1282 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdb, 0x6e, 0xdb, 0x46,
 	0x13, 0x16, 0xa9, 0xf3, 0x50, 0xb6, 0xe9, 0xcd, 0xff, 0xb7, 0x8a, 0xd2, 0x26, 0x8e, 0x8a, 0x16,
-	0x46, 0x82, 0x50, 0x28, 0x5b, 0x20, 0x86, 0x83, 0x1e, 0x12, 0x85, 0x8d, 0x89, 0xd4, 0x94, 0x4a,
+	0x46, 0x82, 0x50, 0x28, 0x8b, 0x22, 0x86, 0x83, 0x1e, 0x12, 0x85, 0x8d, 0x89, 0xd4, 0x94, 0x4a,
 	0xcb, 0x06, 0xd2, 0x1b, 0x61, 0x4d, 0xae, 0x65, 0xc2, 0x92, 0x48, 0x70, 0x57, 0x42, 0xfc, 0x00,
 	0xbd, 0xee, 0x83, 0xf4, 0x39, 0x7a, 0xd7, 0x37, 0x28, 0x8a, 0xbe, 0x44, 0x1f, 0xa0, 0xd8, 0x03,
 	0x25, 0x52, 0x71, 0x90, 0x0b, 0xa3, 0x57, 0xdc, 0x9d, 0x9d, 0x6f, 0xe7, 0x9b, 0x8f, 0x3b, 0xb3,
@@ -1232,6 +1232,6 @@
 	0x8c, 0x5f, 0xb4, 0x00, 0x64, 0xcf, 0xe7, 0xff, 0xf2, 0x85, 0x03, 0x77, 0xe2, 0x74, 0x62, 0xc5,
 	0x09, 0x99, 0x07, 0x71, 0x1a, 0x2a, 0xe4, 0xcf, 0xd6, 0x24, 0x62, 0x97, 0x8b, 0x73, 0xde, 0x5c,
 	0x7a, 0xd9, 0x5a, 0x4f, 0xae, 0x3d, 0x51, 0x6f, 0xe8, 0xe5, 0xd7, 0xbd, 0x49, 0xac, 0x6c, 0xe7,
-	0x35, 0x61, 0xfc, 0xea, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4e, 0xd5, 0xe2, 0x3b, 0xe5, 0x0b,
+	0x35, 0x61, 0xfc, 0xea, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x16, 0x55, 0x00, 0xec, 0xe5, 0x0b,
 	0x00, 0x00,
 }
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/health.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/health.pb.go
similarity index 97%
rename from vendor/github.com/opencord/voltha-protos/v4/go/voltha/health.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/voltha/health.pb.go
index 4bde873..619ff26 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/health.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/health.pb.go
@@ -8,7 +8,7 @@
 	fmt "fmt"
 	proto "github.com/golang/protobuf/proto"
 	empty "github.com/golang/protobuf/ptypes/empty"
-	_ "github.com/opencord/voltha-protos/v4/go/common"
+	_ "github.com/opencord/voltha-protos/v5/go/common"
 	_ "google.golang.org/genproto/googleapis/api/annotations"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
@@ -123,8 +123,8 @@
 	0xa2, 0x12, 0x7f, 0xd3, 0xe5, 0x27, 0x93, 0x99, 0x38, 0x85, 0xd8, 0xa1, 0x81, 0xe9, 0xe4, 0xca,
 	0x25, 0x9c, 0x5f, 0x94, 0xae, 0x97, 0x5f, 0x90, 0x9a, 0x97, 0x9c, 0x5f, 0x94, 0x02, 0xd5, 0x14,
 	0xa5, 0x97, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x0f, 0x93, 0xd3, 0x87,
-	0xc8, 0xe9, 0x42, 0x83, 0xab, 0xcc, 0x44, 0x3f, 0x3d, 0x1f, 0x2a, 0x96, 0xc4, 0x06, 0x16, 0x34,
-	0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x72, 0x0c, 0x5a, 0x9d, 0xb2, 0x01, 0x00, 0x00,
+	0xc8, 0xe9, 0x42, 0x83, 0xab, 0xcc, 0x54, 0x3f, 0x3d, 0x1f, 0x2a, 0x96, 0xc4, 0x06, 0x16, 0x34,
+	0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x8c, 0xb8, 0x4a, 0xb2, 0x01, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/logical_device.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/logical_device.pb.go
similarity index 79%
rename from vendor/github.com/opencord/voltha-protos/v4/go/voltha/logical_device.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/voltha/logical_device.pb.go
index 5428ecc..2f37696 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/logical_device.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/logical_device.pb.go
@@ -6,8 +6,8 @@
 import (
 	fmt "fmt"
 	proto "github.com/golang/protobuf/proto"
-	_ "github.com/opencord/voltha-protos/v4/go/common"
-	openflow_13 "github.com/opencord/voltha-protos/v4/go/openflow_13"
+	_ "github.com/opencord/voltha-protos/v5/go/common"
+	openflow_13 "github.com/opencord/voltha-protos/v5/go/openflow_13"
 	_ "google.golang.org/genproto/googleapis/api/annotations"
 	math "math"
 )
@@ -321,31 +321,31 @@
 	// 456 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xc1, 0x6a, 0xdb, 0x40,
 	0x10, 0x86, 0x91, 0x63, 0x3b, 0xce, 0xca, 0x51, 0x40, 0x21, 0x54, 0x24, 0x85, 0x08, 0xd1, 0x83,
-	0x43, 0xa9, 0xd4, 0x3a, 0x3d, 0xb4, 0x87, 0x1e, 0x1a, 0x42, 0xc0, 0x50, 0xda, 0xb2, 0x85, 0x1e,
-	0x7a, 0x11, 0x1b, 0xed, 0x5a, 0x5e, 0x90, 0x35, 0x42, 0xbb, 0x71, 0x5e, 0xb6, 0x4f, 0xd1, 0x27,
-	0x28, 0x3b, 0xbb, 0x6a, 0xa5, 0x38, 0x47, 0xfd, 0xf3, 0xcd, 0xcc, 0x3f, 0xff, 0x8a, 0x24, 0x3b,
-	0xa8, 0xf4, 0x86, 0xe5, 0x4d, 0x0b, 0x1a, 0x54, 0x56, 0x41, 0x29, 0x0b, 0x56, 0xe5, 0x5c, 0xec,
-	0x64, 0x21, 0x52, 0x54, 0xc3, 0xa9, 0x65, 0xce, 0x5f, 0x96, 0x00, 0x65, 0x25, 0x32, 0xd6, 0xc8,
-	0x8c, 0xd5, 0x35, 0x68, 0xa6, 0x25, 0xd4, 0xca, 0x52, 0xe7, 0xd1, 0x70, 0xd2, 0x56, 0x68, 0xe6,
-	0x2a, 0x97, 0xc3, 0x0a, 0x34, 0xa2, 0x5e, 0x57, 0xf0, 0x98, 0xbf, 0xbb, 0xb6, 0x40, 0xf2, 0x81,
-	0x1c, 0x7f, 0xb1, 0x8b, 0xbf, 0x43, 0xab, 0x57, 0x3c, 0x0c, 0xc8, 0x48, 0xf2, 0xc8, 0x8b, 0xbd,
-	0xc5, 0x11, 0x1d, 0x49, 0x1e, 0xbe, 0x20, 0x87, 0x0d, 0xb4, 0x3a, 0x97, 0x3c, 0x1a, 0xa1, 0x38,
-	0x6d, 0x10, 0x4c, 0xfe, 0x78, 0xc4, 0xef, 0xb5, 0xee, 0x35, 0xbe, 0x25, 0x33, 0x58, 0x37, 0xb9,
-	0xa1, 0xb1, 0xd3, 0x5f, 0x9e, 0xa5, 0xfd, 0xfd, 0x5d, 0x91, 0x1e, 0xc2, 0xba, 0xc1, 0x09, 0x17,
-	0xe4, 0xc8, 0x1e, 0x6f, 0x96, 0x1d, 0xe0, 0xa0, 0x99, 0x15, 0x56, 0x3c, 0x7c, 0x45, 0x02, 0x57,
-	0x44, 0x3b, 0x35, 0x44, 0xe3, 0xd8, 0x5b, 0x1c, 0xd3, 0xb9, 0x55, 0xcd, 0x80, 0xaf, 0x60, 0x46,
-	0xb4, 0x00, 0xda, 0x6e, 0x9d, 0xc4, 0xde, 0x62, 0x46, 0x67, 0x46, 0xc0, 0xf9, 0x9f, 0x49, 0xd0,
-	0x2d, 0xcd, 0x95, 0x66, 0x5a, 0x45, 0x53, 0xf4, 0x75, 0xf1, 0xac, 0x2f, 0x8b, 0xd0, 0xb9, 0x73,
-	0xf7, 0xc3, 0x7c, 0x25, 0x1f, 0xc9, 0xbc, 0x77, 0xb3, 0x0a, 0xaf, 0xc8, 0x44, 0x6a, 0xb1, 0x55,
-	0x91, 0x17, 0x1f, 0x2c, 0xfc, 0xe5, 0x69, 0x6a, 0xf3, 0x4e, 0x7b, 0x10, 0xb5, 0x44, 0xf2, 0xdb,
-	0xfb, 0x17, 0xf5, 0x2d, 0x5a, 0xde, 0x4b, 0xec, 0x92, 0xf8, 0x9c, 0x69, 0xd6, 0x30, 0xbd, 0xe9,
-	0xe2, 0x1e, 0x53, 0xd2, 0x49, 0x2b, 0x1e, 0x5e, 0x91, 0x31, 0x17, 0xaa, 0xc0, 0x6c, 0x9e, 0x8b,
-	0xd3, 0x14, 0x29, 0x22, 0xe1, 0x8a, 0x9c, 0xa8, 0x47, 0xa9, 0x8b, 0x4d, 0xbe, 0x16, 0x4c, 0x3f,
-	0xb4, 0x42, 0x61, 0x5e, 0xfe, 0x32, 0xde, 0xeb, 0x7a, 0xc2, 0xd1, 0xc0, 0x0a, 0x77, 0xee, 0xdb,
-	0x24, 0x8f, 0x99, 0xfe, 0x7f, 0x9b, 0x09, 0x5a, 0x9e, 0x1b, 0xf5, 0xd6, 0xbd, 0x4f, 0xf2, 0x89,
-	0x04, 0x83, 0xeb, 0x54, 0xf8, 0x7a, 0x98, 0xcd, 0xd9, 0x93, 0x6c, 0x2c, 0xe6, 0xd2, 0xb9, 0xf9,
-	0x49, 0x4e, 0xa1, 0x2d, 0xd1, 0x5b, 0x01, 0x2d, 0x77, 0xec, 0xcd, 0xc9, 0xb7, 0xbb, 0x01, 0xfe,
-	0x2b, 0x2d, 0xa5, 0xde, 0x3c, 0xdc, 0xa7, 0x05, 0x6c, 0xb3, 0x0e, 0xce, 0x2c, 0xfc, 0xc6, 0xfd,
-	0xe4, 0xbb, 0xf7, 0x59, 0x09, 0x4e, 0xbb, 0x9f, 0xa2, 0x78, 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff,
-	0xda, 0x87, 0xca, 0xb8, 0x6d, 0x03, 0x00, 0x00,
+	0x43, 0xa9, 0xd4, 0x3a, 0x14, 0xda, 0x43, 0x0f, 0x0d, 0x21, 0x60, 0x28, 0x6d, 0xd9, 0x42, 0x0f,
+	0xbd, 0x88, 0x8d, 0x76, 0x2d, 0x2f, 0xc8, 0x1a, 0xa1, 0xdd, 0x38, 0x2f, 0xdb, 0xa7, 0xe8, 0x13,
+	0x94, 0x9d, 0x5d, 0xb5, 0x52, 0x9c, 0xa3, 0xfe, 0xf9, 0x66, 0xe6, 0x9f, 0x7f, 0x45, 0x92, 0x1d,
+	0x54, 0x7a, 0xc3, 0xf2, 0xa6, 0x05, 0x0d, 0x2a, 0xab, 0xa0, 0x94, 0x05, 0xab, 0x72, 0x2e, 0x76,
+	0xb2, 0x10, 0x29, 0xaa, 0xe1, 0xd4, 0x32, 0xe7, 0x2f, 0x4b, 0x80, 0xb2, 0x12, 0x19, 0x6b, 0x64,
+	0xc6, 0xea, 0x1a, 0x34, 0xd3, 0x12, 0x6a, 0x65, 0xa9, 0xf3, 0x68, 0x38, 0x69, 0x2b, 0x34, 0x73,
+	0x95, 0xcb, 0x61, 0x05, 0x1a, 0x51, 0xaf, 0x2b, 0x78, 0xcc, 0xdf, 0x5d, 0x5b, 0x20, 0xf9, 0x40,
+	0x8e, 0xbf, 0xd8, 0xc5, 0xdf, 0xa1, 0xd5, 0x2b, 0x1e, 0x06, 0x64, 0x24, 0x79, 0xe4, 0xc5, 0xde,
+	0xe2, 0x88, 0x8e, 0x24, 0x0f, 0x5f, 0x90, 0xc3, 0x06, 0x5a, 0x9d, 0x4b, 0x1e, 0x8d, 0x50, 0x9c,
+	0x36, 0x08, 0x26, 0x7f, 0x3c, 0xe2, 0xf7, 0x5a, 0xf7, 0x1a, 0xdf, 0x92, 0x19, 0xac, 0x9b, 0xdc,
+	0xd0, 0xd8, 0xe9, 0x2f, 0xcf, 0xd2, 0xfe, 0xfe, 0xae, 0x48, 0x0f, 0x61, 0xdd, 0xe0, 0x84, 0x0b,
+	0x72, 0x64, 0x8f, 0x37, 0xcb, 0x0e, 0x70, 0xd0, 0xcc, 0x0a, 0x2b, 0x1e, 0xbe, 0x22, 0x81, 0x2b,
+	0xa2, 0x9d, 0x1a, 0xa2, 0x71, 0xec, 0x2d, 0x8e, 0xe9, 0xdc, 0xaa, 0x66, 0xc0, 0x57, 0x30, 0x23,
+	0x5a, 0x00, 0x6d, 0xb7, 0x4e, 0x62, 0x6f, 0x31, 0xa3, 0x33, 0x23, 0xe0, 0xfc, 0xcf, 0x24, 0xe8,
+	0x96, 0xe6, 0x4a, 0x33, 0xad, 0xa2, 0x29, 0xfa, 0xba, 0x78, 0xd6, 0x97, 0x45, 0xe8, 0xdc, 0xb9,
+	0xfb, 0x61, 0xbe, 0x92, 0x8f, 0x64, 0xde, 0xbb, 0x59, 0x85, 0x57, 0x64, 0x22, 0xb5, 0xd8, 0xaa,
+	0xc8, 0x8b, 0x0f, 0x16, 0xfe, 0xf2, 0x34, 0xb5, 0x79, 0xa7, 0x3d, 0x88, 0x5a, 0x22, 0xf9, 0xed,
+	0xfd, 0x8b, 0xfa, 0x16, 0x2d, 0xef, 0x25, 0x76, 0x49, 0x7c, 0xce, 0x34, 0x6b, 0x98, 0xde, 0x74,
+	0x71, 0x8f, 0x29, 0xe9, 0xa4, 0x15, 0x0f, 0xaf, 0xc8, 0x98, 0x0b, 0x55, 0x60, 0x36, 0xcf, 0xc5,
+	0x69, 0x8a, 0x14, 0x91, 0x70, 0x45, 0x4e, 0xd4, 0xa3, 0xd4, 0xc5, 0x26, 0x5f, 0x0b, 0xa6, 0x1f,
+	0x5a, 0xa1, 0x30, 0x2f, 0x7f, 0x19, 0xef, 0x75, 0x3d, 0xe1, 0x68, 0x60, 0x85, 0x3b, 0xf7, 0x6d,
+	0x92, 0xc7, 0x4c, 0xff, 0xbf, 0xcd, 0x04, 0x2d, 0xcf, 0x8d, 0x7a, 0xeb, 0xde, 0x27, 0xf9, 0x44,
+	0x82, 0xc1, 0x75, 0x2a, 0x7c, 0x3d, 0xcc, 0xe6, 0xec, 0x49, 0x36, 0x16, 0x73, 0xe9, 0xdc, 0xfc,
+	0x24, 0xa7, 0xd0, 0x96, 0xe8, 0xad, 0x80, 0x96, 0x3b, 0xf6, 0xe6, 0xe4, 0xdb, 0xdd, 0x00, 0xff,
+	0x95, 0x96, 0x52, 0x6f, 0x1e, 0xee, 0xd3, 0x02, 0xb6, 0x59, 0x07, 0x67, 0x16, 0x7e, 0xe3, 0x7e,
+	0xf2, 0xdd, 0xfb, 0xac, 0x04, 0xa7, 0xdd, 0x4f, 0x51, 0xbc, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff,
+	0x82, 0x07, 0x28, 0x6f, 0x6d, 0x03, 0x00, 0x00,
 }
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/voltha.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/voltha.pb.go
similarity index 92%
rename from vendor/github.com/opencord/voltha-protos/v4/go/voltha/voltha.pb.go
rename to vendor/github.com/opencord/voltha-protos/v5/go/voltha/voltha.pb.go
index cc7717b..89368cf 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/voltha.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/voltha.pb.go
@@ -8,10 +8,10 @@
 	fmt "fmt"
 	proto "github.com/golang/protobuf/proto"
 	empty "github.com/golang/protobuf/ptypes/empty"
-	common "github.com/opencord/voltha-protos/v4/go/common"
-	config "github.com/opencord/voltha-protos/v4/go/ext/config"
-	omci "github.com/opencord/voltha-protos/v4/go/omci"
-	openflow_13 "github.com/opencord/voltha-protos/v4/go/openflow_13"
+	common "github.com/opencord/voltha-protos/v5/go/common"
+	config "github.com/opencord/voltha-protos/v5/go/ext/config"
+	omci "github.com/opencord/voltha-protos/v5/go/omci"
+	openflow_13 "github.com/opencord/voltha-protos/v5/go/openflow_13"
 	_ "google.golang.org/genproto/googleapis/api/annotations"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
@@ -2373,189 +2373,189 @@
 func init() { proto.RegisterFile("voltha_protos/voltha.proto", fileDescriptor_e084f1a60ce7016c) }
 
 var fileDescriptor_e084f1a60ce7016c = []byte{
-	// 2900 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0x49, 0x73, 0x1b, 0xc7,
-	0x15, 0x16, 0xb8, 0xf3, 0x01, 0x24, 0x81, 0x06, 0x17, 0x08, 0x24, 0xb5, 0xb4, 0x6c, 0x89, 0xa6,
-	0x2d, 0xc0, 0x5a, 0xac, 0x72, 0xec, 0xb8, 0x62, 0x6e, 0xa2, 0x11, 0x4b, 0x02, 0x32, 0x20, 0x25,
-	0x27, 0xb1, 0x0a, 0x35, 0xc0, 0x34, 0xc0, 0x29, 0x01, 0x18, 0x64, 0xa6, 0x41, 0x49, 0xa5, 0x72,
-	0xa5, 0xe2, 0x2c, 0x76, 0xce, 0xbe, 0xe7, 0x94, 0x54, 0xaa, 0x72, 0xc9, 0x7f, 0xc8, 0xdd, 0xa7,
-	0x9c, 0x72, 0x4d, 0xe5, 0x90, 0x5f, 0xe0, 0x73, 0xaa, 0x5f, 0x77, 0x03, 0x33, 0x98, 0x19, 0x2e,
-	0xb2, 0xab, 0x72, 0x22, 0xa7, 0xfb, 0xf5, 0xf7, 0xbd, 0x7e, 0xdd, 0x6f, 0xe9, 0x6e, 0x40, 0xfe,
-	0xd8, 0x69, 0xf3, 0x23, 0xb3, 0xd6, 0x73, 0x1d, 0xee, 0x78, 0x45, 0xf9, 0x55, 0xc0, 0x2f, 0x32,
-	0x25, 0xbf, 0xf2, 0x6b, 0x2d, 0xc7, 0x69, 0xb5, 0x59, 0xd1, 0xec, 0xd9, 0x45, 0xb3, 0xdb, 0x75,
-	0xb8, 0xc9, 0x6d, 0xa7, 0xeb, 0x49, 0xa9, 0xfc, 0xaa, 0xea, 0xc5, 0xaf, 0x7a, 0xbf, 0x59, 0x64,
-	0x9d, 0x1e, 0x7f, 0xa9, 0x3a, 0x73, 0x41, 0xf8, 0x0e, 0xe3, 0x0a, 0x3c, 0x3f, 0x42, 0xdc, 0x70,
-	0x3a, 0x1d, 0xa7, 0x1b, 0xdd, 0x77, 0xc4, 0xcc, 0x36, 0x3f, 0x52, 0x7d, 0x34, 0xd8, 0xd7, 0x76,
-	0x5a, 0x76, 0xc3, 0x6c, 0xd7, 0x2c, 0x76, 0x6c, 0x37, 0x58, 0xf4, 0xf8, 0x40, 0xdf, 0x6a, 0xb0,
-	0xcf, 0xb4, 0xcc, 0x1e, 0x67, 0xae, 0xea, 0xbc, 0x1c, 0xec, 0x74, 0x7a, 0xac, 0xdb, 0x6c, 0x3b,
-	0xcf, 0x6b, 0xb7, 0xee, 0xc4, 0x08, 0x74, 0x1a, 0x76, 0xad, 0x63, 0xd7, 0x6b, 0x56, 0x5d, 0x09,
-	0x5c, 0x8d, 0x10, 0x30, 0xdb, 0xa6, 0xdb, 0x19, 0x8a, 0x5c, 0x0a, 0x8a, 0xb0, 0x17, 0xbc, 0xd6,
-	0x70, 0xba, 0x4d, 0xbb, 0x25, 0xfb, 0xe9, 0x9f, 0x13, 0x90, 0xdc, 0x45, 0x95, 0xf7, 0x5d, 0xa7,
-	0xdf, 0x23, 0x4b, 0x30, 0x66, 0x5b, 0xb9, 0xc4, 0x95, 0xc4, 0xc6, 0xec, 0xf6, 0xe4, 0x7f, 0xbf,
-	0xfb, 0x76, 0x3d, 0x61, 0x8c, 0xd9, 0x16, 0x29, 0xc1, 0x42, 0x70, 0xf2, 0x5e, 0x6e, 0xec, 0xca,
-	0xf8, 0x46, 0xf2, 0xf6, 0x52, 0x41, 0xad, 0xe2, 0x03, 0xd9, 0x2d, 0xb1, 0xb6, 0x67, 0xff, 0xfd,
-	0xdd, 0xb7, 0xeb, 0x13, 0x02, 0xcb, 0x98, 0x6f, 0xfb, 0x7b, 0x3c, 0x72, 0x07, 0xa6, 0x35, 0xc4,
-	0x38, 0x42, 0xcc, 0x6b, 0x88, 0xf0, 0x58, 0x2d, 0x49, 0x7f, 0x04, 0x29, 0x9f, 0x96, 0x1e, 0x79,
-	0x0b, 0x26, 0x6d, 0xce, 0x3a, 0x5e, 0x2e, 0x81, 0x10, 0xd9, 0x20, 0x04, 0x0a, 0x19, 0x52, 0x82,
-	0xfe, 0x29, 0x01, 0x64, 0xef, 0x98, 0x75, 0xf9, 0x7d, 0xbb, 0xcd, 0x99, 0x6b, 0xf4, 0xdb, 0xec,
-	0x53, 0xf6, 0x92, 0x7e, 0x95, 0x80, 0xec, 0x48, 0xf3, 0xc1, 0xcb, 0x1e, 0x23, 0xf3, 0x00, 0x4d,
-	0x6c, 0xa9, 0x99, 0xed, 0x76, 0xfa, 0x02, 0x49, 0xc1, 0x4c, 0xc3, 0xe4, 0xac, 0xe5, 0xb8, 0x2f,
-	0xd3, 0x09, 0x92, 0x86, 0x94, 0xd7, 0xaf, 0xd7, 0x06, 0x2d, 0x63, 0x84, 0xc0, 0xfc, 0xb3, 0x9e,
-	0x5d, 0x63, 0x02, 0xaa, 0xc6, 0x5f, 0xf6, 0x58, 0x7a, 0x9c, 0x2c, 0x41, 0x46, 0x1a, 0xd9, 0xdf,
-	0x3c, 0x21, 0x9a, 0xe5, 0x7c, 0xfc, 0xcd, 0x93, 0xd4, 0x86, 0x85, 0x11, 0x45, 0xc8, 0xc7, 0x30,
-	0xfe, 0x8c, 0xbd, 0xc4, 0x65, 0x98, 0xbf, 0x5d, 0xd0, 0x93, 0x0b, 0xcf, 0xa2, 0x10, 0x31, 0x03,
-	0x43, 0x0c, 0x25, 0x8b, 0x30, 0x79, 0x6c, 0xb6, 0xfb, 0x2c, 0x37, 0x26, 0x96, 0xd2, 0x90, 0x1f,
-	0xf4, 0xaf, 0x09, 0x48, 0xfa, 0x86, 0xc4, 0xad, 0xf6, 0x32, 0x4c, 0xb1, 0xae, 0x59, 0x6f, 0xcb,
-	0xd1, 0x33, 0x86, 0xfa, 0x22, 0xab, 0x30, 0xab, 0x26, 0x60, 0x5b, 0xb9, 0x71, 0x04, 0x9e, 0x91,
-	0x0d, 0x25, 0x8b, 0xac, 0x03, 0x0c, 0xa7, 0x95, 0x9b, 0xc0, 0xde, 0x59, 0x6c, 0x41, 0xbb, 0xde,
-	0x84, 0x49, 0xb7, 0xdf, 0x66, 0x5e, 0x6e, 0x12, 0x57, 0x6c, 0x25, 0x66, 0x52, 0x86, 0x94, 0xa2,
-	0x1f, 0x41, 0xca, 0xd7, 0xe3, 0x91, 0x9b, 0x30, 0x2d, 0x97, 0x25, 0xb4, 0xe4, 0x7e, 0x00, 0x2d,
-	0x43, 0x9f, 0x41, 0x6a, 0xc7, 0x71, 0x59, 0xa9, 0xeb, 0x71, 0xb3, 0xdb, 0x60, 0xe4, 0x3a, 0x24,
-	0x6d, 0xf5, 0x7f, 0x6d, 0x74, 0xc6, 0xa0, 0x7b, 0x4a, 0x16, 0xb9, 0x03, 0x53, 0x32, 0x00, 0xe0,
-	0xcc, 0x93, 0xb7, 0x17, 0x35, 0xcb, 0x27, 0xd8, 0x5a, 0xe5, 0x26, 0xef, 0x7b, 0xdb, 0x93, 0x62,
-	0x87, 0x5e, 0x30, 0x94, 0x28, 0xfd, 0x10, 0xe6, 0xfc, 0x64, 0x1e, 0xd9, 0x0c, 0xee, 0xce, 0x01,
-	0x88, 0x5f, 0x4a, 0x6f, 0xcf, 0xf7, 0x60, 0xa1, 0xdc, 0x69, 0xd8, 0x07, 0xcc, 0xe3, 0x06, 0xfb,
-	0x55, 0x9f, 0x79, 0x9c, 0xcc, 0x0f, 0x57, 0x05, 0x97, 0x83, 0xc0, 0x44, 0xbf, 0x6f, 0x5b, 0x6a,
-	0x29, 0xf1, 0x7f, 0xfa, 0x6b, 0x48, 0xc9, 0x21, 0x5e, 0xcf, 0xe9, 0x7a, 0x8c, 0xfc, 0x04, 0xa6,
-	0x5c, 0xe6, 0xf5, 0xdb, 0x5c, 0x6d, 0x9a, 0x1b, 0x9a, 0xd3, 0x2f, 0x15, 0xf8, 0x30, 0x50, 0xdc,
-	0x50, 0xc3, 0x68, 0x01, 0x48, 0xb8, 0x97, 0x24, 0x61, 0xba, 0x7a, 0xb8, 0xb3, 0xb3, 0x57, 0xad,
-	0xa6, 0x2f, 0x88, 0x8f, 0xfb, 0x5b, 0xa5, 0x07, 0x87, 0xc6, 0x5e, 0x3a, 0x41, 0x9f, 0xc2, 0xcc,
-	0x63, 0xb1, 0xa7, 0xaa, 0x2c, 0xac, 0xf0, 0xfb, 0x90, 0x92, 0x61, 0x48, 0x7a, 0x81, 0xb2, 0x65,
-	0xb6, 0xa0, 0x22, 0xcf, 0x96, 0xe8, 0xdb, 0xc1, 0xff, 0x3f, 0xb9, 0x60, 0x24, 0xcd, 0xe1, 0xe7,
-	0xf6, 0xb4, 0xda, 0xb6, 0xf4, 0x5f, 0x13, 0x30, 0xf5, 0x18, 0x67, 0x40, 0x2e, 0xc3, 0xf4, 0x31,
-	0x73, 0x3d, 0xdb, 0xe9, 0x06, 0xd7, 0x4d, 0xb7, 0x92, 0x7b, 0x30, 0xa3, 0x22, 0xab, 0x8e, 0x4a,
-	0x0b, 0x7a, 0xf6, 0x5b, 0xb2, 0xdd, 0x1f, 0x53, 0x06, 0xb2, 0x51, 0x41, 0x6d, 0xfc, 0xfb, 0x07,
-	0xb5, 0x89, 0xb3, 0x06, 0x35, 0xf2, 0x31, 0xa4, 0x94, 0x3b, 0x09, 0x97, 0xd1, 0x9e, 0x41, 0x82,
-	0x23, 0x85, 0xf3, 0xf8, 0x47, 0x27, 0xad, 0x41, 0xb3, 0x47, 0x76, 0x60, 0x4e, 0x21, 0xb4, 0x30,
-	0x2e, 0xe6, 0xa6, 0x62, 0xc3, 0xa1, 0x1f, 0x43, 0xd1, 0xaa, 0x58, 0xba, 0x03, 0x73, 0xd2, 0x71,
-	0xb5, 0x83, 0x4d, 0xc7, 0x3a, 0x58, 0x00, 0x84, 0xf9, 0xfd, 0xf3, 0x67, 0x90, 0x19, 0xe6, 0x27,
-	0x93, 0x9b, 0x75, 0xd3, 0x63, 0xb9, 0x35, 0x05, 0x24, 0x7a, 0x0a, 0x0f, 0xed, 0xba, 0x54, 0x67,
-	0xd7, 0xe4, 0xe6, 0x76, 0x5a, 0x00, 0x25, 0x7d, 0xf1, 0xc4, 0x58, 0x10, 0x52, 0x42, 0x48, 0x8d,
-	0x26, 0x4f, 0x20, 0xeb, 0xcf, 0x68, 0x1a, 0x74, 0x5d, 0x2d, 0x11, 0x82, 0xe2, 0x56, 0x3a, 0x11,
-	0x16, 0xd5, 0x92, 0x62, 0x0a, 0x81, 0xfe, 0x25, 0x01, 0xe9, 0x2a, 0x6b, 0x37, 0xcf, 0xe6, 0x40,
-	0xa3, 0x92, 0xfe, 0x06, 0xbf, 0x03, 0x55, 0x60, 0x3e, 0xd8, 0x13, 0xef, 0x3c, 0x24, 0x03, 0x73,
-	0x8f, 0xca, 0x07, 0xb5, 0xea, 0x61, 0xa5, 0x52, 0x36, 0x0e, 0xf6, 0x76, 0xd3, 0x63, 0xa2, 0xe9,
-	0xf0, 0xd1, 0xa7, 0x8f, 0xca, 0x4f, 0x1e, 0xd5, 0xf6, 0x0c, 0xa3, 0x6c, 0xa4, 0xc7, 0x69, 0x19,
-	0x32, 0xe5, 0xe6, 0x56, 0x8b, 0x75, 0x79, 0xb5, 0x5f, 0xf7, 0x1a, 0xae, 0x5d, 0x67, 0xae, 0x08,
-	0xb3, 0x4e, 0xd3, 0x14, 0x8d, 0x83, 0x40, 0x66, 0xcc, 0xaa, 0x96, 0x92, 0x25, 0x42, 0xb4, 0xca,
-	0xf8, 0x83, 0x80, 0x31, 0x23, 0x1b, 0x4a, 0x16, 0xfd, 0x10, 0xe0, 0x21, 0xeb, 0xd4, 0x99, 0xeb,
-	0x1d, 0xd9, 0x3d, 0x81, 0x84, 0xbb, 0xa6, 0xd6, 0x35, 0x3b, 0x4c, 0x23, 0x61, 0xcb, 0x23, 0xb3,
-	0xc3, 0x94, 0x53, 0x8f, 0x69, 0xa7, 0xa6, 0xff, 0x48, 0x40, 0x5e, 0x5a, 0xba, 0xd4, 0x31, 0x5b,
-	0x6c, 0xd7, 0x79, 0xde, 0x6d, 0x3b, 0xa6, 0xa5, 0x83, 0xd6, 0x0d, 0x7f, 0x6e, 0x90, 0x71, 0x0f,
-	0x0a, 0xaa, 0xd0, 0x2a, 0xed, 0xfa, 0xf2, 0xc4, 0x35, 0x98, 0xb4, 0x05, 0x80, 0x8a, 0x0a, 0x73,
-	0xda, 0xce, 0x88, 0x6a, 0xc8, 0x3e, 0xf2, 0x0e, 0x64, 0xcc, 0x06, 0xb7, 0x8f, 0x4d, 0xce, 0xca,
-	0xdd, 0x6a, 0xbf, 0xd1, 0x60, 0x9e, 0x87, 0x19, 0x67, 0xc6, 0x08, 0x77, 0x90, 0x0d, 0x58, 0x10,
-	0x4c, 0x36, 0x1f, 0xca, 0x4e, 0xa0, 0xec, 0x68, 0x33, 0xfd, 0x4d, 0x02, 0x88, 0x6f, 0x12, 0xe7,
-	0x56, 0x3e, 0x37, 0x8c, 0x45, 0xd2, 0x32, 0x83, 0x20, 0x14, 0xa1, 0xc3, 0x78, 0xb4, 0x0e, 0x35,
-	0xc8, 0x06, 0x54, 0x50, 0x1b, 0xf0, 0x13, 0xc8, 0x6a, 0x1d, 0x44, 0x7b, 0xcd, 0xe3, 0x26, 0x67,
-	0x3a, 0x85, 0xe4, 0x82, 0x1e, 0x8d, 0x23, 0x45, 0x32, 0x62, 0x86, 0x2a, 0x29, 0x86, 0x2d, 0x1e,
-	0xdd, 0x83, 0xd4, 0xfd, 0xb6, 0xf3, 0xfc, 0x21, 0xe3, 0xa6, 0xf0, 0x1a, 0xf2, 0x1e, 0x4c, 0x75,
-	0x98, 0x2f, 0x75, 0xae, 0x17, 0xfc, 0xb5, 0xa6, 0xd3, 0xec, 0xd5, 0xb0, 0x5b, 0x45, 0x6b, 0x43,
-	0x09, 0xdf, 0xfe, 0xfb, 0x3d, 0x98, 0x93, 0x21, 0xb8, 0xca, 0x5c, 0xc1, 0x41, 0x9e, 0xc0, 0xdc,
-	0x3e, 0xe3, 0xbe, 0x2d, 0xb4, 0x5c, 0x90, 0xf5, 0x78, 0x41, 0xd7, 0xe3, 0x85, 0x3d, 0x51, 0x8f,
-	0xe7, 0x07, 0x31, 0x6c, 0x28, 0x4b, 0xf3, 0x5f, 0xfe, 0xf3, 0x3f, 0xdf, 0x8c, 0x2d, 0x12, 0x82,
-	0xa5, 0xfd, 0xf1, 0xad, 0x62, 0x67, 0x88, 0xf3, 0x14, 0xd2, 0x87, 0x3d, 0xcb, 0xe4, 0xcc, 0x87,
-	0x1d, 0x81, 0x91, 0x8f, 0xe1, 0xa3, 0xeb, 0x88, 0xbd, 0x42, 0x23, 0xb0, 0x3f, 0x48, 0x6c, 0x92,
-	0x5d, 0x98, 0xdd, 0x67, 0x5c, 0xa5, 0x93, 0x38, 0x9d, 0x07, 0x11, 0x5b, 0xca, 0xd1, 0x05, 0xc4,
-	0x9c, 0x25, 0xd3, 0x0a, 0x93, 0x3c, 0x85, 0xcc, 0x03, 0xdb, 0xe3, 0xc1, 0x54, 0x1f, 0x87, 0xb6,
-	0x14, 0x95, 0xf3, 0x3d, 0x7a, 0x11, 0x41, 0xb3, 0x24, 0xa3, 0x15, 0xb5, 0x07, 0x48, 0x55, 0x58,
-	0xd8, 0x67, 0x01, 0x74, 0xe2, 0xdb, 0x83, 0xf9, 0xc8, 0x22, 0x82, 0x5e, 0x42, 0xbc, 0x1c, 0x59,
-	0x0e, 0xe1, 0x15, 0x5f, 0xd9, 0xd6, 0x17, 0xc4, 0x80, 0x94, 0xd0, 0x79, 0x4b, 0xa7, 0xbc, 0x38,
-	0x75, 0xd3, 0x23, 0x09, 0xd3, 0xa3, 0x39, 0x44, 0x26, 0x24, 0xad, 0x91, 0x07, 0x69, 0x93, 0x01,
-	0x11, 0x98, 0x0f, 0x82, 0x19, 0x30, 0x0e, 0x79, 0x39, 0x32, 0x97, 0x7a, 0xf4, 0x32, 0xe2, 0x5f,
-	0x24, 0x2b, 0x1a, 0x7f, 0x24, 0x15, 0x93, 0x5f, 0x42, 0x7a, 0x9f, 0x05, 0x59, 0x02, 0x06, 0x89,
-	0x4e, 0xd2, 0xf4, 0x0d, 0xc4, 0xbd, 0x44, 0xd6, 0x62, 0x70, 0xa5, 0x5d, 0x9a, 0xb0, 0x1c, 0x9a,
-	0x43, 0xc5, 0x71, 0xb9, 0x17, 0x6d, 0x73, 0x25, 0x87, 0x12, 0x74, 0x13, 0x19, 0xde, 0x20, 0xf4,
-	0x24, 0x86, 0x62, 0x0f, 0xd1, 0x5e, 0xc0, 0xe2, 0xe8, 0x24, 0x04, 0x08, 0x59, 0x8a, 0x40, 0x2e,
-	0x59, 0xf9, 0x6c, 0x44, 0x33, 0xbd, 0x8b, 0x7c, 0x05, 0xf2, 0xce, 0xe9, 0x7c, 0xc5, 0x57, 0xe2,
-	0x4f, 0x4d, 0xcc, 0xf0, 0xf7, 0x09, 0x58, 0xd9, 0xc3, 0xb2, 0xfd, 0xcc, 0xec, 0x71, 0xde, 0xf5,
-	0x21, 0x2a, 0xf0, 0x1e, 0xbd, 0x73, 0x1e, 0x05, 0x8a, 0xea, 0xcc, 0xf0, 0x55, 0x02, 0x72, 0xbb,
-	0xb6, 0xf7, 0x83, 0x28, 0xf2, 0x63, 0x54, 0xe4, 0x1e, 0xbd, 0x7b, 0x2e, 0x45, 0x2c, 0xc9, 0x4e,
-	0xac, 0x88, 0x35, 0x17, 0x71, 0x32, 0xb8, 0xe6, 0x24, 0x10, 0x1c, 0xb1, 0xff, 0x8c, 0x2b, 0xde,
-	0x44, 0xac, 0xdf, 0x26, 0x60, 0x4d, 0xc6, 0xb2, 0x10, 0xd1, 0x01, 0xaa, 0xb1, 0x16, 0x22, 0xc0,
-	0x76, 0x39, 0x26, 0x76, 0xea, 0x37, 0x51, 0x85, 0x1b, 0xf4, 0x0c, 0x2a, 0x88, 0x88, 0xf7, 0xbb,
-	0x04, 0xac, 0x47, 0x68, 0xf1, 0x50, 0x44, 0x76, 0xa9, 0xc6, 0x6a, 0x40, 0x0d, 0xec, 0x78, 0xe8,
-	0x58, 0xa7, 0x68, 0x51, 0x40, 0x2d, 0x36, 0xe8, 0xb5, 0x13, 0xb5, 0x90, 0xf9, 0x43, 0xa8, 0xd1,
-	0x82, 0x95, 0x90, 0xc9, 0x91, 0x2a, 0x68, 0xf3, 0x6c, 0x58, 0x17, 0x8f, 0xbe, 0x8d, 0x5c, 0x6f,
-	0x92, 0xb3, 0x70, 0x11, 0x0e, 0xab, 0x91, 0x6b, 0xab, 0x4a, 0x5c, 0x3f, 0xd9, 0x4a, 0xc8, 0xfe,
-	0x52, 0x88, 0xbe, 0x8b, 0x84, 0x9b, 0x64, 0xe3, 0x54, 0x13, 0xab, 0x6a, 0x9b, 0x7c, 0x93, 0x80,
-	0xab, 0x31, 0x6b, 0x8d, 0x98, 0xd2, 0xd2, 0x57, 0xa3, 0x09, 0xcf, 0xb2, 0xea, 0x77, 0x50, 0xa5,
-	0x9b, 0xf4, 0xcc, 0x2a, 0x09, 0xa3, 0x97, 0x21, 0x29, 0x6c, 0x71, 0x5a, 0x60, 0x5e, 0x08, 0x96,
-	0x14, 0x1e, 0x5d, 0x41, 0xb2, 0x0c, 0x59, 0xd0, 0x64, 0x3a, 0x12, 0x97, 0x61, 0x6e, 0x08, 0x58,
-	0xb2, 0xe2, 0x21, 0x93, 0x43, 0x33, 0x47, 0xa4, 0x3a, 0x09, 0x67, 0x5b, 0x1e, 0x39, 0x84, 0xb4,
-	0xc1, 0x1a, 0x4e, 0xb7, 0x61, 0xb7, 0x99, 0x56, 0xd3, 0x3f, 0x36, 0xd6, 0x1e, 0x6b, 0x88, 0xb9,
-	0x4c, 0xc3, 0x98, 0x62, 0xe2, 0x7b, 0x98, 0xe6, 0x23, 0x52, 0xc5, 0xc8, 0x61, 0x4c, 0xc3, 0x90,
-	0xc5, 0x91, 0x99, 0xca, 0xdc, 0xf0, 0x53, 0x48, 0xed, 0xb8, 0xcc, 0xe4, 0x4a, 0x35, 0x32, 0x32,
-	0x3a, 0x84, 0xa6, 0x0a, 0x1b, 0x3a, 0x6a, 0x37, 0xa1, 0xd2, 0x13, 0x48, 0xc9, 0x20, 0x1c, 0xa1,
-	0x55, 0xdc, 0x24, 0xaf, 0x21, 0xde, 0x3a, 0x5d, 0x8d, 0xd2, 0x4e, 0x87, 0xd5, 0x9f, 0xc3, 0x9c,
-	0x8a, 0xaa, 0xe7, 0x40, 0x56, 0xb9, 0x91, 0xae, 0x45, 0x22, 0xeb, 0x38, 0xf9, 0x04, 0x52, 0x06,
-	0xab, 0x3b, 0x0e, 0xff, 0xc1, 0x74, 0x76, 0x11, 0x4e, 0x00, 0xef, 0xb2, 0x36, 0xe3, 0xaf, 0x61,
-	0x8c, 0xcd, 0x68, 0x60, 0x0b, 0xe1, 0x48, 0x1d, 0x32, 0xf7, 0x1d, 0xb7, 0xc1, 0xce, 0x8d, 0xfe,
-	0x16, 0xa2, 0x5f, 0xdb, 0xbc, 0x1a, 0x89, 0xde, 0x14, 0x98, 0x35, 0xc5, 0xd1, 0x87, 0x39, 0x7d,
-	0xe4, 0xc1, 0x5a, 0x7b, 0x98, 0xbb, 0x02, 0xc7, 0xa1, 0xfc, 0x92, 0xa6, 0x2d, 0xf7, 0x98, 0x8b,
-	0x77, 0xd7, 0xa2, 0xc2, 0xa7, 0xf7, 0x90, 0xe9, 0x5d, 0xfa, 0x76, 0x24, 0x93, 0xac, 0xf8, 0x2d,
-	0x85, 0xe1, 0x15, 0x5f, 0x89, 0x23, 0xd9, 0x17, 0x62, 0x03, 0x7d, 0x99, 0x80, 0xe5, 0x7d, 0xc6,
-	0x03, 0x1c, 0xf2, 0x16, 0x2a, 0x5e, 0x81, 0xa8, 0x66, 0xfa, 0x01, 0x2a, 0x70, 0x97, 0xdc, 0x3e,
-	0x87, 0x02, 0x45, 0x4f, 0x32, 0xf5, 0xb1, 0x14, 0x0b, 0xe0, 0x9d, 0x93, 0x5d, 0x05, 0x32, 0x72,
-	0x9e, 0xe9, 0x93, 0xa6, 0x2c, 0x34, 0x03, 0x48, 0xde, 0xc8, 0xba, 0x46, 0xb1, 0x79, 0xf4, 0x1d,
-	0xa4, 0xbb, 0x4e, 0xde, 0x38, 0x0b, 0x1d, 0x79, 0x01, 0xd9, 0x1d, 0x51, 0x33, 0xb7, 0xcf, 0x38,
-	0xc3, 0xc8, 0x05, 0x56, 0x33, 0xdc, 0x3c, 0xd7, 0x0c, 0xbf, 0x4e, 0x40, 0x76, 0x4b, 0x1d, 0x67,
-	0x91, 0x45, 0xe6, 0x83, 0x73, 0x52, 0xef, 0x20, 0xf5, 0x47, 0xf4, 0xfd, 0xf3, 0x2c, 0xad, 0x6c,
-	0xee, 0x23, 0x9f, 0xd8, 0x68, 0x7f, 0x48, 0x40, 0xc6, 0x60, 0xc7, 0xcc, 0xe5, 0xff, 0x17, 0x45,
-	0x5c, 0xa4, 0x16, 0x8a, 0x7c, 0x9d, 0x80, 0xa5, 0x80, 0xa7, 0x1d, 0x38, 0xca, 0xa3, 0x69, 0xc4,
-	0x21, 0x78, 0xe4, 0x1a, 0x22, 0xbf, 0x1a, 0x21, 0xa3, 0x8f, 0xd8, 0xba, 0x7c, 0x21, 0xd7, 0x47,
-	0xf5, 0x43, 0x1d, 0xbc, 0xa2, 0xd6, 0x4d, 0x9e, 0xc0, 0x3d, 0xf2, 0x1c, 0xe6, 0xf5, 0xbe, 0x57,
-	0x3e, 0x97, 0x8f, 0x84, 0x3f, 0x03, 0x75, 0xec, 0x8e, 0x54, 0xd4, 0xf2, 0x4f, 0x4d, 0x39, 0xdc,
-	0x1f, 0x13, 0x70, 0x71, 0xab, 0xee, 0x0c, 0xd6, 0xa2, 0xe5, 0x9a, 0xd6, 0xd0, 0x0e, 0xaf, 0xad,
-	0x44, 0xac, 0x17, 0x2a, 0x25, 0x4c, 0x41, 0x59, 0xeb, 0x4b, 0x3a, 0x6d, 0x84, 0xc7, 0x90, 0xda,
-	0x67, 0xbc, 0xdc, 0xed, 0x97, 0xe4, 0xb7, 0xdf, 0xff, 0x32, 0x9a, 0x6d, 0xd0, 0x4d, 0x6f, 0x20,
-	0xc7, 0x55, 0x72, 0x39, 0x72, 0x0f, 0x38, 0xdd, 0xbe, 0xc6, 0x7d, 0x05, 0x73, 0x81, 0xad, 0xff,
-	0xfa, 0xd3, 0xba, 0x85, 0x94, 0x6f, 0xd3, 0xb8, 0x65, 0xd5, 0x17, 0x46, 0x8a, 0x59, 0x6c, 0xb2,
-	0xe7, 0x90, 0xdc, 0xc1, 0x6b, 0x99, 0xef, 0x49, 0x5d, 0x44, 0xea, 0xb7, 0x68, 0xdc, 0xb2, 0xca,
-	0xbb, 0x1f, 0x1f, 0x71, 0x05, 0x16, 0x86, 0xb5, 0x54, 0xf8, 0xc4, 0x39, 0xb8, 0x0d, 0x93, 0x47,
-	0x4d, 0x8a, 0xf0, 0x6b, 0x24, 0x1f, 0x69, 0x4c, 0x79, 0xc4, 0x7c, 0x0a, 0x59, 0x1f, 0xa2, 0xba,
-	0x48, 0x8f, 0x59, 0xa6, 0x41, 0xf7, 0x29, 0xcb, 0xd4, 0xd3, 0x57, 0xf7, 0x1e, 0xe9, 0xc2, 0x92,
-	0x8c, 0x05, 0xa3, 0x04, 0x61, 0xd0, 0xd8, 0x34, 0xab, 0xce, 0x4f, 0xf4, 0x34, 0x32, 0x61, 0xa0,
-	0x43, 0xbf, 0x81, 0xce, 0x76, 0x3c, 0x3b, 0xd9, 0x4a, 0xf2, 0x58, 0xc6, 0x60, 0x31, 0x08, 0x7b,
-	0x9e, 0x93, 0xc1, 0x06, 0x12, 0x50, 0x72, 0x25, 0x96, 0x40, 0x9f, 0x08, 0x3e, 0xf7, 0x6b, 0x2f,
-	0xef, 0xe8, 0xe3, 0x8a, 0xe5, 0x6c, 0xf8, 0x9e, 0xdf, 0x8b, 0xab, 0x4c, 0xe5, 0x03, 0x01, 0x31,
-	0xf0, 0xfe, 0x6d, 0x28, 0x3f, 0x62, 0x99, 0x10, 0x1e, 0xbd, 0x8a, 0x70, 0xab, 0xe4, 0x62, 0x14,
-	0x9c, 0xac, 0x76, 0x6b, 0x90, 0x1e, 0x6a, 0xac, 0x8c, 0x12, 0xa7, 0xf2, 0x62, 0xc4, 0xbb, 0x82,
-	0xa7, 0x2f, 0xdf, 0xc8, 0xd2, 0x08, 0x89, 0x32, 0xc9, 0x7d, 0x48, 0x57, 0xb9, 0xcb, 0xcc, 0x4e,
-	0xc5, 0x6c, 0x3c, 0x63, 0xdc, 0x2b, 0xf7, 0x39, 0x59, 0x0e, 0x58, 0x5a, 0x76, 0x94, 0xfb, 0x3c,
-	0x76, 0x03, 0x5d, 0xd8, 0x48, 0x90, 0x3d, 0x3c, 0x34, 0x30, 0xfb, 0x98, 0x29, 0xa0, 0x52, 0xf7,
-	0x84, 0xdb, 0xb7, 0x30, 0x7e, 0xa9, 0x4b, 0x2f, 0xbc, 0x9b, 0x20, 0x9f, 0x42, 0x56, 0xc1, 0xec,
-	0x1c, 0x99, 0xdd, 0x16, 0xc3, 0xd7, 0x8d, 0xf8, 0x29, 0xe7, 0x02, 0x48, 0xbe, 0x21, 0x08, 0x76,
-	0x88, 0x09, 0xc2, 0xff, 0x7e, 0x1e, 0x3c, 0xd6, 0x86, 0xcd, 0x15, 0xb7, 0x59, 0x95, 0xb5, 0xf4,
-	0x9a, 0x64, 0xe4, 0x09, 0xc4, 0xff, 0x56, 0x1b, 0xf5, 0x1e, 0x93, 0x8f, 0x6a, 0xa4, 0x57, 0x90,
-	0x22, 0x4f, 0x07, 0x0b, 0x12, 0x78, 0xde, 0x11, 0x4e, 0xf6, 0x18, 0xf5, 0xf6, 0xa3, 0x47, 0x5e,
-	0x7b, 0xf9, 0x5f, 0x60, 0xc3, 0x8a, 0x07, 0x50, 0xa5, 0xe2, 0x16, 0x64, 0x64, 0xb0, 0x78, 0x3d,
-	0xc5, 0xdf, 0x44, 0x8a, 0xcb, 0xf9, 0x13, 0x28, 0x84, 0xf6, 0x16, 0x64, 0x64, 0xa5, 0x7f, 0x2a,
-	0x4b, 0xdc, 0x7e, 0x52, 0x73, 0xd9, 0x3c, 0x69, 0x2e, 0xca, 0x31, 0x02, 0xaf, 0xd0, 0xa7, 0x3a,
-	0x46, 0xc0, 0x62, 0x21, 0xc7, 0x08, 0xb0, 0x90, 0x07, 0x78, 0x5c, 0x8d, 0xc8, 0xaa, 0xf3, 0x81,
-	0xa2, 0xcb, 0xd3, 0x67, 0x20, 0xb2, 0x1a, 0x5f, 0x56, 0x79, 0xe4, 0x33, 0x98, 0xd1, 0xcf, 0x4f,
-	0x01, 0xb0, 0x5c, 0xdc, 0x3b, 0x16, 0xbd, 0x8e, 0xb0, 0x57, 0xe8, 0xa5, 0x48, 0x58, 0x8f, 0xb5,
-	0x9b, 0x35, 0x2e, 0xd0, 0x1e, 0x63, 0xf5, 0x1f, 0x78, 0xbe, 0x1b, 0xbd, 0xbd, 0x09, 0xbd, 0xef,
-	0x85, 0x23, 0x8f, 0x70, 0x23, 0x21, 0xa7, 0xae, 0x6d, 0xec, 0x3a, 0xf9, 0x1c, 0xc8, 0x3e, 0xe3,
-	0x23, 0x2f, 0x78, 0x23, 0x57, 0xbc, 0x51, 0x8f, 0x7c, 0x61, 0x7b, 0x04, 0xb1, 0xf1, 0xbd, 0x90,
-	0x78, 0x30, 0x57, 0xb5, 0x3b, 0xfd, 0xb6, 0xc9, 0x19, 0x8e, 0x27, 0x6b, 0x03, 0x43, 0xf8, 0x9b,
-	0x75, 0x96, 0x8f, 0xa9, 0x68, 0x43, 0xd7, 0x6e, 0x41, 0x1b, 0x29, 0xa4, 0x9a, 0x40, 0x12, 0x3b,
-	0x73, 0x07, 0x66, 0x07, 0x4f, 0x75, 0xe4, 0xe2, 0xa0, 0x38, 0x1a, 0x7d, 0xc4, 0xcb, 0xc7, 0x77,
-	0xd1, 0x0b, 0xe4, 0x21, 0x80, 0xbc, 0x33, 0xc0, 0x2b, 0xd2, 0x94, 0xbf, 0x22, 0x88, 0xdd, 0xd0,
-	0xea, 0xb2, 0x85, 0xce, 0x0b, 0x1d, 0x87, 0xa3, 0xd5, 0x75, 0x90, 0xba, 0x29, 0x38, 0x07, 0xde,
-	0xf0, 0x4e, 0xe3, 0xf8, 0x56, 0xd1, 0x37, 0x5c, 0x00, 0x7e, 0x06, 0x49, 0x11, 0x3c, 0x5e, 0x70,
-	0x7c, 0xff, 0x27, 0xcb, 0xda, 0x72, 0xf2, 0xe7, 0x00, 0x3d, 0xd6, 0xb0, 0x9b, 0x36, 0x73, 0xf3,
-	0x8b, 0xba, 0xdd, 0x60, 0xbc, 0xef, 0x76, 0xb1, 0xd7, 0xa3, 0xab, 0x08, 0xbc, 0x44, 0xb2, 0xda,
-	0xa0, 0x7e, 0xa8, 0x03, 0x48, 0x56, 0x7d, 0x9f, 0x83, 0x47, 0x09, 0xfd, 0x43, 0x83, 0x58, 0x75,
-	0x43, 0xa8, 0x7e, 0x98, 0x23, 0xc8, 0x56, 0xb9, 0xe9, 0x72, 0xfd, 0x33, 0x0b, 0x51, 0x75, 0x3a,
-	0x5d, 0x32, 0xf8, 0x05, 0xca, 0xc8, 0xcf, 0x2f, 0x86, 0xce, 0x1c, 0xf0, 0x16, 0x15, 0x32, 0xe8,
-	0xe0, 0xbd, 0xc2, 0x13, 0x98, 0x35, 0x7c, 0xa1, 0x16, 0x6e, 0xf2, 0x41, 0x62, 0x73, 0xbb, 0x0d,
-	0x59, 0xc7, 0x6d, 0x61, 0xc6, 0x68, 0x38, 0xae, 0xa5, 0x70, 0xb6, 0x53, 0xf2, 0x45, 0xa9, 0x82,
-	0x3f, 0xc1, 0xfa, 0x45, 0xa1, 0x65, 0xf3, 0xa3, 0x7e, 0x5d, 0x58, 0xa7, 0xa8, 0x25, 0xd5, 0x4f,
-	0xe1, 0x6e, 0xea, 0x1f, 0xc6, 0xdd, 0x2d, 0xb6, 0x1c, 0xd5, 0xf6, 0xb7, 0xb1, 0xe5, 0xb2, 0xc6,
-	0x7b, 0xec, 0x7f, 0xa0, 0xaa, 0x8c, 0x55, 0xc6, 0x2b, 0x13, 0x95, 0xc9, 0xca, 0x54, 0x65, 0xba,
-	0x32, 0x53, 0x9f, 0xc2, 0xb1, 0x77, 0xfe, 0x17, 0x00, 0x00, 0xff, 0xff, 0x87, 0x9b, 0x36, 0xf4,
-	0x64, 0x27, 0x00, 0x00,
+	// 2901 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xcb, 0x73, 0x1b, 0xb7,
+	0x19, 0x37, 0xf5, 0xd6, 0x47, 0x4a, 0x22, 0x41, 0x3d, 0x68, 0x4a, 0xf2, 0x03, 0x4e, 0x6c, 0x45,
+	0x89, 0xc9, 0xd8, 0x8e, 0x3d, 0x69, 0xd2, 0x4c, 0xa3, 0x97, 0x15, 0x36, 0xb6, 0xc9, 0x2e, 0x25,
+	0x3b, 0x6d, 0xe3, 0xe1, 0x2c, 0xb9, 0x20, 0xb5, 0x63, 0x92, 0xcb, 0xee, 0x82, 0xb2, 0x3d, 0x9e,
+	0x4c, 0xa7, 0xe9, 0x23, 0xe9, 0x39, 0xf7, 0x9e, 0xda, 0xe9, 0x4c, 0x2f, 0xfd, 0x1f, 0x7a, 0xcf,
+	0xa9, 0xa7, 0x5e, 0x3b, 0x3d, 0xf4, 0x2f, 0xc8, 0xb9, 0x83, 0x0f, 0x00, 0xb9, 0xcb, 0xdd, 0xd5,
+	0xc3, 0xc9, 0x4c, 0x4f, 0xd2, 0x02, 0x1f, 0x7e, 0xbf, 0x0f, 0x1f, 0xf0, 0x3d, 0x00, 0x10, 0xf2,
+	0xc7, 0x4e, 0x9b, 0x1f, 0x99, 0xb5, 0x9e, 0xeb, 0x70, 0xc7, 0x2b, 0xca, 0xaf, 0x02, 0x7e, 0x91,
+	0x29, 0xf9, 0x95, 0x5f, 0x6b, 0x39, 0x4e, 0xab, 0xcd, 0x8a, 0x66, 0xcf, 0x2e, 0x9a, 0xdd, 0xae,
+	0xc3, 0x4d, 0x6e, 0x3b, 0x5d, 0x4f, 0x4a, 0xe5, 0x57, 0x55, 0x2f, 0x7e, 0xd5, 0xfb, 0xcd, 0x22,
+	0xeb, 0xf4, 0xf8, 0x4b, 0xd5, 0x99, 0x0b, 0xc2, 0x77, 0x18, 0x57, 0xe0, 0xf9, 0x11, 0xe2, 0x86,
+	0xd3, 0xe9, 0x38, 0xdd, 0xe8, 0xbe, 0x23, 0x66, 0xb6, 0xf9, 0x91, 0xea, 0xa3, 0xc1, 0xbe, 0xb6,
+	0xd3, 0xb2, 0x1b, 0x66, 0xbb, 0x66, 0xb1, 0x63, 0xbb, 0xc1, 0xa2, 0xc7, 0x07, 0xfa, 0x56, 0x83,
+	0x7d, 0xa6, 0x65, 0xf6, 0x38, 0x73, 0x55, 0xe7, 0xe5, 0x60, 0xa7, 0xd3, 0x63, 0xdd, 0x66, 0xdb,
+	0x79, 0x5e, 0xbb, 0x75, 0x27, 0x46, 0xa0, 0xd3, 0xb0, 0x6b, 0x1d, 0xbb, 0x5e, 0xb3, 0xea, 0x4a,
+	0xe0, 0x6a, 0x84, 0x80, 0xd9, 0x36, 0xdd, 0xce, 0x50, 0xe4, 0x52, 0x50, 0x84, 0xbd, 0xe0, 0xb5,
+	0x86, 0xd3, 0x6d, 0xda, 0x2d, 0xd9, 0x4f, 0xff, 0x9c, 0x80, 0xe4, 0x2e, 0xaa, 0xbc, 0xef, 0x3a,
+	0xfd, 0x1e, 0x59, 0x82, 0x31, 0xdb, 0xca, 0x25, 0xae, 0x24, 0x36, 0x66, 0xb7, 0x27, 0xff, 0xfb,
+	0xdd, 0xb7, 0xeb, 0x09, 0x63, 0xcc, 0xb6, 0x48, 0x09, 0x16, 0x82, 0x93, 0xf7, 0x72, 0x63, 0x57,
+	0xc6, 0x37, 0x92, 0xb7, 0x97, 0x0a, 0x6a, 0x15, 0x1f, 0xc8, 0x6e, 0x89, 0xb5, 0x3d, 0xfb, 0xef,
+	0xef, 0xbe, 0x5d, 0x9f, 0x10, 0x58, 0xc6, 0x7c, 0xdb, 0xdf, 0xe3, 0x91, 0x3b, 0x30, 0xad, 0x21,
+	0xc6, 0x11, 0x62, 0x5e, 0x43, 0x84, 0xc7, 0x6a, 0x49, 0xfa, 0x23, 0x48, 0xf9, 0xb4, 0xf4, 0xc8,
+	0x5b, 0x30, 0x69, 0x73, 0xd6, 0xf1, 0x72, 0x09, 0x84, 0xc8, 0x06, 0x21, 0x50, 0xc8, 0x90, 0x12,
+	0xf4, 0x4f, 0x09, 0x20, 0x7b, 0xc7, 0xac, 0xcb, 0xef, 0xdb, 0x6d, 0xce, 0x5c, 0xa3, 0xdf, 0x66,
+	0x9f, 0xb2, 0x97, 0xf4, 0xab, 0x04, 0x64, 0x47, 0x9a, 0x0f, 0x5e, 0xf6, 0x18, 0x99, 0x07, 0x68,
+	0x62, 0x4b, 0xcd, 0x6c, 0xb7, 0xd3, 0x17, 0x48, 0x0a, 0x66, 0x1a, 0x26, 0x67, 0x2d, 0xc7, 0x7d,
+	0x99, 0x4e, 0x90, 0x34, 0xa4, 0xbc, 0x7e, 0xbd, 0x36, 0x68, 0x19, 0x23, 0x04, 0xe6, 0x9f, 0xf5,
+	0xec, 0x1a, 0x13, 0x50, 0x35, 0xfe, 0xb2, 0xc7, 0xd2, 0xe3, 0x64, 0x09, 0x32, 0xd2, 0xc8, 0xfe,
+	0xe6, 0x09, 0xd1, 0x2c, 0xe7, 0xe3, 0x6f, 0x9e, 0xa4, 0x36, 0x2c, 0x8c, 0x28, 0x42, 0x3e, 0x86,
+	0xf1, 0x67, 0xec, 0x25, 0x2e, 0xc3, 0xfc, 0xed, 0x82, 0x9e, 0x5c, 0x78, 0x16, 0x85, 0x88, 0x19,
+	0x18, 0x62, 0x28, 0x59, 0x84, 0xc9, 0x63, 0xb3, 0xdd, 0x67, 0xb9, 0x31, 0xb1, 0x94, 0x86, 0xfc,
+	0xa0, 0x7f, 0x4d, 0x40, 0xd2, 0x37, 0x24, 0x6e, 0xb5, 0x97, 0x61, 0x8a, 0x75, 0xcd, 0x7a, 0x5b,
+	0x8e, 0x9e, 0x31, 0xd4, 0x17, 0x59, 0x85, 0x59, 0x35, 0x01, 0xdb, 0xca, 0x8d, 0x23, 0xf0, 0x8c,
+	0x6c, 0x28, 0x59, 0x64, 0x1d, 0x60, 0x38, 0xad, 0xdc, 0x04, 0xf6, 0xce, 0x62, 0x0b, 0xda, 0xf5,
+	0x26, 0x4c, 0xba, 0xfd, 0x36, 0xf3, 0x72, 0x93, 0xb8, 0x62, 0x2b, 0x31, 0x93, 0x32, 0xa4, 0x14,
+	0xfd, 0x08, 0x52, 0xbe, 0x1e, 0x8f, 0xdc, 0x84, 0x69, 0xb9, 0x2c, 0xa1, 0x25, 0xf7, 0x03, 0x68,
+	0x19, 0xfa, 0x0c, 0x52, 0x3b, 0x8e, 0xcb, 0x4a, 0x5d, 0x8f, 0x9b, 0xdd, 0x06, 0x23, 0xd7, 0x21,
+	0x69, 0xab, 0xff, 0x6b, 0xa3, 0x33, 0x06, 0xdd, 0x53, 0xb2, 0xc8, 0x1d, 0x98, 0x92, 0x01, 0x00,
+	0x67, 0x9e, 0xbc, 0xbd, 0xa8, 0x59, 0x3e, 0xc1, 0xd6, 0x2a, 0x37, 0x79, 0xdf, 0xdb, 0x9e, 0x14,
+	0x3b, 0xf4, 0x82, 0xa1, 0x44, 0xe9, 0x87, 0x30, 0xe7, 0x27, 0xf3, 0xc8, 0x66, 0x70, 0x77, 0x0e,
+	0x40, 0xfc, 0x52, 0x7a, 0x7b, 0xde, 0x85, 0x85, 0x72, 0xa7, 0x61, 0x1f, 0x30, 0x8f, 0x1b, 0xec,
+	0x57, 0x7d, 0xe6, 0x71, 0x32, 0x3f, 0x5c, 0x15, 0x5c, 0x0e, 0x02, 0x13, 0xfd, 0xbe, 0x6d, 0xa9,
+	0xa5, 0xc4, 0xff, 0xe9, 0xaf, 0x21, 0x25, 0x87, 0x78, 0x3d, 0xa7, 0xeb, 0x31, 0xf2, 0x13, 0x98,
+	0x72, 0x99, 0xd7, 0x6f, 0x73, 0xb5, 0x69, 0x6e, 0x68, 0x4e, 0xbf, 0x54, 0xe0, 0xc3, 0x40, 0x71,
+	0x43, 0x0d, 0xa3, 0x05, 0x20, 0xe1, 0x5e, 0x92, 0x84, 0xe9, 0xea, 0xe1, 0xce, 0xce, 0x5e, 0xb5,
+	0x9a, 0xbe, 0x20, 0x3e, 0xee, 0x6f, 0x95, 0x1e, 0x1c, 0x1a, 0x7b, 0xe9, 0x04, 0x7d, 0x0a, 0x33,
+	0x8f, 0xc5, 0x9e, 0xaa, 0xb2, 0xb0, 0xc2, 0xef, 0x43, 0x4a, 0x86, 0x21, 0xe9, 0x05, 0xca, 0x96,
+	0xd9, 0x82, 0x8a, 0x3c, 0x5b, 0xa2, 0x6f, 0x07, 0xff, 0xff, 0xe4, 0x82, 0x91, 0x34, 0x87, 0x9f,
+	0xdb, 0xd3, 0x6a, 0xdb, 0xd2, 0x7f, 0x4d, 0xc0, 0xd4, 0x63, 0x9c, 0x01, 0xb9, 0x0c, 0xd3, 0xc7,
+	0xcc, 0xf5, 0x6c, 0xa7, 0x1b, 0x5c, 0x37, 0xdd, 0x4a, 0xee, 0xc1, 0x8c, 0x8a, 0xac, 0x3a, 0x2a,
+	0x2d, 0xe8, 0xd9, 0x6f, 0xc9, 0x76, 0x7f, 0x4c, 0x19, 0xc8, 0x46, 0x05, 0xb5, 0xf1, 0xef, 0x1f,
+	0xd4, 0x26, 0xce, 0x1a, 0xd4, 0xc8, 0xc7, 0x90, 0x52, 0xee, 0x24, 0x5c, 0x46, 0x7b, 0x06, 0x09,
+	0x8e, 0x14, 0xce, 0xe3, 0x1f, 0x9d, 0xb4, 0x06, 0xcd, 0x1e, 0xd9, 0x81, 0x39, 0x85, 0xd0, 0xc2,
+	0xb8, 0x98, 0x9b, 0x8a, 0x0d, 0x87, 0x7e, 0x0c, 0x45, 0xab, 0x62, 0xe9, 0x0e, 0xcc, 0x49, 0xc7,
+	0xd5, 0x0e, 0x36, 0x1d, 0xeb, 0x60, 0x01, 0x10, 0xe6, 0xf7, 0xcf, 0x9f, 0x41, 0x66, 0x98, 0x9f,
+	0x4c, 0x6e, 0xd6, 0x4d, 0x8f, 0xe5, 0xd6, 0x14, 0x90, 0xe8, 0x29, 0x3c, 0xb4, 0xeb, 0x52, 0x9d,
+	0x5d, 0x93, 0x9b, 0xdb, 0x69, 0x01, 0x94, 0xf4, 0xc5, 0x13, 0x63, 0x41, 0x48, 0x09, 0x21, 0x35,
+	0x9a, 0x3c, 0x81, 0xac, 0x3f, 0xa3, 0x69, 0xd0, 0x75, 0xb5, 0x44, 0x08, 0x8a, 0x5b, 0xe9, 0x44,
+	0x58, 0x54, 0x4b, 0x8a, 0x29, 0x04, 0xfa, 0x97, 0x04, 0xa4, 0xab, 0xac, 0xdd, 0x3c, 0x9b, 0x03,
+	0x8d, 0x4a, 0xfa, 0x1b, 0xfc, 0x0e, 0x54, 0x81, 0xf9, 0x60, 0x4f, 0xbc, 0xf3, 0x90, 0x0c, 0xcc,
+	0x3d, 0x2a, 0x1f, 0xd4, 0xaa, 0x87, 0x95, 0x4a, 0xd9, 0x38, 0xd8, 0xdb, 0x4d, 0x8f, 0x89, 0xa6,
+	0xc3, 0x47, 0x9f, 0x3e, 0x2a, 0x3f, 0x79, 0x54, 0xdb, 0x33, 0x8c, 0xb2, 0x91, 0x1e, 0xa7, 0x65,
+	0xc8, 0x94, 0x9b, 0x5b, 0x2d, 0xd6, 0xe5, 0xd5, 0x7e, 0xdd, 0x6b, 0xb8, 0x76, 0x9d, 0xb9, 0x22,
+	0xcc, 0x3a, 0x4d, 0x53, 0x34, 0x0e, 0x02, 0x99, 0x31, 0xab, 0x5a, 0x4a, 0x96, 0x08, 0xd1, 0x2a,
+	0xe3, 0x0f, 0x02, 0xc6, 0x8c, 0x6c, 0x28, 0x59, 0xf4, 0x43, 0x80, 0x87, 0xac, 0x53, 0x67, 0xae,
+	0x77, 0x64, 0xf7, 0x04, 0x12, 0xee, 0x9a, 0x5a, 0xd7, 0xec, 0x30, 0x8d, 0x84, 0x2d, 0x8f, 0xcc,
+	0x0e, 0x53, 0x4e, 0x3d, 0xa6, 0x9d, 0x9a, 0xfe, 0x23, 0x01, 0x79, 0x69, 0xe9, 0x52, 0xc7, 0x6c,
+	0xb1, 0x5d, 0xe7, 0x79, 0xb7, 0xed, 0x98, 0x96, 0x0e, 0x5a, 0x37, 0xfc, 0xb9, 0x41, 0xc6, 0x3d,
+	0x28, 0xa8, 0x42, 0xab, 0xb4, 0xeb, 0xcb, 0x13, 0xd7, 0x60, 0xd2, 0x16, 0x00, 0x2a, 0x2a, 0xcc,
+	0x69, 0x3b, 0x23, 0xaa, 0x21, 0xfb, 0xc8, 0x3b, 0x90, 0x31, 0x1b, 0xdc, 0x3e, 0x36, 0x39, 0x2b,
+	0x77, 0xab, 0xfd, 0x46, 0x83, 0x79, 0x1e, 0x66, 0x9c, 0x19, 0x23, 0xdc, 0x41, 0x36, 0x60, 0x41,
+	0x30, 0xd9, 0x7c, 0x28, 0x3b, 0x81, 0xb2, 0xa3, 0xcd, 0xf4, 0x37, 0x09, 0x20, 0xbe, 0x49, 0x9c,
+	0x5b, 0xf9, 0xdc, 0x30, 0x16, 0x49, 0xcb, 0x0c, 0x82, 0x50, 0x84, 0x0e, 0xe3, 0xd1, 0x3a, 0xd4,
+	0x20, 0x1b, 0x50, 0x41, 0x6d, 0xc0, 0x4f, 0x20, 0xab, 0x75, 0x10, 0xed, 0x35, 0x8f, 0x9b, 0x9c,
+	0xe9, 0x14, 0x92, 0x0b, 0x7a, 0x34, 0x8e, 0x14, 0xc9, 0x88, 0x19, 0xaa, 0xa4, 0x18, 0xb6, 0x78,
+	0x74, 0x0f, 0x52, 0xf7, 0xdb, 0xce, 0xf3, 0x87, 0x8c, 0x9b, 0xc2, 0x6b, 0xc8, 0x5d, 0x98, 0xea,
+	0x30, 0x5f, 0xea, 0x5c, 0x2f, 0xf8, 0x6b, 0x4d, 0xa7, 0xd9, 0xab, 0x61, 0xb7, 0x8a, 0xd6, 0x86,
+	0x12, 0xbe, 0xfd, 0xf7, 0x7b, 0x30, 0x27, 0x43, 0x70, 0x95, 0xb9, 0x82, 0x83, 0x3c, 0x81, 0xb9,
+	0x7d, 0xc6, 0x7d, 0x5b, 0x68, 0xb9, 0x20, 0xeb, 0xf1, 0x82, 0xae, 0xc7, 0x0b, 0x7b, 0xa2, 0x1e,
+	0xcf, 0x0f, 0x62, 0xd8, 0x50, 0x96, 0xe6, 0xbf, 0xfc, 0xe7, 0x7f, 0xbe, 0x19, 0x5b, 0x24, 0x04,
+	0x4b, 0xfb, 0xe3, 0x5b, 0xc5, 0xce, 0x10, 0xe7, 0x29, 0xa4, 0x0f, 0x7b, 0x96, 0xc9, 0x99, 0x0f,
+	0x3b, 0x02, 0x23, 0x1f, 0xc3, 0x47, 0xd7, 0x11, 0x7b, 0x85, 0x46, 0x60, 0x7f, 0x90, 0xd8, 0x24,
+	0xbb, 0x30, 0xbb, 0xcf, 0xb8, 0x4a, 0x27, 0x71, 0x3a, 0x0f, 0x22, 0xb6, 0x94, 0xa3, 0x0b, 0x88,
+	0x39, 0x4b, 0xa6, 0x15, 0x26, 0x79, 0x0a, 0x99, 0x07, 0xb6, 0xc7, 0x83, 0xa9, 0x3e, 0x0e, 0x6d,
+	0x29, 0x2a, 0xe7, 0x7b, 0xf4, 0x22, 0x82, 0x66, 0x49, 0x46, 0x2b, 0x6a, 0x0f, 0x90, 0xaa, 0xb0,
+	0xb0, 0xcf, 0x02, 0xe8, 0xc4, 0xb7, 0x07, 0xf3, 0x91, 0x45, 0x04, 0xbd, 0x84, 0x78, 0x39, 0xb2,
+	0x1c, 0xc2, 0x2b, 0xbe, 0xb2, 0xad, 0x2f, 0x88, 0x01, 0x29, 0xa1, 0xf3, 0x96, 0x4e, 0x79, 0x71,
+	0xea, 0xa6, 0x47, 0x12, 0xa6, 0x47, 0x73, 0x88, 0x4c, 0x48, 0x5a, 0x23, 0x0f, 0xd2, 0x26, 0x03,
+	0x22, 0x30, 0x1f, 0x04, 0x33, 0x60, 0x1c, 0xf2, 0x72, 0x64, 0x2e, 0xf5, 0xe8, 0x65, 0xc4, 0xbf,
+	0x48, 0x56, 0x34, 0xfe, 0x48, 0x2a, 0x26, 0xbf, 0x84, 0xf4, 0x3e, 0x0b, 0xb2, 0x04, 0x0c, 0x12,
+	0x9d, 0xa4, 0xe9, 0x1b, 0x88, 0x7b, 0x89, 0xac, 0xc5, 0xe0, 0x4a, 0xbb, 0x34, 0x61, 0x39, 0x34,
+	0x87, 0x8a, 0xe3, 0x72, 0x2f, 0xda, 0xe6, 0x4a, 0x0e, 0x25, 0xe8, 0x26, 0x32, 0xbc, 0x41, 0xe8,
+	0x49, 0x0c, 0xc5, 0x1e, 0xa2, 0xbd, 0x80, 0xc5, 0xd1, 0x49, 0x08, 0x10, 0xb2, 0x14, 0x81, 0x5c,
+	0xb2, 0xf2, 0xd9, 0x88, 0x66, 0xfa, 0x1e, 0xf2, 0x15, 0xc8, 0x3b, 0xa7, 0xf3, 0x15, 0x5f, 0x89,
+	0x3f, 0x35, 0x31, 0xc3, 0xdf, 0x27, 0x60, 0x65, 0x0f, 0xcb, 0xf6, 0x33, 0xb3, 0xc7, 0x79, 0xd7,
+	0x87, 0xa8, 0xc0, 0x5d, 0x7a, 0xe7, 0x3c, 0x0a, 0x14, 0xd5, 0x99, 0xe1, 0xab, 0x04, 0xe4, 0x76,
+	0x6d, 0xef, 0x07, 0x51, 0xe4, 0xc7, 0xa8, 0xc8, 0x3d, 0xfa, 0xde, 0xb9, 0x14, 0xb1, 0x24, 0x3b,
+	0xb1, 0x22, 0xd6, 0x5c, 0xc4, 0xc9, 0xe0, 0x9a, 0x93, 0x40, 0x70, 0xc4, 0xfe, 0x33, 0xae, 0x78,
+	0x13, 0xb1, 0x7e, 0x9b, 0x80, 0x35, 0x19, 0xcb, 0x42, 0x44, 0x07, 0xa8, 0xc6, 0x5a, 0x88, 0x00,
+	0xdb, 0xe5, 0x98, 0xd8, 0xa9, 0xdf, 0x44, 0x15, 0x6e, 0xd0, 0x33, 0xa8, 0x20, 0x22, 0xde, 0xef,
+	0x12, 0xb0, 0x1e, 0xa1, 0xc5, 0x43, 0x11, 0xd9, 0xa5, 0x1a, 0xab, 0x01, 0x35, 0xb0, 0xe3, 0xa1,
+	0x63, 0x9d, 0xa2, 0x45, 0x01, 0xb5, 0xd8, 0xa0, 0xd7, 0x4e, 0xd4, 0x42, 0xe6, 0x0f, 0xa1, 0x46,
+	0x0b, 0x56, 0x42, 0x26, 0x47, 0xaa, 0xa0, 0xcd, 0xb3, 0x61, 0x5d, 0x3c, 0xfa, 0x36, 0x72, 0xbd,
+	0x49, 0xce, 0xc2, 0x45, 0x38, 0xac, 0x46, 0xae, 0xad, 0x2a, 0x71, 0xfd, 0x64, 0x2b, 0x21, 0xfb,
+	0x4b, 0x21, 0xfa, 0x2e, 0x12, 0x6e, 0x92, 0x8d, 0x53, 0x4d, 0xac, 0xaa, 0x6d, 0xf2, 0x4d, 0x02,
+	0xae, 0xc6, 0xac, 0x35, 0x62, 0x4a, 0x4b, 0x5f, 0x8d, 0x26, 0x3c, 0xcb, 0xaa, 0xdf, 0x41, 0x95,
+	0x6e, 0xd2, 0x33, 0xab, 0x24, 0x8c, 0x5e, 0x86, 0xa4, 0xb0, 0xc5, 0x69, 0x81, 0x79, 0x21, 0x58,
+	0x52, 0x78, 0x74, 0x05, 0xc9, 0x32, 0x64, 0x41, 0x93, 0xe9, 0x48, 0x5c, 0x86, 0xb9, 0x21, 0x60,
+	0xc9, 0x8a, 0x87, 0x4c, 0x0e, 0xcd, 0x1c, 0x91, 0xea, 0x24, 0x9c, 0x6d, 0x79, 0xe4, 0x10, 0xd2,
+	0x06, 0x6b, 0x38, 0xdd, 0x86, 0xdd, 0x66, 0x5a, 0x4d, 0xff, 0xd8, 0x58, 0x7b, 0xac, 0x21, 0xe6,
+	0x32, 0x0d, 0x63, 0x8a, 0x89, 0xef, 0x61, 0x9a, 0x8f, 0x48, 0x15, 0x23, 0x87, 0x31, 0x0d, 0x43,
+	0x16, 0x47, 0x66, 0x2a, 0x73, 0xc3, 0x4f, 0x21, 0xb5, 0xe3, 0x32, 0x93, 0x2b, 0xd5, 0xc8, 0xc8,
+	0xe8, 0x10, 0x9a, 0x2a, 0x6c, 0xe8, 0xa8, 0xdd, 0x84, 0x4a, 0x4f, 0x20, 0x25, 0x83, 0x70, 0x84,
+	0x56, 0x71, 0x93, 0xbc, 0x86, 0x78, 0xeb, 0x74, 0x35, 0x4a, 0x3b, 0x1d, 0x56, 0x7f, 0x0e, 0x73,
+	0x2a, 0xaa, 0x9e, 0x03, 0x59, 0xe5, 0x46, 0xba, 0x16, 0x89, 0xac, 0xe3, 0xe4, 0x13, 0x48, 0x19,
+	0xac, 0xee, 0x38, 0xfc, 0x07, 0xd3, 0xd9, 0x45, 0x38, 0x01, 0xbc, 0xcb, 0xda, 0x8c, 0xbf, 0x86,
+	0x31, 0x36, 0xa3, 0x81, 0x2d, 0x84, 0x23, 0x75, 0xc8, 0xdc, 0x77, 0xdc, 0x06, 0x3b, 0x37, 0xfa,
+	0x5b, 0x88, 0x7e, 0x6d, 0xf3, 0x6a, 0x24, 0x7a, 0x53, 0x60, 0xd6, 0x14, 0x47, 0x1f, 0xe6, 0xf4,
+	0x91, 0x07, 0x6b, 0xed, 0x61, 0xee, 0x0a, 0x1c, 0x87, 0xf2, 0x4b, 0x9a, 0xb6, 0xdc, 0x63, 0x2e,
+	0xde, 0x5d, 0x8b, 0x0a, 0x9f, 0xde, 0x43, 0xa6, 0x77, 0xe9, 0xdb, 0x91, 0x4c, 0xb2, 0xe2, 0xb7,
+	0x14, 0x86, 0x57, 0x7c, 0x25, 0x8e, 0x64, 0x5f, 0x88, 0x0d, 0xf4, 0x65, 0x02, 0x96, 0xf7, 0x19,
+	0x0f, 0x70, 0xc8, 0x5b, 0xa8, 0x78, 0x05, 0xa2, 0x9a, 0xe9, 0x07, 0xa8, 0xc0, 0x7b, 0xe4, 0xf6,
+	0x39, 0x14, 0x28, 0x7a, 0x92, 0xa9, 0x8f, 0xa5, 0x58, 0x00, 0xef, 0x9c, 0xec, 0x2a, 0x90, 0x91,
+	0xf3, 0x4c, 0x9f, 0x34, 0x65, 0xa1, 0x19, 0x40, 0xf2, 0x46, 0xd6, 0x35, 0x8a, 0xcd, 0xa3, 0xef,
+	0x20, 0xdd, 0x75, 0xf2, 0xc6, 0x59, 0xe8, 0xc8, 0x0b, 0xc8, 0xee, 0x88, 0x9a, 0xb9, 0x7d, 0xc6,
+	0x19, 0x46, 0x2e, 0xb0, 0x9a, 0xe1, 0xe6, 0xb9, 0x66, 0xf8, 0x75, 0x02, 0xb2, 0x5b, 0xea, 0x38,
+	0x8b, 0x2c, 0x32, 0x1f, 0x9c, 0x93, 0x7a, 0x07, 0xa9, 0x3f, 0xa2, 0xef, 0x9f, 0x67, 0x69, 0x65,
+	0x73, 0x1f, 0xf9, 0xc4, 0x46, 0xfb, 0x43, 0x02, 0x32, 0x06, 0x3b, 0x66, 0x2e, 0xff, 0xbf, 0x28,
+	0xe2, 0x22, 0xb5, 0x50, 0xe4, 0xeb, 0x04, 0x2c, 0x05, 0x3c, 0xed, 0xc0, 0x51, 0x1e, 0x4d, 0x23,
+	0x0e, 0xc1, 0x23, 0xd7, 0x10, 0xf9, 0xd5, 0x08, 0x19, 0x7d, 0xc4, 0xd6, 0xe5, 0x0b, 0xb9, 0x3e,
+	0xaa, 0x1f, 0xea, 0xe0, 0x15, 0xb5, 0x6e, 0xf2, 0x04, 0xee, 0x91, 0xe7, 0x30, 0xaf, 0xf7, 0xbd,
+	0xf2, 0xb9, 0x7c, 0x24, 0xfc, 0x19, 0xa8, 0x63, 0x77, 0xa4, 0xa2, 0x96, 0x7f, 0x6a, 0xca, 0xe1,
+	0xfe, 0x98, 0x80, 0x8b, 0x5b, 0x75, 0x67, 0xb0, 0x16, 0x2d, 0xd7, 0xb4, 0x86, 0x76, 0x78, 0x6d,
+	0x25, 0x62, 0xbd, 0x50, 0x29, 0x61, 0x0a, 0xca, 0x5a, 0x5f, 0xd2, 0x69, 0x23, 0x3c, 0x86, 0xd4,
+	0x3e, 0xe3, 0xe5, 0x6e, 0xbf, 0x24, 0xbf, 0xfd, 0xfe, 0x97, 0xd1, 0x6c, 0x83, 0x6e, 0x7a, 0x03,
+	0x39, 0xae, 0x92, 0xcb, 0x91, 0x7b, 0xc0, 0xe9, 0xf6, 0x35, 0xee, 0x2b, 0x98, 0x0b, 0x6c, 0xfd,
+	0xd7, 0x9f, 0xd6, 0x2d, 0xa4, 0x7c, 0x9b, 0xc6, 0x2d, 0xab, 0xbe, 0x30, 0x52, 0xcc, 0x62, 0x93,
+	0x3d, 0x87, 0xe4, 0x0e, 0x5e, 0xcb, 0x7c, 0x4f, 0xea, 0x22, 0x52, 0xbf, 0x45, 0xe3, 0x96, 0x55,
+	0xde, 0xfd, 0xf8, 0x88, 0x2b, 0xb0, 0x30, 0xac, 0xa5, 0xc2, 0x27, 0xce, 0xc1, 0x6d, 0x98, 0x3c,
+	0x6a, 0x52, 0x84, 0x5f, 0x23, 0xf9, 0x48, 0x63, 0xca, 0x23, 0xe6, 0x53, 0xc8, 0xfa, 0x10, 0xd5,
+	0x45, 0x7a, 0xcc, 0x32, 0x0d, 0xba, 0x4f, 0x59, 0xa6, 0x9e, 0xbe, 0xba, 0xf7, 0x48, 0x17, 0x96,
+	0x64, 0x2c, 0x18, 0x25, 0x08, 0x83, 0xc6, 0xa6, 0x59, 0x75, 0x7e, 0xa2, 0xa7, 0x91, 0x09, 0x03,
+	0x1d, 0xfa, 0x0d, 0x74, 0xb6, 0xe3, 0xd9, 0xc9, 0x56, 0x92, 0xc7, 0x32, 0x06, 0x8b, 0x41, 0xd8,
+	0xf3, 0x9c, 0x0c, 0x36, 0x90, 0x80, 0x92, 0x2b, 0xb1, 0x04, 0xfa, 0x44, 0xf0, 0xb9, 0x5f, 0x7b,
+	0x79, 0x47, 0x1f, 0x57, 0x2c, 0x67, 0xc3, 0xf7, 0xfc, 0x5e, 0x5c, 0x65, 0x2a, 0x1f, 0x08, 0x88,
+	0x81, 0xf7, 0x6f, 0x43, 0xf9, 0x11, 0xcb, 0x84, 0xf0, 0xe8, 0x55, 0x84, 0x5b, 0x25, 0x17, 0xa3,
+	0xe0, 0x64, 0xb5, 0x5b, 0x83, 0xf4, 0x50, 0x63, 0x65, 0x94, 0x38, 0x95, 0x17, 0x23, 0xde, 0x15,
+	0x3c, 0x7d, 0xf9, 0x46, 0x96, 0x46, 0x48, 0x94, 0x49, 0xee, 0x43, 0xba, 0xca, 0x5d, 0x66, 0x76,
+	0x2a, 0x66, 0xe3, 0x19, 0xe3, 0x5e, 0xb9, 0xcf, 0xc9, 0x72, 0xc0, 0xd2, 0xb2, 0xa3, 0xdc, 0xe7,
+	0xb1, 0x1b, 0xe8, 0xc2, 0x46, 0x82, 0xec, 0xe1, 0xa1, 0x81, 0xd9, 0xc7, 0x4c, 0x01, 0x95, 0xba,
+	0x27, 0xdc, 0xbe, 0x85, 0xf1, 0x4b, 0x5d, 0x7a, 0xe1, 0xdd, 0x04, 0xf9, 0x14, 0xb2, 0x0a, 0x66,
+	0xe7, 0xc8, 0xec, 0xb6, 0x18, 0xbe, 0x6e, 0xc4, 0x4f, 0x39, 0x17, 0x40, 0xf2, 0x0d, 0x41, 0xb0,
+	0x43, 0x4c, 0x10, 0xfe, 0xf7, 0xf3, 0xe0, 0xb1, 0x36, 0x6c, 0xae, 0xb8, 0xcd, 0xaa, 0xac, 0xa5,
+	0xd7, 0x24, 0x23, 0x4f, 0x20, 0xfe, 0xb7, 0xda, 0xa8, 0xf7, 0x98, 0x7c, 0x54, 0x23, 0xbd, 0x82,
+	0x14, 0x79, 0x3a, 0x58, 0x90, 0xc0, 0xf3, 0x8e, 0x70, 0xb2, 0xc7, 0xa8, 0xb7, 0x1f, 0x3d, 0xf2,
+	0xda, 0xcb, 0xff, 0x02, 0x1b, 0x56, 0x3c, 0x80, 0x2a, 0x15, 0xb7, 0x20, 0x23, 0x83, 0xc5, 0xeb,
+	0x29, 0xfe, 0x26, 0x52, 0x5c, 0xce, 0x9f, 0x40, 0x21, 0xb4, 0xb7, 0x20, 0x23, 0x2b, 0xfd, 0x53,
+	0x59, 0xe2, 0xf6, 0x93, 0x9a, 0xcb, 0xe6, 0x49, 0x73, 0x51, 0x8e, 0x11, 0x78, 0x85, 0x3e, 0xd5,
+	0x31, 0x02, 0x16, 0x0b, 0x39, 0x46, 0x80, 0x85, 0x3c, 0xc0, 0xe3, 0x6a, 0x44, 0x56, 0x9d, 0x0f,
+	0x14, 0x5d, 0x9e, 0x3e, 0x03, 0x91, 0xd5, 0xf8, 0xb2, 0xca, 0x23, 0x9f, 0xc1, 0x8c, 0x7e, 0x7e,
+	0x0a, 0x80, 0xe5, 0xe2, 0xde, 0xb1, 0xe8, 0x75, 0x84, 0xbd, 0x42, 0x2f, 0x45, 0xc2, 0x7a, 0xac,
+	0xdd, 0xac, 0x71, 0x81, 0xf6, 0x18, 0xab, 0xff, 0xc0, 0xf3, 0xdd, 0xe8, 0xed, 0x4d, 0xe8, 0x7d,
+	0x2f, 0x1c, 0x79, 0x84, 0x1b, 0x09, 0x39, 0x75, 0x6d, 0x63, 0xd7, 0xc9, 0xe7, 0x40, 0xf6, 0x19,
+	0x1f, 0x79, 0xc1, 0x1b, 0xb9, 0xe2, 0x8d, 0x7a, 0xe4, 0x0b, 0xdb, 0x23, 0x88, 0x8d, 0xef, 0x85,
+	0xc4, 0x83, 0xb9, 0xaa, 0xdd, 0xe9, 0xb7, 0x4d, 0xce, 0x70, 0x3c, 0x59, 0x1b, 0x18, 0xc2, 0xdf,
+	0xac, 0xb3, 0x7c, 0x4c, 0x45, 0x1b, 0xba, 0x76, 0x0b, 0xda, 0x48, 0x21, 0xd5, 0x04, 0x92, 0xd8,
+	0x99, 0x3b, 0x30, 0x3b, 0x78, 0xaa, 0x23, 0x17, 0x07, 0xc5, 0xd1, 0xe8, 0x23, 0x5e, 0x3e, 0xbe,
+	0x8b, 0x5e, 0x20, 0x0f, 0x01, 0xe4, 0x9d, 0x01, 0x5e, 0x91, 0xa6, 0xfc, 0x15, 0x41, 0xec, 0x86,
+	0x56, 0x97, 0x2d, 0x74, 0x5e, 0xe8, 0x38, 0x1c, 0xad, 0xae, 0x83, 0xd4, 0x4d, 0xc1, 0x39, 0xf0,
+	0x86, 0x77, 0x1a, 0xc7, 0xb7, 0x8a, 0xbe, 0xe1, 0x02, 0xf0, 0x33, 0x48, 0x8a, 0xe0, 0xf1, 0x82,
+	0xe3, 0xfb, 0x3f, 0x59, 0xd6, 0x96, 0x93, 0x3f, 0x07, 0xe8, 0xb1, 0x86, 0xdd, 0xb4, 0x99, 0x9b,
+	0x5f, 0xd4, 0xed, 0x06, 0xe3, 0x7d, 0xb7, 0x8b, 0xbd, 0x1e, 0x5d, 0x45, 0xe0, 0x25, 0x92, 0xd5,
+	0x06, 0xf5, 0x43, 0x1d, 0x40, 0xb2, 0xea, 0xfb, 0x1c, 0x3c, 0x4a, 0xe8, 0x1f, 0x1a, 0xc4, 0xaa,
+	0x1b, 0x42, 0xf5, 0xc3, 0x1c, 0x41, 0xb6, 0xca, 0x4d, 0x97, 0xeb, 0x9f, 0x59, 0x88, 0xaa, 0xd3,
+	0xe9, 0x92, 0xc1, 0x2f, 0x50, 0x46, 0x7e, 0x7e, 0x31, 0x74, 0xe6, 0x80, 0xb7, 0xa8, 0x90, 0x41,
+	0x07, 0xef, 0x15, 0x9e, 0xc0, 0xac, 0xe1, 0x0b, 0xb5, 0x70, 0x93, 0x0f, 0x12, 0x9b, 0xdb, 0x6d,
+	0xc8, 0x3a, 0x6e, 0x0b, 0x33, 0x46, 0xc3, 0x71, 0x2d, 0x85, 0xb3, 0x9d, 0x92, 0x2f, 0x4a, 0x15,
+	0xfc, 0x09, 0xd6, 0x2f, 0x0a, 0x2d, 0x9b, 0x1f, 0xf5, 0xeb, 0xc2, 0x3a, 0x45, 0x2d, 0xa9, 0x7e,
+	0x0a, 0x77, 0x53, 0xff, 0x30, 0xee, 0x6e, 0xb1, 0xe5, 0xa8, 0xb6, 0xbf, 0x8d, 0x2d, 0x97, 0x35,
+	0xde, 0x63, 0xff, 0x03, 0x55, 0x65, 0xac, 0x32, 0x5e, 0x99, 0xa8, 0x4c, 0x56, 0xa6, 0x2a, 0xd3,
+	0x95, 0x99, 0xfa, 0x14, 0x8e, 0xbd, 0xf3, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x14, 0x94, 0xf1,
+	0xbe, 0x64, 0x27, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml
index 8d5b75e..b950e42 100644
--- a/vendor/github.com/opentracing/opentracing-go/.travis.yml
+++ b/vendor/github.com/opentracing/opentracing-go/.travis.yml
@@ -2,8 +2,8 @@
 
 matrix:
   include:
-  - go: "1.11.x"
-  - go: "1.12.x"
+  - go: "1.13.x"
+  - go: "1.14.x"
   - go: "tip"
     env:
     - LINT=true
diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
index 7c14feb..d3bfcf6 100644
--- a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
+++ b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
@@ -1,6 +1,23 @@
 Changes by Version
 ==================
 
+
+1.2.0 (2020-07-01)
+-------------------
+
+* Restore the ability to reset the current span in context to nil (#231) -- Yuri Shkuro
+* Use error.object per OpenTracing Semantic Conventions (#179) -- Rahman Syed
+* Convert nil pointer log field value to string "nil" (#230) -- Cyril Tovena
+* Add Go module support (#215) -- Zaba505
+* Make SetTag helper types in ext public (#229) -- Blake Edwards
+* Add log/fields helpers for keys from specification (#226) -- Dmitry Monakhov
+* Improve noop implementation (#223) -- chanxuehong
+* Add an extension to Tracer interface for custom go context creation (#220) -- Krzesimir Nowak
+* Fix typo in comments (#222) -- meteorlxy
+* Improve documentation for log.Object() to emphasize the requirement to pass immutable arguments (#219) -- 疯狂的小企鹅
+* [mock] Return ErrInvalidSpanContext if span context is not MockSpanContext (#216) -- Milad Irannejad
+
+
 1.1.0 (2019-03-23)
 -------------------
 
diff --git a/vendor/github.com/opentracing/opentracing-go/ext.go b/vendor/github.com/opentracing/opentracing-go/ext.go
new file mode 100644
index 0000000..e11977e
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext.go
@@ -0,0 +1,24 @@
+package opentracing
+
+import (
+	"context"
+)
+
+// TracerContextWithSpanExtension is an extension interface that an
+// implementation of the Tracer interface may choose to implement. It
+// gives the tracer some control over the Go context when
+// ContextWithSpan is invoked.
+//
+// The primary purpose of this extension is to support adapters from the
+// OpenTracing API to some other tracing API.
+type TracerContextWithSpanExtension interface {
+	// ContextWithSpanHook is called by the ContextWithSpan
+	// function when the Tracer implementation also implements
+	// this interface. It allows the tracer to put extra information
+	// into the context and make it available to callers of
+	// ContextWithSpan.
+	//
+	// This hook is invoked before the ContextWithSpan function
+	// actually puts the span into the context.
+	ContextWithSpanHook(ctx context.Context, span Span) context.Context
+}
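
For context (not part of the vendored diff), a minimal sketch of how a tracer adapter might implement this extension; the `adapterTracer` type and its context key are hypothetical, and the hook only fires through `ContextWithSpan` when the span's `Tracer()` reports a tracer implementing this interface:

```go
package main

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
)

// adapterTracer is a hypothetical adapter that bridges the OpenTracing API to
// another tracing API and mirrors the active span under its own context key.
type adapterTracer struct {
	opentracing.Tracer // delegate StartSpan/Inject/Extract to a real tracer
}

type adapterSpanKey struct{}

// ContextWithSpanHook satisfies TracerContextWithSpanExtension. It runs before
// opentracing.ContextWithSpan stores the span, so the adapter can attach
// whatever extra values its own API needs.
func (t adapterTracer) ContextWithSpanHook(ctx context.Context, span opentracing.Span) context.Context {
	return context.WithValue(ctx, adapterSpanKey{}, span)
}

func main() {
	tracer := adapterTracer{Tracer: opentracing.NoopTracer{}}
	span := tracer.StartSpan("example")
	defer span.Finish()

	// ContextWithSpan performs this same type assertion on span.Tracer(); the
	// hook is invoked directly here because the embedded NoopTracer's spans do
	// not report the adapter as their tracer.
	var hooked opentracing.TracerContextWithSpanExtension = tracer
	ctx := hooked.ContextWithSpanHook(context.Background(), span)
	_ = ctx
}
```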
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/field.go b/vendor/github.com/opentracing/opentracing-go/ext/field.go
new file mode 100644
index 0000000..8282bd7
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext/field.go
@@ -0,0 +1,17 @@
+package ext
+
+import (
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/log"
+)
+
+// LogError sets the error=true tag on the Span and logs err as an "error" event.
+func LogError(span opentracing.Span, err error, fields ...log.Field) {
+	Error.Set(span, true)
+	ef := []log.Field{
+		log.Event("error"),
+		log.Error(err),
+	}
+	ef = append(ef, fields...)
+	span.LogFields(ef...)
+}
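
A brief usage sketch (not part of the diff) showing how LogError combines the error tag with structured log fields; the span name and extra field are invented for illustration:

```go
package main

import (
	"errors"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
	"github.com/opentracing/opentracing-go/log"
)

func main() {
	span := opentracing.NoopTracer{}.StartSpan("fetch-config")
	defer span.Finish()

	// Sets error=true on the span and emits an "error" event log record,
	// appending any extra fields the caller supplies.
	ext.LogError(span, errors.New("connection refused"), log.Int("attempt", 3))
}
```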
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
index 52e8895..a414b59 100644
--- a/vendor/github.com/opentracing/opentracing-go/ext/tags.go
+++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
@@ -47,40 +47,40 @@
 
 	// Component is a low-cardinality identifier of the module, library,
 	// or package that is generating a span.
-	Component = stringTagName("component")
+	Component = StringTagName("component")
 
 	//////////////////////////////////////////////////////////////////////
 	// Sampling hint
 	//////////////////////////////////////////////////////////////////////
 
 	// SamplingPriority determines the priority of sampling this Span.
-	SamplingPriority = uint16TagName("sampling.priority")
+	SamplingPriority = Uint16TagName("sampling.priority")
 
 	//////////////////////////////////////////////////////////////////////
-	// Peer tags. These tags can be emitted by either client-side of
+	// Peer tags. These tags can be emitted by either client-side or
 	// server-side to describe the other side/service in a peer-to-peer
 	// communications, like an RPC call.
 	//////////////////////////////////////////////////////////////////////
 
 	// PeerService records the service name of the peer.
-	PeerService = stringTagName("peer.service")
+	PeerService = StringTagName("peer.service")
 
 	// PeerAddress records the address name of the peer. This may be a "ip:port",
 	// a bare "hostname", a FQDN or even a database DSN substring
 	// like "mysql://username@127.0.0.1:3306/dbname"
-	PeerAddress = stringTagName("peer.address")
+	PeerAddress = StringTagName("peer.address")
 
 	// PeerHostname records the host name of the peer
-	PeerHostname = stringTagName("peer.hostname")
+	PeerHostname = StringTagName("peer.hostname")
 
 	// PeerHostIPv4 records IP v4 host address of the peer
-	PeerHostIPv4 = ipv4Tag("peer.ipv4")
+	PeerHostIPv4 = IPv4TagName("peer.ipv4")
 
 	// PeerHostIPv6 records IP v6 host address of the peer
-	PeerHostIPv6 = stringTagName("peer.ipv6")
+	PeerHostIPv6 = StringTagName("peer.ipv6")
 
 	// PeerPort records port number of the peer
-	PeerPort = uint16TagName("peer.port")
+	PeerPort = Uint16TagName("peer.port")
 
 	//////////////////////////////////////////////////////////////////////
 	// HTTP Tags
@@ -88,46 +88,46 @@
 
 	// HTTPUrl should be the URL of the request being handled in this segment
 	// of the trace, in standard URI format. The protocol is optional.
-	HTTPUrl = stringTagName("http.url")
+	HTTPUrl = StringTagName("http.url")
 
 	// HTTPMethod is the HTTP method of the request, and is case-insensitive.
-	HTTPMethod = stringTagName("http.method")
+	HTTPMethod = StringTagName("http.method")
 
 	// HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the
 	// HTTP response.
-	HTTPStatusCode = uint16TagName("http.status_code")
+	HTTPStatusCode = Uint16TagName("http.status_code")
 
 	//////////////////////////////////////////////////////////////////////
 	// DB Tags
 	//////////////////////////////////////////////////////////////////////
 
 	// DBInstance is database instance name.
-	DBInstance = stringTagName("db.instance")
+	DBInstance = StringTagName("db.instance")
 
 	// DBStatement is a database statement for the given database type.
 	// It can be a query or a prepared statement (i.e., before substitution).
-	DBStatement = stringTagName("db.statement")
+	DBStatement = StringTagName("db.statement")
 
 	// DBType is a database type. For any SQL database, "sql".
 	// For others, the lower-case database category, e.g. "redis"
-	DBType = stringTagName("db.type")
+	DBType = StringTagName("db.type")
 
 	// DBUser is a username for accessing database.
-	DBUser = stringTagName("db.user")
+	DBUser = StringTagName("db.user")
 
 	//////////////////////////////////////////////////////////////////////
 	// Message Bus Tag
 	//////////////////////////////////////////////////////////////////////
 
 	// MessageBusDestination is an address at which messages can be exchanged
-	MessageBusDestination = stringTagName("message_bus.destination")
+	MessageBusDestination = StringTagName("message_bus.destination")
 
 	//////////////////////////////////////////////////////////////////////
 	// Error Tag
 	//////////////////////////////////////////////////////////////////////
 
 	// Error indicates that operation represented by the span resulted in an error.
-	Error = boolTagName("error")
+	Error = BoolTagName("error")
 )
 
 // ---
@@ -163,48 +163,53 @@
 
 // ---
 
-type stringTagName string
+// StringTagName is a common tag name to be set to a string value
+type StringTagName string
 
 // Set adds a string tag to the `span`
-func (tag stringTagName) Set(span opentracing.Span, value string) {
+func (tag StringTagName) Set(span opentracing.Span, value string) {
 	span.SetTag(string(tag), value)
 }
 
 // ---
 
-type uint32TagName string
+// Uint32TagName is a common tag name to be set to a uint32 value
+type Uint32TagName string
 
 // Set adds a uint32 tag to the `span`
-func (tag uint32TagName) Set(span opentracing.Span, value uint32) {
+func (tag Uint32TagName) Set(span opentracing.Span, value uint32) {
 	span.SetTag(string(tag), value)
 }
 
 // ---
 
-type uint16TagName string
+// Uint16TagName is a common tag name to be set to a uint16 value
+type Uint16TagName string
 
 // Set adds a uint16 tag to the `span`
-func (tag uint16TagName) Set(span opentracing.Span, value uint16) {
+func (tag Uint16TagName) Set(span opentracing.Span, value uint16) {
 	span.SetTag(string(tag), value)
 }
 
 // ---
 
-type boolTagName string
+// BoolTagName is a common tag name to be set to a bool value
+type BoolTagName string
 
-// Add adds a bool tag to the `span`
-func (tag boolTagName) Set(span opentracing.Span, value bool) {
+// Set adds a bool tag to the `span`
+func (tag BoolTagName) Set(span opentracing.Span, value bool) {
 	span.SetTag(string(tag), value)
 }
 
-type ipv4Tag string
+// IPv4TagName is a common tag name to be set to an ipv4 value
+type IPv4TagName string
 
 // Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility
-func (tag ipv4Tag) Set(span opentracing.Span, value uint32) {
+func (tag IPv4TagName) Set(span opentracing.Span, value uint32) {
 	span.SetTag(string(tag), value)
 }
 
 // SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1"
-func (tag ipv4Tag) SetString(span opentracing.Span, value string) {
+func (tag IPv4TagName) SetString(span opentracing.Span, value string) {
 	span.SetTag(string(tag), value)
 }
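
Since these helper types are now exported, downstream code can define its own tag names without re-implementing the Set helpers. A hedged sketch (the `tenant.id` tag and span name are invented, not part of the diff):

```go
package main

import (
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
)

// TenantID is a project-specific string tag; exporting StringTagName makes
// this possible without copying the Set helper.
var TenantID = ext.StringTagName("tenant.id")

func main() {
	span := opentracing.NoopTracer{}.StartSpan("db.query")
	defer span.Finish()

	ext.DBType.Set(span, "sql")
	ext.DBStatement.Set(span, "SELECT 1")
	TenantID.Set(span, "acme")
}
```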
diff --git a/vendor/github.com/opentracing/opentracing-go/go.mod b/vendor/github.com/opentracing/opentracing-go/go.mod
new file mode 100644
index 0000000..bf48bb5
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/go.mod
@@ -0,0 +1,5 @@
+module github.com/opentracing/opentracing-go
+
+go 1.14
+
+require github.com/stretchr/testify v1.3.0
diff --git a/vendor/github.com/opentracing/opentracing-go/go.sum b/vendor/github.com/opentracing/opentracing-go/go.sum
new file mode 100644
index 0000000..4347755
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/go.sum
@@ -0,0 +1,7 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go
index 08c00c0..1831bc9 100644
--- a/vendor/github.com/opentracing/opentracing-go/gocontext.go
+++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go
@@ -7,8 +7,13 @@
 var activeSpanKey = contextKey{}
 
 // ContextWithSpan returns a new `context.Context` that holds a reference to
-// `span`'s SpanContext.
+// the span. If span is nil, a new context without an active span is returned.
 func ContextWithSpan(ctx context.Context, span Span) context.Context {
+	if span != nil {
+		if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok {
+			ctx = tracerWithHook.ContextWithSpanHook(ctx, span)
+		}
+	}
 	return context.WithValue(ctx, activeSpanKey, span)
 }
 
diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go
index 50feea3..f222ded 100644
--- a/vendor/github.com/opentracing/opentracing-go/log/field.go
+++ b/vendor/github.com/opentracing/opentracing-go/log/field.go
@@ -122,16 +122,19 @@
 	}
 }
 
-// Error adds an error with the key "error" to a Span.LogFields() record
+// Error adds an error with the key "error.object" to a Span.LogFields() record
 func Error(err error) Field {
 	return Field{
-		key:          "error",
+		key:          "error.object",
 		fieldType:    errorType,
 		interfaceVal: err,
 	}
 }
 
 // Object adds an object-valued key:value pair to a Span.LogFields() record
+// Please pass in an immutable object; otherwise there may be concurrency issues.
+// For example, passing in a map can cause "fatal error: concurrent map iteration and map write",
+// because the span is sent asynchronously and the map may still be modified by the caller.
 func Object(key string, obj interface{}) Field {
 	return Field{
 		key:          key,
@@ -140,6 +143,16 @@
 	}
 }
 
+// Event creates a string-valued Field for span logs with key="event" and value=val.
+func Event(val string) Field {
+	return String("event", val)
+}
+
+// Message creates a string-valued Field for span logs with key="message" and value=val.
+func Message(val string) Field {
+	return String("message", val)
+}
+
 // LazyLogger allows for user-defined, late-bound logging of arbitrary data
 type LazyLogger func(fv Encoder)
 
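A short usage sketch (not part of the diff) of the new Event/Message helpers, the relocated error.object key, and the immutability caveat for Object; the span name and values are invented:

```go
package main

import (
	"errors"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/log"
)

func main() {
	span := opentracing.NoopTracer{}.StartSpan("checkout")
	defer span.Finish()

	span.LogFields(
		log.Event("error"),                    // key "event"
		log.Message("payment declined"),       // key "message"
		log.Error(errors.New("card expired")), // now recorded under "error.object"
	)

	// Object should receive an immutable value: snapshot the map first, because
	// the span may be serialized asynchronously while the original keeps changing.
	attrs := map[string]string{"currency": "EUR"}
	snapshot := make(map[string]string, len(attrs))
	for k, v := range attrs {
		snapshot[k] = v
	}
	span.LogFields(log.Object("attributes", snapshot))
}
```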
diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go
index 3832feb..d57e28a 100644
--- a/vendor/github.com/opentracing/opentracing-go/log/util.go
+++ b/vendor/github.com/opentracing/opentracing-go/log/util.go
@@ -1,6 +1,9 @@
 package log
 
-import "fmt"
+import (
+	"fmt"
+	"reflect"
+)
 
 // InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice
 // a la Span.LogFields().
@@ -46,6 +49,10 @@
 		case float64:
 			fields[i] = Float64(key, typedVal)
 		default:
+			if typedVal == nil || (reflect.ValueOf(typedVal).Kind() == reflect.Ptr && reflect.ValueOf(typedVal).IsNil()) {
+				fields[i] = String(key, "nil")
+				continue
+			}
 			// When in doubt, coerce to a string
 			fields[i] = String(key, fmt.Sprint(typedVal))
 		}
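
A small sketch (not part of the diff) of the behavior this hunk adds: a nil or nil-pointer value passed through Span.LogKV-style key/values is now rendered as the string "nil"; the key names are invented:

```go
package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
)

func main() {
	var cfg *struct{ Name string } // nil pointer

	// With the change above, the nil pointer falls into the default branch and
	// is converted to the string "nil" instead of being passed to fmt.Sprint.
	fields, err := log.InterleavedKVToFields("config", cfg, "attempt", 3)
	if err != nil {
		panic(err)
	}
	for _, f := range fields {
		fmt.Println(f.Key(), f.Value()) // config nil / attempt 3
	}
}
```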
diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go
index 0d32f69..f9b680a 100644
--- a/vendor/github.com/opentracing/opentracing-go/noop.go
+++ b/vendor/github.com/opentracing/opentracing-go/noop.go
@@ -21,9 +21,9 @@
 type noopSpanContext struct{}
 
 var (
-	defaultNoopSpanContext = noopSpanContext{}
-	defaultNoopSpan        = noopSpan{}
-	defaultNoopTracer      = NoopTracer{}
+	defaultNoopSpanContext SpanContext = noopSpanContext{}
+	defaultNoopSpan        Span        = noopSpan{}
+	defaultNoopTracer      Tracer      = NoopTracer{}
 )
 
 const (
@@ -35,7 +35,7 @@
 
 // noopSpan:
 func (n noopSpan) Context() SpanContext                                  { return defaultNoopSpanContext }
-func (n noopSpan) SetBaggageItem(key, val string) Span                   { return defaultNoopSpan }
+func (n noopSpan) SetBaggageItem(key, val string) Span                   { return n }
 func (n noopSpan) BaggageItem(key string) string                         { return emptyString }
 func (n noopSpan) SetTag(key string, value interface{}) Span             { return n }
 func (n noopSpan) LogFields(fields ...log.Field)                         {}
diff --git a/vendor/github.com/phayes/freeport/.gitignore b/vendor/github.com/phayes/freeport/.gitignore
new file mode 100644
index 0000000..1521c8b
--- /dev/null
+++ b/vendor/github.com/phayes/freeport/.gitignore
@@ -0,0 +1 @@
+dist
diff --git a/vendor/github.com/phayes/freeport/.goreleaser.yml b/vendor/github.com/phayes/freeport/.goreleaser.yml
new file mode 100644
index 0000000..48044ea
--- /dev/null
+++ b/vendor/github.com/phayes/freeport/.goreleaser.yml
@@ -0,0 +1,134 @@
+project_name: freeport
+
+release:
+  github:
+    owner: phayes
+    name: freeport
+
+builds:
+  - binary: freeport
+    goos:
+      - linux
+      - darwin
+    goarch:
+      - amd64
+      - "386"
+    goarm:
+      - "6"
+    main: ./cmd/freeport
+    ldflags: -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}}
+
+archive:
+  format: tar.gz
+  name_template: '{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{
+    .Arm }}{{ end }}'
+  files:
+  - licence*
+  - LICENCE*
+  - license*
+  - LICENSE*
+  - readme*
+  - README*
+  - changelog*
+  - CHANGELOG*
+
+snapshot:
+  name_template: SNAPSHOT-{{ .Commit }}
+
+checksum:
+  name_template: '{{ .ProjectName }}_{{ .Version }}_checksums.txt'
+
+# Create RPM and .DEB files
+fpm:
+  vendor: Patrick Hayes
+
+  # Your app's homepage.
+  #homepage: https://example.com/
+
+  # Your app's maintainer
+  maintainer: Patrick Hayes <patrick.d.hayes@gmail.com>
+
+  # Your app's description.
+  description: Get a free open TCP port that is ready to use.
+
+  # Your app's license.
+  # Default is empty.
+  license: BSD
+
+  # Formats to be generated.
+  formats:
+    - deb
+    - rpm
+
+  # Packages your package depends on.
+  #dependencies:
+  #  - git
+  #  - zsh
+
+  # Packages that conflict with your package.
+  #conflicts:
+  #  - svn
+  #  - bash
+
+  # Files or directories to add to your package (beyond the binary).
+  # Keys are source paths to get the files from.
+  # Values are the destination locations of the files in the package.
+  #files:
+  #  "scripts/etc/init.d/": "/etc/init.d"
+
+# Homebrew repos
+brew:
+  # Repository to push the tap to.
+  github:
+    owner: phayes
+    name: homebrew-repo
+
+  # Git author used to commit to the repository.
+  # Defaults are shown.
+  commit_author:
+    name: goreleaserbot
+    email: goreleaser@carlosbecker.com
+
+  # Folder inside the repository to put the formula.
+  # Default is the root folder.
+  # folder: .
+
+  # Caveats for the user of your binary.
+  # Default is empty.
+  # caveats: "How to use this binary"
+
+  # Your app's homepage.
+  # Default is empty.
+  # homepage: "https://example.com/"
+
+  # Your app's description.
+  # Default is empty.
+  description: "Get a free open TCP port that is ready to use."
+
+  # Packages your package depends on.
+  #dependencies:
+  #  - git
+  #  - zsh
+
+  # Packages that conflict with your package.
+  #conflicts:
+  #  - svn
+  #  - bash
+
+  # Specify for packages that run as a service.
+  # Default is empty.
+  #plist: |
+  #  <?xml version="1.0" encoding="UTF-8"?>
+  #  ...
+
+  # So you can `brew test` your formula.
+  # Default is empty.
+  #test: |
+  #  system "#{bin}/program --version"
+  #  ...
+
+  # Custom install script for brew.
+  # Default is 'bin.install "program"'.
+  #install: |
+  #  bin.install "program"
+  #  ...
\ No newline at end of file
diff --git a/vendor/github.com/phayes/freeport/LICENSE.md b/vendor/github.com/phayes/freeport/LICENSE.md
new file mode 100644
index 0000000..d9882e5
--- /dev/null
+++ b/vendor/github.com/phayes/freeport/LICENSE.md
@@ -0,0 +1,15 @@
+Open Source License (BSD 3-Clause)
+----------------------------------
+
+Copyright (c) 2014, Patrick Hayes / HighWire Press
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/phayes/freeport/README.md b/vendor/github.com/phayes/freeport/README.md
new file mode 100644
index 0000000..1665ccf
--- /dev/null
+++ b/vendor/github.com/phayes/freeport/README.md
@@ -0,0 +1,58 @@
+FreePort
+========
+
+Get a free open TCP port that is ready to use.
+
+## Command Line Example:
+```bash
+# Ask the kernel to give us an open port.
+export port=$(freeport)
+
+# Start standalone httpd server for testing
+httpd -X -c "Listen $port" &
+
+# Curl local server on the selected port
+curl localhost:$port
+```
+
+## Golang example:
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/phayes/freeport"
+)
+
+func main() {
+	port, err := freeport.GetFreePort()
+	if err != nil {
+		log.Fatal(err)
+	}
+	// port is ready to listen on
+}
+
+```
+
+## Installation
+
+#### Mac OSX
+```bash
+brew install phayes/repo/freeport
+```
+
+
+#### CentOS and other RPM based systems
+```bash
+wget https://github.com/phayes/freeport/releases/download/1.0.2/freeport_1.0.2_linux_386.rpm
+rpm -Uvh freeport_1.0.2_linux_386.rpm
+```
+
+#### Ubuntu and other DEB based systems
+```bash
+wget https://github.com/phayes/freeport/releases/download/1.0.2/freeport_1.0.2_linux_amd64.deb
+dpkg -i freeport_1.0.2_linux_amd64.deb
+```
+
+#### Building From Source
+```bash
+sudo apt-get install golang                    # Download Go. Alternatively, build from source: https://golang.org/doc/install/source
+go get github.com/phayes/freeport
+```
diff --git a/vendor/github.com/phayes/freeport/freeport.go b/vendor/github.com/phayes/freeport/freeport.go
new file mode 100644
index 0000000..fcfb6ba
--- /dev/null
+++ b/vendor/github.com/phayes/freeport/freeport.go
@@ -0,0 +1,49 @@
+package freeport
+
+import (
+	"net"
+)
+
+// GetFreePort asks the kernel for a free open port that is ready to use.
+func GetFreePort() (int, error) {
+	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
+	if err != nil {
+		return 0, err
+	}
+
+	l, err := net.ListenTCP("tcp", addr)
+	if err != nil {
+		return 0, err
+	}
+	defer l.Close()
+	return l.Addr().(*net.TCPAddr).Port, nil
+}
+
+// GetPort is deprecated; use GetFreePort instead.
+// It asks the kernel for a free open port that is ready to use.
+func GetPort() int {
+	port, err := GetFreePort()
+	if err != nil {
+		panic(err)
+	}
+	return port
+}
+
+// GetFreePorts asks the kernel for count free open ports that are ready to use.
+func GetFreePorts(count int) ([]int, error) {
+	var ports []int
+	for i := 0; i < count; i++ {
+		addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
+		if err != nil {
+			return nil, err
+		}
+
+		l, err := net.ListenTCP("tcp", addr)
+		if err != nil {
+			return nil, err
+		}
+		defer l.Close()
+		ports = append(ports, l.Addr().(*net.TCPAddr).Port)
+	}
+	return ports, nil
+}
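For context, the pattern above (resolve `localhost:0`, listen, close) is what makes the returned port immediately bindable. The following is a rough illustrative sketch only, not part of this vendored file; the listener and log output are assumptions:

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/phayes/freeport"
)

func main() {
	// Reserve a port the kernel reports as free.
	port, err := freeport.GetFreePort()
	if err != nil {
		log.Fatal(err)
	}

	// Bind a listener on the reserved port, e.g. for a test server.
	l, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", port))
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	log.Printf("test listener ready on %s", l.Addr())
}
```

Note that the port is released before GetFreePort returns, so there is a small window in which another process could claim it; callers should treat the result as a best-effort hint.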
diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
index be1f52a..4ee388e 100644
--- a/vendor/github.com/pierrec/lz4/README.md
+++ b/vendor/github.com/pierrec/lz4/README.md
@@ -83,24 +83,8 @@
 
 ## Contributors
 
-Thanks to all contributors so far:
+Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!
 
-- [@klauspost](https://github.com/klauspost)
-- [@heidawei](https://github.com/heidawei)
-- [@x4m](https://github.com/x4m)
-- [@Zariel](https://github.com/Zariel)
-- [@edwingeng](https://github.com/edwingeng)
-- [@danielmoy-google](https://github.com/danielmoy-google)
-- [@honda-tatsuya](https://github.com/honda-tatsuya)
-- [@h8liu](https://github.com/h8liu)
-- [@sbinet](https://github.com/sbinet)
-- [@fingon](https://github.com/fingon)
-- [@emfree](https://github.com/emfree)
-- [@lhemala](https://github.com/lhemala)
-- [@connor4312](https://github.com/connor4312)
-- [@oov](https://github.com/oov)
-- [@arya](https://github.com/arya)
-- [@ikkeps](https://github.com/ikkeps)
+Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
 
-Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder
-Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code
+Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
index 5755cda..664d9be 100644
--- a/vendor/github.com/pierrec/lz4/block.go
+++ b/vendor/github.com/pierrec/lz4/block.go
@@ -2,8 +2,8 @@
 
 import (
 	"encoding/binary"
-	"fmt"
 	"math/bits"
+	"sync"
 )
 
 // blockHash hashes the lower 6 bytes into a value < htSize.
@@ -35,24 +35,31 @@
 
 // CompressBlock compresses the source buffer into the destination one.
 // This is the fast version of LZ4 compression and also the default one.
-// The size of hashTable must be at least 64Kb.
 //
-// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible.
+// The argument hashTable is scratch space for a hash table used by the
+// compressor. If provided, it should have length at least 1<<16. If it is
+// shorter (or nil), CompressBlock allocates its own hash table.
+//
+// The size of the compressed data is returned.
+//
+// If the destination buffer size is lower than CompressBlockBound and
+// the compressed size is 0 and no error, then the data is incompressible.
 //
 // An error is returned if the destination buffer is too small.
-func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
+func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
 	defer recoverBlock(&err)
 
+	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
+	isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
 	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
-	// This significantly speeds up incompressible data and usually has very small impact on compresssion.
+	// This significantly speeds up incompressible data and usually has very small impact on compression.
 	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
 	const adaptSkipLog = 7
-	sn, dn := len(src)-mfLimit, len(dst)
-	if sn <= 0 || dn == 0 {
-		return 0, nil
-	}
 	if len(hashTable) < htSize {
-		return 0, fmt.Errorf("hash table too small, should be at least %d in size", htSize)
+		htIface := htPool.Get()
+		defer htPool.Put(htIface)
+		hashTable = (*(htIface).(*[htSize]int))[:]
 	}
 	// Prove to the compiler the table has at least htSize elements.
 	// The compiler can see that "uint32() >> hashShift" cannot be out of bounds.
@@ -60,7 +67,11 @@
 
 	// si: Current position of the search.
 	// anchor: Position of the current literals.
-	var si, anchor int
+	var si, di, anchor int
+	sn := len(src) - mfLimit
+	if sn <= 0 {
+		goto lastLiterals
+	}
 
 	// Fast scan strategy: the hash table only stores the last 4 bytes sequences.
 	for si < sn {
@@ -124,7 +135,7 @@
 		si, mLen = si+mLen, si+minMatch
 
 		// Find the longest match by looking by batches of 8 bytes.
-		for si < sn {
+		for si+8 < sn {
 			x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
 			if x == 0 {
 				si += 8
@@ -184,7 +195,8 @@
 		hashTable[h] = si - 2
 	}
 
-	if anchor == 0 {
+lastLiterals:
+	if isNotCompressible && anchor == 0 {
 		// Incompressible.
 		return 0, nil
 	}
@@ -205,7 +217,7 @@
 	di++
 
 	// Write the last literals.
-	if di >= anchor {
+	if isNotCompressible && di >= anchor {
 		// Incompressible.
 		return 0, nil
 	}
@@ -213,6 +225,13 @@
 	return di, nil
 }
 
+// Pool of hash tables for CompressBlock.
+var htPool = sync.Pool{
+	New: func() interface{} {
+		return new([htSize]int)
+	},
+}
+
 // blockHash hashes 4 bytes into a value < winSize.
 func blockHashHC(x uint32) uint32 {
 	const hasher uint32 = 2654435761 // Knuth multiplicative hash.
@@ -224,22 +243,24 @@
 //
 // CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
 //
-// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible.
+// The size of the compressed data is returned.
+//
+// If the destination buffer size is lower than CompressBlockBound and
+// the compressed size is 0 and no error, then the data is incompressible.
 //
 // An error is returned if the destination buffer is too small.
-func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
+func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {
 	defer recoverBlock(&err)
 
+	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
+	isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
 	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
-	// This significantly speeds up incompressible data and usually has very small impact on compresssion.
+	// This significantly speeds up incompressible data and usually has very small impact on compression.
 	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
 	const adaptSkipLog = 7
 
-	sn, dn := len(src)-mfLimit, len(dst)
-	if sn <= 0 || dn == 0 {
-		return 0, nil
-	}
-	var si int
+	var si, di, anchor int
 
 	// hashTable: stores the last position found for a given hash
 	// chainTable: stores previous positions for a given hash
@@ -249,7 +270,11 @@
 		depth = winSize
 	}
 
-	anchor := si
+	sn := len(src) - mfLimit
+	if sn <= 0 {
+		goto lastLiterals
+	}
+
 	for si < sn {
 		// Hash the next 4 bytes (sequence).
 		match := binary.LittleEndian.Uint32(src[si:])
@@ -356,12 +381,13 @@
 		}
 	}
 
-	if anchor == 0 {
+	if isNotCompressible && anchor == 0 {
 		// Incompressible.
 		return 0, nil
 	}
 
 	// Last literals.
+lastLiterals:
 	lLen := len(src) - anchor
 	if lLen < 0xF {
 		dst[di] = byte(lLen << 4)
@@ -378,7 +404,7 @@
 	di++
 
 	// Write the last literals.
-	if di >= anchor {
+	if isNotCompressible && di >= anchor {
 		// Incompressible.
 		return 0, nil
 	}
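The reworked CompressBlock contract above (optional hash table backed by an internal pool, and the CompressBlockBound condition attached to the zero return) can be exercised as in this minimal sketch; the sample input is an assumption:

```go
package main

import (
	"fmt"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	src := []byte("hello hello hello hello hello hello hello hello")

	// Size dst with CompressBlockBound so a zero return unambiguously means
	// "incompressible" rather than "destination too small".
	dst := make([]byte, lz4.CompressBlockBound(len(src)))

	// A nil hash table is now accepted; the compressor takes scratch space
	// from its internal sync.Pool instead of returning an error.
	n, err := lz4.CompressBlock(src, dst, nil)
	if err != nil {
		log.Fatal(err)
	}
	if n == 0 {
		fmt.Println("data is incompressible")
		return
	}
	fmt.Printf("compressed %d bytes into %d\n", len(src), n)
}
```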
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
index cdbf961..a3284bd 100644
--- a/vendor/github.com/pierrec/lz4/lz4.go
+++ b/vendor/github.com/pierrec/lz4/lz4.go
@@ -10,14 +10,20 @@
 //
 package lz4
 
+import (
+	"math/bits"
+	"sync"
+)
+
 const (
 	// Extension is the LZ4 frame file name extension
 	Extension = ".lz4"
 	// Version is the LZ4 frame format version
 	Version = 1
 
-	frameMagic     uint32 = 0x184D2204
-	frameSkipMagic uint32 = 0x184D2A50
+	frameMagic       uint32 = 0x184D2204
+	frameSkipMagic   uint32 = 0x184D2A50
+	frameMagicLegacy uint32 = 0x184C2102
 
 	// The following constants are used to setup the compression algorithm.
 	minMatch            = 4  // the minimum size of the match sequence size (4 bytes)
@@ -34,28 +40,67 @@
 	hashLog = 16
 	htSize  = 1 << hashLog
 
-	mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
+	mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
 )
 
 // map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
 const (
-	blockSize64K  = 64 << 10
-	blockSize256K = 256 << 10
-	blockSize1M   = 1 << 20
-	blockSize4M   = 4 << 20
+	blockSize64K = 1 << (16 + 2*iota)
+	blockSize256K
+	blockSize1M
+	blockSize4M
 )
 
 var (
-	bsMapID    = map[byte]int{4: blockSize64K, 5: blockSize256K, 6: blockSize1M, 7: blockSize4M}
-	bsMapValue = map[int]byte{blockSize64K: 4, blockSize256K: 5, blockSize1M: 6, blockSize4M: 7}
+	// Keep a pool of buffers for each valid block sizes.
+	bsMapValue = [...]*sync.Pool{
+		newBufferPool(2 * blockSize64K),
+		newBufferPool(2 * blockSize256K),
+		newBufferPool(2 * blockSize1M),
+		newBufferPool(2 * blockSize4M),
+	}
 )
 
+// newBufferPool returns a pool for buffers of the given size.
+func newBufferPool(size int) *sync.Pool {
+	return &sync.Pool{
+		New: func() interface{} {
+			return make([]byte, size)
+		},
+	}
+}
+
+// getBuffer fetches a buffer from the pool for the given block size.
+func getBuffer(size int) []byte {
+	idx := blockSizeValueToIndex(size) - 4
+	return bsMapValue[idx].Get().([]byte)
+}
+
+// putBuffer returns a buffer to its pool.
+func putBuffer(size int, buf []byte) {
+	if cap(buf) > 0 {
+		idx := blockSizeValueToIndex(size) - 4
+		bsMapValue[idx].Put(buf[:cap(buf)])
+	}
+}
+func blockSizeIndexToValue(i byte) int {
+	return 1 << (16 + 2*uint(i))
+}
+func isValidBlockSize(size int) bool {
+	const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M
+
+	return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1
+}
+func blockSizeValueToIndex(size int) byte {
+	return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
+}
+
 // Header describes the various flags that can be set on a Writer or obtained from a Reader.
 // The default values match those of the LZ4 frame format definition
 // (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
 //
 // NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
-// It is the caller responsibility to check them if necessary.
+// It is the caller's responsibility to check them if necessary.
 type Header struct {
 	BlockChecksum    bool   // Compressed blocks checksum flag.
 	NoChecksum       bool   // Frame checksum flag.
@@ -64,3 +109,8 @@
 	CompressionLevel int    // Compression level (higher is better, use 0 for fastest compression).
 	done             bool   // Header processed flag (Read or Write and checked).
 }
+
+// Reset resets the internal status.
+func (h *Header) Reset() {
+	h.done = false
+}
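Since the block-size helpers above are unexported, the following standalone sketch simply reproduces their arithmetic to show the ID-to-size round trip that replaced the bsMapID/bsMapValue maps; it is illustrative only:

```go
package main

import (
	"fmt"
	"math/bits"
)

// Reproductions of the unexported lz4 helpers, for illustration only.
func blockSizeIndexToValue(i byte) int { return 1 << (16 + 2*uint(i)) }

func blockSizeValueToIndex(size int) byte {
	return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
}

func main() {
	// Frame block-max-size IDs 4..7 correspond to 64KB, 256KB, 1MB and 4MB.
	for id := byte(4); id <= 7; id++ {
		size := blockSizeIndexToValue(id - 4)
		fmt.Printf("ID %d -> %d bytes -> ID %d\n", id, size, blockSizeValueToIndex(size))
	}
}
```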
diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
index 126b792..87dd72b 100644
--- a/vendor/github.com/pierrec/lz4/reader.go
+++ b/vendor/github.com/pierrec/lz4/reader.go
@@ -88,10 +88,10 @@
 	z.NoChecksum = b>>2&1 == 0
 
 	bmsID := buf[1] >> 4 & 0x7
-	bSize, ok := bsMapID[bmsID]
-	if !ok {
+	if bmsID < 4 || bmsID > 7 {
 		return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID)
 	}
+	bSize := blockSizeIndexToValue(bmsID - 4)
 	z.BlockMaxSize = bSize
 
 	// Allocate the compressed/uncompressed buffers.
diff --git a/vendor/github.com/pierrec/lz4/reader_legacy.go b/vendor/github.com/pierrec/lz4/reader_legacy.go
new file mode 100644
index 0000000..1670a77
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/reader_legacy.go
@@ -0,0 +1,207 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+// ReaderLegacy implements the LZ4Demo frame decoder.
+// The Header is set after the first call to Read().
+type ReaderLegacy struct {
+	Header
+	// Handler called when a block has been successfully read.
+	// It provides the number of bytes read.
+	OnBlockDone func(size int)
+
+	lastBlock bool
+	buf       [8]byte   // Scrap buffer.
+	pos       int64     // Current position in src.
+	src       io.Reader // Source.
+	zdata     []byte    // Compressed data.
+	data      []byte    // Uncompressed data.
+	idx       int       // Index of unread bytes into data.
+	skip      int64     // Bytes to skip before next read.
+	dpos      int64     // Position in dest
+}
+
+// NewReaderLegacy returns a new LZ4Demo frame decoder.
+// No access to the underlying io.Reader is performed.
+func NewReaderLegacy(src io.Reader) *ReaderLegacy {
+	r := &ReaderLegacy{src: src}
+	return r
+}
+
+// readLegacyHeader checks the frame magic number and parses the frame descriptor.
+// Skippable frames are supported even as a first frame, although the LZ4
+// specification recommends that skippable frames not be used as first frames.
+func (z *ReaderLegacy) readLegacyHeader() error {
+	z.lastBlock = false
+	magic, err := z.readUint32()
+	if err != nil {
+		z.pos += 4
+		if err == io.ErrUnexpectedEOF {
+			return io.EOF
+		}
+		return err
+	}
+	if magic != frameMagicLegacy {
+		return ErrInvalid
+	}
+	z.pos += 4
+
+	// Legacy has fixed 8MB blocksizes
+	// https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame
+	bSize := blockSize4M * 2
+
+	// Allocate the compressed/uncompressed buffers.
+	// The compressed buffer cannot exceed the uncompressed one.
+	if n := 2 * bSize; cap(z.zdata) < n {
+		z.zdata = make([]byte, n, n)
+	}
+	if debugFlag {
+		debug("header block max size size=%d", bSize)
+	}
+	z.zdata = z.zdata[:bSize]
+	z.data = z.zdata[:cap(z.zdata)][bSize:]
+	z.idx = len(z.data)
+
+	z.Header.done = true
+	if debugFlag {
+		debug("header read: %v", z.Header)
+	}
+
+	return nil
+}
+
+// Read decompresses data from the underlying source into the supplied buffer.
+//
+// Since there can be multiple streams concatenated, Header values may
+// change between calls to Read(). If that is the case, no data is actually read from
+// the underlying io.Reader, to allow for potential input buffer resizing.
+func (z *ReaderLegacy) Read(buf []byte) (int, error) {
+	if debugFlag {
+		debug("Read buf len=%d", len(buf))
+	}
+	if !z.Header.done {
+		if err := z.readLegacyHeader(); err != nil {
+			return 0, err
+		}
+		if debugFlag {
+			debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
+				len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
+		}
+	}
+
+	if len(buf) == 0 {
+		return 0, nil
+	}
+
+	if z.idx == len(z.data) {
+		// No data ready for reading, process the next block.
+		if debugFlag {
+			debug("  reading block from writer %d %d", z.idx, blockSize4M*2)
+		}
+
+		// Reset uncompressed buffer
+		z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
+
+		bLen, err := z.readUint32()
+		if err != nil {
+			return 0, err
+		}
+		if debugFlag {
+			debug("   bLen %d (0x%x) offset = %d (0x%x)", bLen, bLen, z.pos, z.pos)
+		}
+		z.pos += 4
+
+		// Legacy blocks are always compressed, even when detrimental
+		if debugFlag {
+			debug("   compressed block size %d", bLen)
+		}
+
+		if int(bLen) > cap(z.data) {
+			return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
+		}
+		zdata := z.zdata[:bLen]
+		if _, err := io.ReadFull(z.src, zdata); err != nil {
+			return 0, err
+		}
+		z.pos += int64(bLen)
+
+		n, err := UncompressBlock(zdata, z.data)
+		if err != nil {
+			return 0, err
+		}
+
+		z.data = z.data[:n]
+		if z.OnBlockDone != nil {
+			z.OnBlockDone(n)
+		}
+
+		z.idx = 0
+
+		// Legacy blocks are fixed to 8MB; if we read a decompressed block smaller
+		// than this, it means we've reached the end...
+		if n < blockSize4M*2 {
+			z.lastBlock = true
+		}
+	}
+
+	if z.skip > int64(len(z.data[z.idx:])) {
+		z.skip -= int64(len(z.data[z.idx:]))
+		z.dpos += int64(len(z.data[z.idx:]))
+		z.idx = len(z.data)
+		return 0, nil
+	}
+
+	z.idx += int(z.skip)
+	z.dpos += z.skip
+	z.skip = 0
+
+	n := copy(buf, z.data[z.idx:])
+	z.idx += n
+	z.dpos += int64(n)
+	if debugFlag {
+		debug("%v] copied %d bytes to input (%d:%d)", z.lastBlock, n, z.idx, len(z.data))
+	}
+	if z.lastBlock && len(z.data) == z.idx {
+		return n, io.EOF
+	}
+	return n, nil
+}
+
+// Seek implements io.Seeker, but supports seeking forward from the current
+// position only. Any other seek will return an error. Allows skipping output
+// bytes which aren't needed, which in some scenarios is faster than reading
+// and discarding them.
+// Note this may cause future calls to Read() to read 0 bytes if all of the
+// data they would have returned is skipped.
+func (z *ReaderLegacy) Seek(offset int64, whence int) (int64, error) {
+	if offset < 0 || whence != io.SeekCurrent {
+		return z.dpos + z.skip, ErrUnsupportedSeek
+	}
+	z.skip += offset
+	return z.dpos + z.skip, nil
+}
+
+// Reset discards the Reader's state and makes it equivalent to the
+// result of its original state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *ReaderLegacy) Reset(r io.Reader) {
+	z.Header = Header{}
+	z.pos = 0
+	z.src = r
+	z.zdata = z.zdata[:0]
+	z.data = z.data[:0]
+	z.idx = 0
+}
+
+// readUint32 reads a uint32 into the supplied buffer.
+// The idea is to reuse the already allocated buffers, avoiding additional allocations.
+func (z *ReaderLegacy) readUint32() (uint32, error) {
+	buf := z.buf[:4]
+	_, err := io.ReadFull(z.src, buf)
+	x := binary.LittleEndian.Uint32(buf)
+	return x, err
+}
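For context, decoding a legacy-framed stream with the new ReaderLegacy follows the usual io.Reader pattern; in this hedged sketch the input file name is a placeholder:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	// "payload.legacy.lz4" is a placeholder for a file in the legacy frame format.
	f, err := os.Open("payload.legacy.lz4")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zr := lz4.NewReaderLegacy(f)
	zr.OnBlockDone = func(n int) {
		log.Printf("decoded block of %d bytes", n)
	}

	// Stream the decompressed bytes to stdout.
	if _, err := io.Copy(os.Stdout, zr); err != nil {
		log.Fatal(err)
	}
}
```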
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
index 2cc8d95..6a60a9a 100644
--- a/vendor/github.com/pierrec/lz4/writer.go
+++ b/vendor/github.com/pierrec/lz4/writer.go
@@ -4,10 +4,18 @@
 	"encoding/binary"
 	"fmt"
 	"io"
+	"runtime"
 
 	"github.com/pierrec/lz4/internal/xxh32"
 )
 
+// zResult contains the results of compressing a block.
+type zResult struct {
+	size     uint32 // Block header
+	data     []byte // Compressed data
+	checksum uint32 // Data checksum
+}
+
 // Writer implements the LZ4 frame encoder.
 type Writer struct {
 	Header
@@ -18,10 +26,13 @@
 	buf       [19]byte      // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
 	dst       io.Writer     // Destination.
 	checksum  xxh32.XXHZero // Frame checksum.
-	zdata     []byte        // Compressed data.
-	data      []byte        // Data to be compressed.
+	data      []byte        // Data to be compressed + buffer for compressed data.
 	idx       int           // Index into data.
 	hashtable [winSize]int  // Hash table used in CompressBlock().
+
+	// For concurrency.
+	c   chan chan zResult // Channel for block compression goroutines and writer goroutine.
+	err error             // Any error encountered while writing to the underlying destination.
 }
 
 // NewWriter returns a new LZ4 frame encoder.
@@ -29,30 +40,92 @@
 // The supplied Header is checked at the first Write.
 // It is ok to change it before the first Write but then not until a Reset() is performed.
 func NewWriter(dst io.Writer) *Writer {
-	return &Writer{dst: dst}
+	z := new(Writer)
+	z.Reset(dst)
+	return z
+}
+
+// WithConcurrency sets the number of concurrent goroutines used for compression.
+// A negative value sets the concurrency to GOMAXPROCS.
+func (z *Writer) WithConcurrency(n int) *Writer {
+	switch {
+	case n == 0 || n == 1:
+		z.c = nil
+		return z
+	case n < 0:
+		n = runtime.GOMAXPROCS(0)
+	}
+	z.c = make(chan chan zResult, n)
+	// Writer goroutine managing concurrent block compression goroutines.
+	go func() {
+		// Process next block compression item.
+		for c := range z.c {
+			// Read the next compressed block result.
+			// Waiting here ensures that the blocks are output in the order they were sent.
+			// The incoming channel is always closed as it indicates to the caller that
+			// the block has been processed.
+			res := <-c
+			n := len(res.data)
+			if n == 0 {
+				// Notify the block compression routine that we are done with its result.
+				// This is used when a sentinel block is sent to terminate the compression.
+				close(c)
+				return
+			}
+			// Write the block.
+			if err := z.writeUint32(res.size); err != nil && z.err == nil {
+				z.err = err
+			}
+			if _, err := z.dst.Write(res.data); err != nil && z.err == nil {
+				z.err = err
+			}
+			if z.BlockChecksum {
+				if err := z.writeUint32(res.checksum); err != nil && z.err == nil {
+					z.err = err
+				}
+			}
+			if isCompressed := res.size&compressedBlockFlag == 0; isCompressed {
+				// It is now safe to release the buffer as no longer in use by any goroutine.
+				putBuffer(cap(res.data), res.data)
+			}
+			if h := z.OnBlockDone; h != nil {
+				h(n)
+			}
+			close(c)
+		}
+	}()
+	return z
+}
+
+// newBuffers fetches a pooled buffer sized for the Header's BlockMaxSize.
+// The first half holds the data to be compressed; the second half receives the compressed output.
+func (z *Writer) newBuffers() {
+	bSize := z.Header.BlockMaxSize
+	buf := getBuffer(bSize)
+	z.data = buf[:bSize] // Uncompressed buffer is the first half.
+}
+
+// freeBuffers puts the writer's buffers back to the pool.
+func (z *Writer) freeBuffers() {
+	// Put the buffer back into the pool, if any.
+	putBuffer(z.Header.BlockMaxSize, z.data)
+	z.data = nil
 }
 
 // writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
 func (z *Writer) writeHeader() error {
 	// Default to 4Mb if BlockMaxSize is not set.
 	if z.Header.BlockMaxSize == 0 {
-		z.Header.BlockMaxSize = bsMapID[7]
+		z.Header.BlockMaxSize = blockSize4M
 	}
 	// The only option that needs to be validated.
 	bSize := z.Header.BlockMaxSize
-	bSizeID, ok := bsMapValue[bSize]
-	if !ok {
+	if !isValidBlockSize(z.Header.BlockMaxSize) {
 		return fmt.Errorf("lz4: invalid block max size: %d", bSize)
 	}
 	// Allocate the compressed/uncompressed buffers.
 	// The compressed buffer cannot exceed the uncompressed one.
-	if cap(z.zdata) < bSize {
-		// Only allocate if there is not enough capacity.
-		// Allocate both buffers at once.
-		z.zdata = make([]byte, 2*bSize)
-	}
-	z.data = z.zdata[:bSize]                 // Uncompressed buffer is the first half.
-	z.zdata = z.zdata[:cap(z.zdata)][bSize:] // Compressed buffer is the second half.
+	z.newBuffers()
 	z.idx = 0
 
 	// Size is optional.
@@ -72,7 +145,7 @@
 		flg |= 1 << 2
 	}
 	buf[4] = flg
-	buf[5] = bSizeID << 4
+	buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4
 
 	// Current buffer size: magic(4) + flags(1) + block max size (1).
 	n := 6
@@ -152,28 +225,34 @@
 // compressBlock compresses a block.
 func (z *Writer) compressBlock(data []byte) error {
 	if !z.NoChecksum {
-		z.checksum.Write(data)
+		_, _ = z.checksum.Write(data)
 	}
 
+	if z.c != nil {
+		c := make(chan zResult)
+		z.c <- c // Send now to guarantee order
+		go writerCompressBlock(c, z.Header, data)
+		return nil
+	}
+
+	zdata := z.data[z.Header.BlockMaxSize:cap(z.data)]
 	// The compressed block size cannot exceed the input's.
 	var zn int
-	var err error
 
 	if level := z.Header.CompressionLevel; level != 0 {
-		zn, err = CompressBlockHC(data, z.zdata, level)
+		zn, _ = CompressBlockHC(data, zdata, level)
 	} else {
-		zn, err = CompressBlock(data, z.zdata, z.hashtable[:])
+		zn, _ = CompressBlock(data, zdata, z.hashtable[:])
 	}
 
-	var zdata []byte
 	var bLen uint32
 	if debugFlag {
 		debug("block compression %d => %d", len(data), zn)
 	}
-	if err == nil && zn > 0 && zn < len(data) {
+	if zn > 0 && zn < len(data) {
 		// Compressible and compressed size smaller than uncompressed: ok!
 		bLen = uint32(zn)
-		zdata = z.zdata[:zn]
+		zdata = zdata[:zn]
 	} else {
 		// Uncompressed block.
 		bLen = uint32(len(data)) | compressedBlockFlag
@@ -220,13 +299,35 @@
 		return nil
 	}
 
-	if err := z.compressBlock(z.data[:z.idx]); err != nil {
-		return err
-	}
+	data := z.data[:z.idx]
 	z.idx = 0
+	if z.c == nil {
+		return z.compressBlock(data)
+	}
+	if !z.NoChecksum {
+		_, _ = z.checksum.Write(data)
+	}
+	c := make(chan zResult)
+	z.c <- c
+	writerCompressBlock(c, z.Header, data)
 	return nil
 }
 
+func (z *Writer) close() error {
+	if z.c == nil {
+		return nil
+	}
+	// Send a sentinel block (no data to compress) to terminate the writer main goroutine.
+	c := make(chan zResult)
+	z.c <- c
+	c <- zResult{}
+	// Wait for the main goroutine to complete.
+	<-c
+	// At this point the main goroutine has shut down or is about to return.
+	z.c = nil
+	return z.err
+}
+
 // Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
 func (z *Writer) Close() error {
 	if !z.Header.done {
@@ -237,6 +338,10 @@
 	if err := z.Flush(); err != nil {
 		return err
 	}
+	if err := z.close(); err != nil {
+		return err
+	}
+	z.freeBuffers()
 
 	if debugFlag {
 		debug("writing last empty block")
@@ -258,12 +363,19 @@
 // initial state from NewWriter, but instead writing to w.
 // No access to the underlying io.Writer is performed.
 func (z *Writer) Reset(w io.Writer) {
-	z.Header = Header{}
+	n := cap(z.c)
+	_ = z.close()
+	z.freeBuffers()
+	z.Header.Reset()
 	z.dst = w
 	z.checksum.Reset()
-	z.zdata = z.zdata[:0]
-	z.data = z.data[:0]
 	z.idx = 0
+	z.err = nil
+	// reset hashtable to ensure deterministic output.
+	for i := range z.hashtable {
+		z.hashtable[i] = 0
+	}
+	z.WithConcurrency(n)
 }
 
 // writeUint32 writes a uint32 to the underlying writer.
@@ -273,3 +385,29 @@
 	_, err := z.dst.Write(buf)
 	return err
 }
+
+// writerCompressBlock compresses data into a pooled buffer and sends the result
+// on the supplied channel.
+func writerCompressBlock(c chan zResult, header Header, data []byte) {
+	zdata := getBuffer(header.BlockMaxSize)
+	// The compressed block size cannot exceed the input's.
+	var zn int
+	if level := header.CompressionLevel; level != 0 {
+		zn, _ = CompressBlockHC(data, zdata, level)
+	} else {
+		var hashTable [winSize]int
+		zn, _ = CompressBlock(data, zdata, hashTable[:])
+	}
+	var res zResult
+	if zn > 0 && zn < len(data) {
+		res.size = uint32(zn)
+		res.data = zdata[:zn]
+	} else {
+		res.size = uint32(len(data)) | compressedBlockFlag
+		res.data = data
+	}
+	if header.BlockChecksum {
+		res.checksum = xxh32.ChecksumZero(res.data)
+	}
+	c <- res
+}
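A minimal sketch of the concurrent write path introduced above, assuming the vendored import path and an arbitrary payload; WithConcurrency(-1) uses up to GOMAXPROCS goroutines:

```go
package main

import (
	"bytes"
	"log"
	"strings"

	"github.com/pierrec/lz4"
)

func main() {
	var out bytes.Buffer

	// Compress blocks on up to GOMAXPROCS goroutines; block order is preserved
	// by the writer goroutine started in WithConcurrency.
	zw := lz4.NewWriter(&out).WithConcurrency(-1)
	zw.Header.BlockMaxSize = 64 << 10

	if _, err := zw.Write([]byte(strings.Repeat("voltha openolt ", 1<<12))); err != nil {
		log.Fatal(err)
	}
	// Close flushes pending blocks and shuts down the writer goroutine.
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed to %d bytes", out.Len())
}
```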
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
index d4b9266..9159de0 100644
--- a/vendor/github.com/pkg/errors/.travis.yml
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -1,15 +1,10 @@
 language: go
 go_import_path: github.com/pkg/errors
 go:
-  - 1.4.x
-  - 1.5.x
-  - 1.6.x
-  - 1.7.x
-  - 1.8.x
-  - 1.9.x
-  - 1.10.x
   - 1.11.x
+  - 1.12.x
+  - 1.13.x
   - tip
 
 script:
-  - go test -v ./...
+  - make check
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile
new file mode 100644
index 0000000..ce9d7cd
--- /dev/null
+++ b/vendor/github.com/pkg/errors/Makefile
@@ -0,0 +1,44 @@
+PKGS := github.com/pkg/errors
+SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
+GO := go
+
+check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
+
+test: 
+	$(GO) test $(PKGS)
+
+vet: | test
+	$(GO) vet $(PKGS)
+
+staticcheck:
+	$(GO) get honnef.co/go/tools/cmd/staticcheck
+	staticcheck -checks all $(PKGS)
+
+misspell:
+	$(GO) get github.com/client9/misspell/cmd/misspell
+	misspell \
+		-locale GB \
+		-error \
+		*.md *.go
+
+unconvert:
+	$(GO) get github.com/mdempsky/unconvert
+	unconvert -v $(PKGS)
+
+ineffassign:
+	$(GO) get github.com/gordonklaus/ineffassign
+	find $(SRCDIRS) -name '*.go' | xargs ineffassign
+
+pedantic: check errcheck
+
+unparam:
+	$(GO) get mvdan.cc/unparam
+	unparam ./...
+
+errcheck:
+	$(GO) get github.com/kisielk/errcheck
+	errcheck $(PKGS)
+
+gofmt:  
+	@echo Checking code is gofmted
+	@test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
index 6483ba2..54dfdcb 100644
--- a/vendor/github.com/pkg/errors/README.md
+++ b/vendor/github.com/pkg/errors/README.md
@@ -41,11 +41,18 @@
 
 [Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
 
+## Roadmap
+
+With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
+
+- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
+- 1.0. Final release.
+
 ## Contributing
 
-We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
+Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. 
 
-Before proposing a change, please discuss your change by raising an issue.
+Before sending a PR, please discuss your change by raising an issue.
 
 ## License
 
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
index 7421f32..161aea2 100644
--- a/vendor/github.com/pkg/errors/errors.go
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -82,7 +82,7 @@
 //
 //     if err, ok := err.(stackTracer); ok {
 //             for _, f := range err.StackTrace() {
-//                     fmt.Printf("%+s:%d", f)
+//                     fmt.Printf("%+s:%d\n", f, f)
 //             }
 //     }
 //
@@ -159,6 +159,9 @@
 
 func (w *withStack) Cause() error { return w.error }
 
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withStack) Unwrap() error { return w.error }
+
 func (w *withStack) Format(s fmt.State, verb rune) {
 	switch verb {
 	case 'v':
@@ -241,6 +244,9 @@
 func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
 func (w *withMessage) Cause() error  { return w.cause }
 
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withMessage) Unwrap() error { return w.cause }
+
 func (w *withMessage) Format(s fmt.State, verb rune) {
 	switch verb {
 	case 'v':
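The Unwrap methods added above make wrapped errors visible to the standard library's errors.Is/errors.As on Go 1.13+. A small hedged example; the wrapped sentinel and message are arbitrary:

```go
package main

import (
	stderrors "errors"
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func main() {
	// withMessage and withStack now implement Unwrap, so the standard
	// library can walk the chain down to the sentinel.
	err := errors.Wrap(os.ErrNotExist, "loading adapter config")

	fmt.Println(stderrors.Is(err, os.ErrNotExist)) // true
}
```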
diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go
new file mode 100644
index 0000000..be0d10d
--- /dev/null
+++ b/vendor/github.com/pkg/errors/go113.go
@@ -0,0 +1,38 @@
+// +build go1.13
+
+package errors
+
+import (
+	stderrors "errors"
+)
+
+// Is reports whether any error in err's chain matches target.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
+func Is(err, target error) bool { return stderrors.Is(err, target) }
+
+// As finds the first error in err's chain that matches target, and if so, sets
+// target to that error value and returns true.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error matches target if the error's concrete value is assignable to the value
+// pointed to by target, or if the error has a method As(interface{}) bool such that
+// As(target) returns true. In the latter case, the As method is responsible for
+// setting target.
+//
+// As will panic if target is not a non-nil pointer to either a type that implements
+// error, or to any interface type. As returns false if err is nil.
+func As(err error, target interface{}) bool { return stderrors.As(err, target) }
+
+// Unwrap returns the result of calling the Unwrap method on err, if err's
+// type contains an Unwrap method returning error.
+// Otherwise, Unwrap returns nil.
+func Unwrap(err error) error {
+	return stderrors.Unwrap(err)
+}
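These are thin forwards to the standard library, so callers can stay on the pkg/errors API while getting Go 1.13 semantics. A brief sketch; the error text is arbitrary:

```go
package main

import (
	"fmt"
	"io"

	"github.com/pkg/errors"
)

func main() {
	err := errors.Wrap(io.EOF, "reading indication stream")

	// Is forwards to the standard library and follows Unwrap through the chain.
	fmt.Println(errors.Is(err, io.EOF)) // true

	// Unwrap peels one wrapping layer at a time.
	for e := err; e != nil; e = errors.Unwrap(e) {
		fmt.Printf("%T\n", e)
	}
}
```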
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
index 2874a04..779a834 100644
--- a/vendor/github.com/pkg/errors/stack.go
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -5,10 +5,13 @@
 	"io"
 	"path"
 	"runtime"
+	"strconv"
 	"strings"
 )
 
 // Frame represents a program counter inside a stack frame.
+// For historical reasons, if Frame is interpreted as a uintptr
+// its value represents the program counter + 1.
 type Frame uintptr
 
 // pc returns the program counter for this frame;
@@ -37,6 +40,15 @@
 	return line
 }
 
+// name returns the name of this function, if known.
+func (f Frame) name() string {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return "unknown"
+	}
+	return fn.Name()
+}
+
 // Format formats the frame according to the fmt.Formatter interface.
 //
 //    %s    source file
@@ -54,22 +66,16 @@
 	case 's':
 		switch {
 		case s.Flag('+'):
-			pc := f.pc()
-			fn := runtime.FuncForPC(pc)
-			if fn == nil {
-				io.WriteString(s, "unknown")
-			} else {
-				file, _ := fn.FileLine(pc)
-				fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
-			}
+			io.WriteString(s, f.name())
+			io.WriteString(s, "\n\t")
+			io.WriteString(s, f.file())
 		default:
 			io.WriteString(s, path.Base(f.file()))
 		}
 	case 'd':
-		fmt.Fprintf(s, "%d", f.line())
+		io.WriteString(s, strconv.Itoa(f.line()))
 	case 'n':
-		name := runtime.FuncForPC(f.pc()).Name()
-		io.WriteString(s, funcname(name))
+		io.WriteString(s, funcname(f.name()))
 	case 'v':
 		f.Format(s, 's')
 		io.WriteString(s, ":")
@@ -77,6 +83,16 @@
 	}
 }
 
+// MarshalText formats a stacktrace Frame as a text string. The output is the
+// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
+func (f Frame) MarshalText() ([]byte, error) {
+	name := f.name()
+	if name == "unknown" {
+		return []byte(name), nil
+	}
+	return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
+}
+
 // StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
 type StackTrace []Frame
 
@@ -94,18 +110,32 @@
 		switch {
 		case s.Flag('+'):
 			for _, f := range st {
-				fmt.Fprintf(s, "\n%+v", f)
+				io.WriteString(s, "\n")
+				f.Format(s, verb)
 			}
 		case s.Flag('#'):
 			fmt.Fprintf(s, "%#v", []Frame(st))
 		default:
-			fmt.Fprintf(s, "%v", []Frame(st))
+			st.formatSlice(s, verb)
 		}
 	case 's':
-		fmt.Fprintf(s, "%s", []Frame(st))
+		st.formatSlice(s, verb)
 	}
 }
 
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(s fmt.State, verb rune) {
+	io.WriteString(s, "[")
+	for i, f := range st {
+		if i > 0 {
+			io.WriteString(s, " ")
+		}
+		f.Format(s, verb)
+	}
+	io.WriteString(s, "]")
+}
+
 // stack represents a stack of program counters.
 type stack []uintptr
 
diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml
index aead076..ce9afea 100644
--- a/vendor/github.com/rcrowley/go-metrics/.travis.yml
+++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml
@@ -11,6 +11,9 @@
     - "1.10"
     - "1.11"
     - "1.12"
+    - "1.13"
+    - "1.14"
+    - "1.15"
 
 script:
     - ./validate.sh
diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml
index e81cc88..435aea1 100644
--- a/vendor/github.com/uber/jaeger-client-go/.travis.yml
+++ b/vendor/github.com/uber/jaeger-client-go/.travis.yml
@@ -7,26 +7,26 @@
 
 matrix:
   include:
-  - go: 1.13.x
-    env:
-    - TESTS=true
-    - USE_DEP=true
-    - COVERAGE=true
-  - go: 1.13.x
+  # - go: 1.15.x
+  #   env:
+  #   - TESTS=true
+  #   - USE_DEP=true
+  #   - COVERAGE=true
+  - go: 1.15.x
     env:
     - USE_DEP=true
     - CROSSDOCK=true
-  - go: 1.13.x
-    env:
-    - TESTS=true
-    - USE_DEP=false
-    - USE_GLIDE=true
+  # - go: 1.15.x
+  #   env:
+  #   - TESTS=true
+  #   - USE_DEP=false
+  #   - USE_GLIDE=true
   # test with previous version of Go
-  - go: 1.12.x
-    env:
-    - TESTS=true
-    - USE_DEP=true
-    - CI_SKIP_LINT=true
+  # - go: 1.14.x
+  #   env:
+  #   - TESTS=true
+  #   - USE_DEP=true
+  #   - CI_SKIP_LINT=true
 
 services:
   - docker
diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
index 944feb2..956790e 100644
--- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
+++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
@@ -1,6 +1,66 @@
 Changes by Version
 ==================
 
+2.29.2 (unreleased)
+-------------------
+- Nothing yet.
+
+
+2.29.1 (2021-05-24)
+-------------------
+- Remove dependency on "testing" in "thrift" (#586) -- @yurishkuro
+
+
+2.29.0 (2021-05-20)
+-------------------
+- Update vendored thrift to 0.14.1 (#584) -- @nhatthm
+
+
+2.28.0 (2021-04-30)
+-------------------
+- HTTPSamplingStrategyFetcher: Use http client with 10 second timeout (#578) -- Joe Elliott
+
+
+2.27.0 (2021-04-19)
+-------------------
+- Don't override HTTP Reporter batch size to 1; default to 100, user can override (#571) -- R. Aidan Campbell
+
+
+2.26.0 (2021-04-16)
+-------------------
+- Delete a baggage item when value is blank (#562) -- evan.kim
+- Trim baggage key when parsing (#566) -- sicong.huang
+- feat: extend configuration to support custom randomNumber func (#555) -- NemoO_o
+- Support JAEGER_TRACEID_128BIT env var (#547) -- Yuri Shkuro
+- Additional context protections (#544) -- Joe Elliott
+- Lock RemotelyControlledSampler.sampler on callbacks (#543) -- Dima
+- Upgrade build to Go 1.15 (#539) -- Yuri Shkuro
+- Upgrade to jaeger-lib@2.3.0 to fix broken codahale/hdrhistogram dependency (#537) -- Yuri Shkuro
+- Prefix TraceID/SpanID.String() with zeroes (#533) -- Lukas Vogel
+- Upgrade to OpenTracing Go 1.2 (#525) -- Yuri Shkuro
+
+
+2.25.0 (2020-07-13)
+-------------------
+## Breaking changes
+- [feat] Periodically re-resolve UDP server address, with opt-out (#520) -- Trevor Foster
+
+  The re-resolving of UDP address is now enabled by default, to make the client more robust in Kubernetes deployments.
+  The old resolve-once behavior can be restored by setting DisableAttemptReconnecting=true in the Configuration struct,
+  or via JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED=true environment variable.
+
+## Bug fixes
+- Do not add invalid context to references (#521) -- Yuri Shkuro
+
+
+2.24.0 (2020-06-14)
+-------------------
+- Mention FromEnv() in the README, docs, and examples (#518) -- Martin Lercher
+- Serialize access to RemotelyControlledSampler.sampler (#515) -- Dima
+- Override reporter config only when agent host/port is set in env (#513) -- ilylia
+- Converge on JAEGER_SAMPLING_ENDPOINT env variable (#511) -- Eundoo Song
+
+
 2.23.1 (2020-04-28)
 -------------------
 - Fix regression by handling nil logger correctly ([#507](https://github.com/jaegertracing/jaeger-client-go/pull/507)) -- Prithvi Raj
diff --git a/vendor/github.com/uber/jaeger-client-go/CODEOWNERS b/vendor/github.com/uber/jaeger-client-go/CODEOWNERS
new file mode 100644
index 0000000..0572efc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/CODEOWNERS
@@ -0,0 +1,2 @@
+
+* @jaegertracing/jaeger-maintainers
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
index 2a5215a..268289b 100644
--- a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
+++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
@@ -2,12 +2,12 @@
 
 
 [[projects]]
-  digest = "1:9f3b30d9f8e0d7040f729b82dcbc8f0dead820a133b3147ce355fc451f32d761"
-  name = "github.com/BurntSushi/toml"
+  digest = "1:4c4c33075b704791d6a7f09dfb55c66769e8a1dc6adf87026292d274fe8ad113"
+  name = "github.com/HdrHistogram/hdrhistogram-go"
   packages = ["."]
   pruneopts = "UT"
-  revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005"
-  version = "v0.3.1"
+  revision = "3a0bb77429bd3a61596f5e8a3172445844342120"
+  version = "0.9.0"
 
 [[projects]]
   digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
@@ -19,14 +19,6 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:4c4c33075b704791d6a7f09dfb55c66769e8a1dc6adf87026292d274fe8ad113"
-  name = "github.com/codahale/hdrhistogram"
-  packages = ["."]
-  pruneopts = "UT"
-  revision = "3a0bb77429bd3a61596f5e8a3172445844342120"
-
-[[projects]]
-  branch = "master"
   digest = "1:a382acd6150713655ded76ab5fbcbc7924a7808dab4312dda5d1f23dd8ce5277"
   name = "github.com/crossdock/crossdock-go"
   packages = [
@@ -50,16 +42,22 @@
   name = "github.com/golang/mock"
   packages = ["gomock"]
   pruneopts = "UT"
-  revision = "3a35fb6e3e18b9dbfee291262260dee7372d2a92"
-  version = "v1.4.3"
+  revision = "f7b1909c82a8958747e5c87c6a5c3b2eaed8a33d"
+  version = "v1.4.4"
 
 [[projects]]
-  digest = "1:573ca21d3669500ff845bdebee890eb7fc7f0f50c59f2132f2a0c6b03d85086a"
+  digest = "1:4a32eb57407190eced21a21abee9ce4d4ab6f0bf113ca61cb1cb2d549a65c985"
   name = "github.com/golang/protobuf"
-  packages = ["proto"]
+  packages = [
+    "proto",
+    "ptypes",
+    "ptypes/any",
+    "ptypes/duration",
+    "ptypes/timestamp",
+  ]
   pruneopts = "UT"
-  revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
-  version = "v1.3.2"
+  revision = "d04d7b157bb510b1e0c10132224b616ac0e26b17"
+  version = "v1.4.2"
 
 [[projects]]
   digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
@@ -70,7 +68,7 @@
   version = "v1.0.1"
 
 [[projects]]
-  digest = "1:727b8f567a30d0739d6c26b9472b3422b351c93cf62095164c845a54b16fc18e"
+  digest = "1:fe5217d44ae8fb84f711968816fe50077cea9dfa8f44425b8e44e7e3de896d01"
   name = "github.com/opentracing/opentracing-go"
   packages = [
     ".",
@@ -79,16 +77,16 @@
     "log",
   ]
   pruneopts = "UT"
-  revision = "659c90643e714681897ec2521c60567dd21da733"
-  version = "v1.1.0"
+  revision = "d34af3eaa63c4d08ab54863a4bdd0daa45212e12"
+  version = "v1.2.0"
 
 [[projects]]
-  digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b"
+  digest = "1:9e1d37b58d17113ec3cb5608ac0382313c5b59470b94ed97d0976e69c7022314"
   name = "github.com/pkg/errors"
   packages = ["."]
   pruneopts = "UT"
-  revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
-  version = "v0.8.1"
+  revision = "614d223910a179a466c1767a985424175c39b465"
+  version = "v0.9.1"
 
 [[projects]]
   digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
@@ -110,15 +108,15 @@
   version = "v1.1.0"
 
 [[projects]]
-  branch = "master"
-  digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
+  digest = "1:0db23933b8052702d980a3f029149b3f175f7c0eea0cff85b175017d0f2722c0"
   name = "github.com/prometheus/client_model"
   packages = ["go"]
   pruneopts = "UT"
-  revision = "14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016"
+  revision = "7bc5445566f0fe75b15de23e6b93886e982d7bf9"
+  version = "v0.2.0"
 
 [[projects]]
-  digest = "1:f119e3205d3a1f0f19dbd7038eb37528e2c6f0933269dc344e305951fb87d632"
+  digest = "1:4407525bde4e6ad9c1f60113d38cbb255d769e0ea506c8cf877db39da7753b3a"
   name = "github.com/prometheus/common"
   packages = [
     "expfmt",
@@ -126,11 +124,11 @@
     "model",
   ]
   pruneopts = "UT"
-  revision = "287d3e634a1e550c9e463dd7e5a75a422c614505"
-  version = "v0.7.0"
+  revision = "317b7b125e8fddda956d0c9574e5f03f438ed5bc"
+  version = "v0.14.0"
 
 [[projects]]
-  digest = "1:a210815b437763623ecca8eb91e6a0bf4f2d6773c5a6c9aec0e28f19e5fd6deb"
+  digest = "1:b2268435af85ee1a0fca0e37de4225f78e2d9d8b0b66acde3a29f127634efa87"
   name = "github.com/prometheus/procfs"
   packages = [
     ".",
@@ -138,23 +136,32 @@
     "internal/util",
   ]
   pruneopts = "UT"
-  revision = "499c85531f756d1129edd26485a5f73871eeb308"
-  version = "v0.0.5"
+  revision = "9dece15c53cd5e9fbfbd72d5108adcf526a3f486"
+  version = "v0.2.0"
 
 [[projects]]
-  digest = "1:0496f0e99014b7fd0a560c539f51d0882731137b85494142f47e550e4657176a"
+  digest = "1:86ff4af7b6bb3d27c2e89b5ef8c139678acff1cad74a3c5235fc5af6b94fcc9e"
+  name = "github.com/stretchr/objx"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "35313a95ee26395aa17d366c71a2ccf788fa69b6"
+  version = "v0.3.0"
+
+[[projects]]
+  digest = "1:5201127841a78d84d0ca68a2e564c08e3882c0fb9321a75997ce87926e0d63ea"
   name = "github.com/stretchr/testify"
   packages = [
     "assert",
+    "mock",
     "require",
     "suite",
   ]
   pruneopts = "UT"
-  revision = "221dbe5ed46703ee255b1da0dec05086f5035f62"
-  version = "v1.4.0"
+  revision = "f654a9112bbeac49ca2cd45bfbe11533c4666cf8"
+  version = "v1.6.1"
 
 [[projects]]
-  digest = "1:0ec60ffd594af00ba1660bc746aa0e443d27dd4003dee55f9d08a0b4ff5431a3"
+  digest = "1:4af46f2faea30e52c96ec9ec32bb654d2729579a80d242b0acfa193ad321eb61"
   name = "github.com/uber/jaeger-lib"
   packages = [
     "metrics",
@@ -162,35 +169,27 @@
     "metrics/prometheus",
   ]
   pruneopts = "UT"
-  revision = "a87ae9d84fb038a8d79266298970720be7c80fcd"
-  version = "v2.2.0"
+  revision = "48cc1df63e6be0d63b95677f0d22beb880bce1e4"
+  version = "v2.3.0"
 
 [[projects]]
-  digest = "1:0bdcb0c740d79d400bd3f7946ac22a715c94db62b20bfd2e01cd50693aba0600"
+  digest = "1:7a3de4371d6b68c6f37a0df2c09905664d9de59026c91cbe275aae55f4fe760f"
   name = "go.uber.org/atomic"
   packages = ["."]
   pruneopts = "UT"
-  revision = "9dc4df04d0d1c39369750a9f6c32c39560672089"
-  version = "v1.5.0"
+  revision = "12f27ba2637fa0e13772a4f05fa46a5d18d53182"
+  version = "v1.7.0"
 
 [[projects]]
-  digest = "1:002ebc50f3ef475ac325e1904be931d9dcba6dc6d73b5682afce0c63436e3902"
+  digest = "1:e9eeeabfd025a5e69b9c8e2857d3517ea67e747ae913bcb0a9e1e7bafdb9c298"
   name = "go.uber.org/multierr"
   packages = ["."]
   pruneopts = "UT"
-  revision = "c3fc3d02ec864719d8e25be2d7dde1e35a36aa27"
-  version = "v1.3.0"
+  revision = "3114a8b704d2d28dbacda34a872690aaef66aeed"
+  version = "v1.6.0"
 
 [[projects]]
-  branch = "master"
-  digest = "1:3032e90a153750ea149f68bf081f97ca738f041fba45c41c80737f572ffdf2f4"
-  name = "go.uber.org/tools"
-  packages = ["update-license"]
-  pruneopts = "UT"
-  revision = "2cfd321de3ee5d5f8a5fda2521d1703478334d98"
-
-[[projects]]
-  digest = "1:98a70115729234dc73ee7bb83973cb39cb8fedf278d17df77264382bad0183ec"
+  digest = "1:0398f5f0e2e9233f25fad702f3b323241daf9f876cc869ab259238cf1bced236"
   name = "go.uber.org/zap"
   packages = [
     ".",
@@ -202,19 +201,8 @@
     "zaptest/observer",
   ]
   pruneopts = "UT"
-  revision = "a6015e13fab9b744d96085308ce4e8f11bad1996"
-  version = "v1.12.0"
-
-[[projects]]
-  branch = "master"
-  digest = "1:21d7bad9b7da270fd2d50aba8971a041bd691165c95096a2a4c68db823cbc86a"
-  name = "golang.org/x/lint"
-  packages = [
-    ".",
-    "golint",
-  ]
-  pruneopts = "UT"
-  revision = "16217165b5de779cb6a5e4fc81fa9c1166fda457"
+  revision = "404189cf44aea95b0cd9bddcb0242dd4cf88c510"
+  version = "v1.16.0"
 
 [[projects]]
   branch = "master"
@@ -225,81 +213,65 @@
     "context/ctxhttp",
   ]
   pruneopts = "UT"
-  revision = "0deb6923b6d97481cb43bc1043fe5b72a0143032"
+  revision = "328152dc79b1547da63f950cd4cdd9afd50b2774"
 
 [[projects]]
   branch = "master"
-  digest = "1:5dfb17d45415b7b8927382f53955a66f55f9d9d11557aa82f7f481d642ab247a"
+  digest = "1:1e581fa394685ef0d84008ae04cf3414390c1a700c04846853869cb4ac2fec86"
   name = "golang.org/x/sys"
-  packages = ["windows"]
-  pruneopts = "UT"
-  revision = "f43be2a4598cf3a47be9f94f0c28197ed9eae611"
-
-[[projects]]
-  branch = "master"
-  digest = "1:bae8b3bf837d9d7f601776f37f44e031d46943677beff8fb2eb9c7317d44de2f"
-  name = "golang.org/x/tools"
   packages = [
-    "go/analysis",
-    "go/analysis/passes/inspect",
-    "go/ast/astutil",
-    "go/ast/inspector",
-    "go/buildutil",
-    "go/gcexportdata",
-    "go/internal/gcimporter",
-    "go/internal/packagesdriver",
-    "go/packages",
-    "go/types/objectpath",
-    "go/types/typeutil",
-    "internal/fastwalk",
-    "internal/gopathwalk",
-    "internal/semver",
-    "internal/span",
+    "internal/unsafeheader",
+    "unix",
+    "windows",
   ]
   pruneopts = "UT"
-  revision = "8dbcdeb83d3faec5315146800b375c4962a42fc6"
+  revision = "d9f96fdee20d1e5115ee34ba4016eae6cfb66eb9"
 
 [[projects]]
-  digest = "1:59f10c1537d2199d9115d946927fe31165959a95190849c82ff11e05803528b0"
-  name = "gopkg.in/yaml.v2"
+  digest = "1:fd328c5b52e433ea3ffc891bcc4f94469a82bf478558208db2b386aad8a304a1"
+  name = "google.golang.org/protobuf"
+  packages = [
+    "encoding/prototext",
+    "encoding/protowire",
+    "internal/descfmt",
+    "internal/descopts",
+    "internal/detrand",
+    "internal/encoding/defval",
+    "internal/encoding/messageset",
+    "internal/encoding/tag",
+    "internal/encoding/text",
+    "internal/errors",
+    "internal/fieldsort",
+    "internal/filedesc",
+    "internal/filetype",
+    "internal/flags",
+    "internal/genid",
+    "internal/impl",
+    "internal/mapsort",
+    "internal/pragma",
+    "internal/set",
+    "internal/strs",
+    "internal/version",
+    "proto",
+    "reflect/protoreflect",
+    "reflect/protoregistry",
+    "runtime/protoiface",
+    "runtime/protoimpl",
+    "types/known/anypb",
+    "types/known/durationpb",
+    "types/known/timestamppb",
+  ]
+  pruneopts = "UT"
+  revision = "3f7a61f89bb6813f89d981d1870ed68da0b3c3f1"
+  version = "v1.25.0"
+
+[[projects]]
+  branch = "v3"
+  digest = "1:229cb0f6192914f518cc1241ede6d6f1f458b31debfa18bf3a5c9e4f7b01e24b"
+  name = "gopkg.in/yaml.v3"
   packages = ["."]
   pruneopts = "UT"
-  revision = "f221b8435cfb71e54062f6c6e99e9ade30b124d5"
-  version = "v2.2.4"
-
-[[projects]]
-  digest = "1:131158a88aad1f94854d0aa21a64af2802d0a470fb0f01cb33c04fafd2047111"
-  name = "honnef.co/go/tools"
-  packages = [
-    "arg",
-    "cmd/staticcheck",
-    "config",
-    "deprecated",
-    "facts",
-    "functions",
-    "go/types/typeutil",
-    "internal/cache",
-    "internal/passes/buildssa",
-    "internal/renameio",
-    "internal/sharedcheck",
-    "lint",
-    "lint/lintdsl",
-    "lint/lintutil",
-    "lint/lintutil/format",
-    "loader",
-    "printf",
-    "simple",
-    "ssa",
-    "ssautil",
-    "staticcheck",
-    "staticcheck/vrp",
-    "stylecheck",
-    "unused",
-    "version",
-  ]
-  pruneopts = "UT"
-  revision = "afd67930eec2a9ed3e9b19f684d17a062285f16a"
-  version = "2019.2.3"
+  revision = "eeeca48fe7764f320e4870d231902bf9c1be2c08"
 
 [solve-meta]
   analyzer-name = "dep"
@@ -314,6 +286,7 @@
     "github.com/pkg/errors",
     "github.com/prometheus/client_golang/prometheus",
     "github.com/stretchr/testify/assert",
+    "github.com/stretchr/testify/mock",
     "github.com/stretchr/testify/require",
     "github.com/stretchr/testify/suite",
     "github.com/uber/jaeger-lib/metrics",
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
index 1fed7f8..3aa307a 100644
--- a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
+++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
@@ -4,7 +4,7 @@
 
 [[constraint]]
   name = "github.com/opentracing/opentracing-go"
-  version = "^1.1"
+  version = "^1.2"
 
 [[constraint]]
   name = "github.com/prometheus/client_golang"
@@ -20,7 +20,7 @@
 
 [[constraint]]
   name = "github.com/uber/jaeger-lib"
-  version = "^2.2"
+  version = "^2.3"
 
 [[constraint]]
   name = "go.uber.org/zap"
diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile
index d5e962c..bb7463c 100644
--- a/vendor/github.com/uber/jaeger-client-go/Makefile
+++ b/vendor/github.com/uber/jaeger-client-go/Makefile
@@ -1,5 +1,6 @@
 PROJECT_ROOT=github.com/uber/jaeger-client-go
-PACKAGES := . $(shell go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u)
+export GO111MODULE=off
+PACKAGES := . $(shell GO111MODULE=off go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u)
 # all .go files that don't exist in hidden directories
 ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \
         -e ".*/\..*" \
@@ -18,8 +19,8 @@
 FMT_LOG=fmt.log
 LINT_LOG=lint.log
 
-THRIFT_VER=0.9.3
-THRIFT_IMG=thrift:$(THRIFT_VER)
+THRIFT_VER=0.14
+THRIFT_IMG=jaegertracing/thrift:$(THRIFT_VER)
 THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift
 THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift"
 THRIFT_GEN_DIR=thrift-gen
@@ -46,22 +47,37 @@
 	./scripts/updateLicenses.sh
 
 .PHONY: lint
-lint:
+lint: vet golint lint-fmt lint-thrift-testing
+
+.PHONY: vet
+vet:
 	$(GOVET) $(PACKAGES)
+
+.PHONY: golint
+golint:
 	@cat /dev/null > $(LINT_LOG)
 	@$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;)
 	@[ ! -s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false)
+
+.PHONY: lint-fmt
+lint-fmt:
 	@$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG)
 	./scripts/updateLicenses.sh >> $(FMT_LOG)
 	@[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false)
 
+# make sure thrift/ module does not import "testing"
+.PHONY: lint-thrift-testing
+lint-thrift-testing:
+	@cat /dev/null > $(LINT_LOG)
+	@(grep -rn '"testing"' thrift | grep -v README.md > $(LINT_LOG)) || true
+	@[ ! -s "$(LINT_LOG)" ] || (echo '"thrift" module must not import "testing", see issue #585' | cat - $(LINT_LOG) && false)
 
 .PHONY: install
 install:
 	@echo install: USE_DEP=$(USE_DEP) USE_GLIDE=$(USE_GLIDE)
 ifeq ($(USE_DEP),true)
 	dep version || make install-dep
-	dep ensure
+	dep ensure -vendor-only -v
 endif
 ifeq ($(USE_GLIDE),true)
 	glide --version || go get github.com/Masterminds/glide
@@ -131,4 +147,3 @@
 else
 	make lint
 endif
-
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
index 7c348e7..687f578 100644
--- a/vendor/github.com/uber/jaeger-client-go/README.md
+++ b/vendor/github.com/uber/jaeger-client-go/README.md
@@ -44,28 +44,34 @@
 
 ### Environment variables
 
-The tracer can be initialized with values coming from environment variables. None of the env vars are required
-and all of them can be overridden via direct setting of the property on the configuration object.
+The tracer can be initialized with values coming from environment variables, if it is
+[built from a config](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#Configuration.NewTracer)
+that was created via [FromEnv()](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#FromEnv).
+None of the env vars are required and all of them can be overridden via direct setting 
+of the property on the configuration object.
 
 Property| Description
 --- | ---
-JAEGER_SERVICE_NAME | The service name
-JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP
-JAEGER_AGENT_PORT | The port for communicating with agent via UDP
-JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces
-JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint
-JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint
-JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans
-JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size
-JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. "500ms" or "2s" ([valid units][timeunits])
-JAEGER_SAMPLER_TYPE | The sampler type
-JAEGER_SAMPLER_PARAM | The sampler parameter (number)
-JAEGER_SAMPLER_MANAGER_HOST_PORT | The HTTP endpoint when using the remote sampler, i.e. http://jaeger-agent:5778/sampling
-JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of
-JAEGER_SAMPLER_REFRESH_INTERVAL | How often the remotely controlled sampler will poll jaeger-agent for the appropriate sampling strategy, with units, e.g. "1m" or "30s" ([valid units][timeunits])
-JAEGER_TAGS | A comma separated list of `name = value` tracer level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:default}`, where the `:default` is optional, and identifies a value to be used if the environment variable cannot be found
-JAEGER_DISABLED | Whether the tracer is disabled or not. If true, the default `opentracing.NoopTracer` is used.
-JAEGER_RPC_METRICS | Whether to store RPC metrics
+JAEGER_SERVICE_NAME | The service name.
+JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP (default `localhost`).
+JAEGER_AGENT_PORT | The port for communicating with agent via UDP (default `6831`).
+JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. If specified, the agent host/port are ignored.
+JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint.
+JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint.
+JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans, `true` or `false` (default `false`).
+JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size (default `100`).
+JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. `500ms` or `2s` ([valid units][timeunits]; default `1s`).
+JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED | When `true`, disables the UDP connection helper that periodically re-resolves the agent's hostname and reconnects if there was a change (default `false`).
+JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL | Controls how often the agent client re-resolves the provided hostname in order to detect address changes ([valid units][timeunits]; default `30s`).
+JAEGER_SAMPLER_TYPE | The sampler type: `remote`, `const`, `probabilistic`, `ratelimiting` (default `remote`). See also https://www.jaegertracing.io/docs/latest/sampling/.
+JAEGER_SAMPLER_PARAM | The sampler parameter (number).
+JAEGER_SAMPLER_MANAGER_HOST_PORT | (deprecated) The HTTP endpoint when using the `remote` sampler.
+JAEGER_SAMPLING_ENDPOINT | The URL for the sampling configuration server when using sampler type `remote` (default `http://127.0.0.1:5778/sampling`).
+JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of (default `2000`).
+JAEGER_SAMPLER_REFRESH_INTERVAL | How often the `remote` sampler should poll the configuration server for the appropriate sampling strategy, e.g. "1m" or "30s" ([valid units][timeunits]; default `1m`).
+JAEGER_TAGS | A comma-separated list of `name=value` tracer-level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:defaultValue}`.
+JAEGER_DISABLED | Whether the tracer is disabled or not. If `true`, the `opentracing.NoopTracer` is used (default `false`).
+JAEGER_RPC_METRICS | Whether to store RPC metrics, `true` or `false` (default `false`).
 
 By default, the client sends traces via UDP to the agent at `localhost:6831`. Use `JAEGER_AGENT_HOST` and
 `JAEGER_AGENT_PORT` to send UDP traces to a different `host:port`. If `JAEGER_ENDPOINT` is set, the client sends traces
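A minimal usage sketch of the env-var-driven setup documented above (assumptions: an application-side `main` package, with the service name expected to come from `JAEGER_SERVICE_NAME`). It builds the tracer via `config.FromEnv()`, as the updated README requires for these variables to take effect:

```go
package main

import (
	"io"
	"log"

	"github.com/opentracing/opentracing-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

// initTracerFromEnv builds a tracer whose settings come from the JAEGER_*
// environment variables and registers it as the global OpenTracing tracer.
func initTracerFromEnv() (io.Closer, error) {
	cfg, err := jaegercfg.FromEnv() // reads JAEGER_SERVICE_NAME, JAEGER_AGENT_HOST, ...
	if err != nil {
		return nil, err
	}
	// Any value parsed from the environment can still be overridden on cfg here.
	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		return nil, err
	}
	opentracing.SetGlobalTracer(tracer)
	return closer, nil
}

func main() {
	closer, err := initTracerFromEnv()
	if err != nil {
		log.Fatalf("could not initialize tracer: %v", err)
	}
	defer closer.Close()
}
```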
diff --git a/vendor/github.com/uber/jaeger-client-go/RELEASE.md b/vendor/github.com/uber/jaeger-client-go/RELEASE.md
index 115e49a..12438d8 100644
--- a/vendor/github.com/uber/jaeger-client-go/RELEASE.md
+++ b/vendor/github.com/uber/jaeger-client-go/RELEASE.md
@@ -2,6 +2,7 @@
 
 1. Create a PR "Preparing for release X.Y.Z" against master branch
     * Alter CHANGELOG.md from `<placeholder_version> (unreleased)` to `<X.Y.Z> (YYYY-MM-DD)`
+    * Use `git log --pretty=format:'- %s -- %an'` as the basis for the changelog entries
     * Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z`
 2. Create a release "Release X.Y.Z" on Github
     * Create Tag `vX.Y.Z`
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
index 44e9353..c2222f1 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/config.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/config.go
@@ -22,6 +22,7 @@
 	"time"
 
 	"github.com/opentracing/opentracing-go"
+	"github.com/uber/jaeger-client-go/utils"
 
 	"github.com/uber/jaeger-client-go"
 	"github.com/uber/jaeger-client-go/internal/baggage/remote"
@@ -36,16 +37,22 @@
 // Configuration configures and creates Jaeger Tracer
 type Configuration struct {
 	// ServiceName specifies the service name to use on the tracer.
-	// Can be provided via environment variable named JAEGER_SERVICE_NAME
+	// Can be provided by FromEnv() via the environment variable named JAEGER_SERVICE_NAME
 	ServiceName string `yaml:"serviceName"`
 
-	// Disabled can be provided via environment variable named JAEGER_DISABLED
+	// Disabled makes the config return opentracing.NoopTracer.
+	// Value can be provided by FromEnv() via the environment variable named JAEGER_DISABLED.
 	Disabled bool `yaml:"disabled"`
 
-	// RPCMetrics can be provided via environment variable named JAEGER_RPC_METRICS
+	// RPCMetrics enables generation of RPC metrics (requires a metrics factory to be provided).
+	// Value can be provided by FromEnv() via the environment variable named JAEGER_RPC_METRICS
 	RPCMetrics bool `yaml:"rpc_metrics"`
 
-	// Tags can be provided via environment variable named JAEGER_TAGS
+	// Gen128Bit instructs the tracer to generate 128-bit wide trace IDs, compatible with W3C Trace Context.
+	// Value can be provided by FromEnv() via the environment variable named JAEGER_TRACEID_128BIT.
+	Gen128Bit bool `yaml:"traceid_128bit"`
+
+	// Tags can be provided by FromEnv() via the environment variable named JAEGER_TAGS
 	Tags []opentracing.Tag `yaml:"tags"`
 
 	Sampler             *SamplerConfig             `yaml:"sampler"`
@@ -57,8 +64,8 @@
 
 // SamplerConfig allows initializing a non-default sampler.  All fields are optional.
 type SamplerConfig struct {
-	// Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
-	// Can be set by exporting an environment variable named JAEGER_SAMPLER_TYPE
+	// Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_TYPE
 	Type string `yaml:"type"`
 
 	// Param is a value passed to the sampler.
@@ -69,22 +76,23 @@
 	// - for "remote" sampler, param is the same as for "probabilistic"
 	//   and indicates the initial sampling rate before the actual one
 	//   is received from the mothership.
-	// Can be set by exporting an environment variable named JAEGER_SAMPLER_PARAM
+	// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_PARAM
 	Param float64 `yaml:"param"`
 
-	// SamplingServerURL is the address of jaeger-agent's HTTP sampling server
-	// Can be set by exporting an environment variable named JAEGER_SAMPLER_MANAGER_HOST_PORT
+	// SamplingServerURL is the URL of sampling manager that can provide
+	// sampling strategy to this service.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLING_ENDPOINT
 	SamplingServerURL string `yaml:"samplingServerURL"`
 
 	// SamplingRefreshInterval controls how often the remotely controlled sampler will poll
-	// jaeger-agent for the appropriate sampling strategy.
-	// Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
+	// sampling manager for the appropriate sampling strategy.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
 	SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"`
 
 	// MaxOperations is the maximum number of operations that the PerOperationSampler
 	// will keep track of. If an operation is not tracked, a default probabilistic
 	// sampler will be used rather than the per operation specific sampler.
-	// Can be set by exporting an environment variable named JAEGER_SAMPLER_MAX_OPERATIONS.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_MAX_OPERATIONS.
 	MaxOperations int `yaml:"maxOperations"`
 
 	// Opt-in feature for applications that require late binding of span name via explicit
@@ -105,34 +113,46 @@
 	// QueueSize controls how many spans the reporter can keep in memory before it starts dropping
 	// new spans. The queue is continuously drained by a background go-routine, as fast as spans
 	// can be sent out of process.
-	// Can be set by exporting an environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE
+	// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE
 	QueueSize int `yaml:"queueSize"`
 
 	// BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full.
 	// It is generally not useful, as it only matters for very low traffic services.
-	// Can be set by exporting an environment variable named JAEGER_REPORTER_FLUSH_INTERVAL
+	// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_FLUSH_INTERVAL
 	BufferFlushInterval time.Duration
 
 	// LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter
 	// and logs all submitted spans. Main Configuration.Logger must be initialized in the code
 	// for this option to have any effect.
-	// Can be set by exporting an environment variable named JAEGER_REPORTER_LOG_SPANS
+	// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_LOG_SPANS
 	LogSpans bool `yaml:"logSpans"`
 
-	// LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address
-	// Can be set by exporting an environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
+	// LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
 	LocalAgentHostPort string `yaml:"localAgentHostPort"`
 
-	// CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL
-	// Can be set by exporting an environment variable named JAEGER_ENDPOINT
+	// DisableAttemptReconnecting, when true, disables the UDP connection helper that periodically re-resolves
+	// the agent's hostname and reconnects if there was a change. This option only
+	// applies if LocalAgentHostPort is specified.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED
+	DisableAttemptReconnecting bool `yaml:"disableAttemptReconnecting"`
+
+	// AttemptReconnectInterval controls how often the agent client re-resolves the provided hostname
+	// in order to detect address changes. This option only applies if DisableAttemptReconnecting is false.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL
+	AttemptReconnectInterval time.Duration
+
+	// CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_ENDPOINT
 	CollectorEndpoint string `yaml:"collectorEndpoint"`
 
 	// User instructs reporter to include a user for basic http authentication when sending spans to jaeger-collector.
-	// Can be set by exporting an environment variable named JAEGER_USER
+	// Can be provided by FromEnv() via the environment variable named JAEGER_USER
 	User string `yaml:"user"`
 
 	// Password instructs reporter to include a password for basic http authentication when sending spans to
-	// jaeger-collector. Can be set by exporting an environment variable named JAEGER_PASSWORD
+	// jaeger-collector.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_PASSWORD
 	Password string `yaml:"password"`
 
 	// HTTPHeaders instructs the reporter to add these headers to the http request when reporting spans.
@@ -247,13 +267,20 @@
 		jaeger.TracerOptions.Metrics(tracerMetrics),
 		jaeger.TracerOptions.Logger(opts.logger),
 		jaeger.TracerOptions.CustomHeaderKeys(c.Headers),
-		jaeger.TracerOptions.Gen128Bit(opts.gen128Bit),
 		jaeger.TracerOptions.PoolSpans(opts.poolSpans),
 		jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan),
 		jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength),
 		jaeger.TracerOptions.NoDebugFlagOnForcedSampling(opts.noDebugFlagOnForcedSampling),
 	}
 
+	if c.Gen128Bit || opts.gen128Bit {
+		tracerOptions = append(tracerOptions, jaeger.TracerOptions.Gen128Bit(true))
+	}
+
+	if opts.randomNumber != nil {
+		tracerOptions = append(tracerOptions, jaeger.TracerOptions.RandomNumber(opts.randomNumber))
+	}
+
 	for _, tag := range opts.tags {
 		tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
 	}
@@ -382,7 +409,7 @@
 	metrics *jaeger.Metrics,
 	logger jaeger.Logger,
 ) (jaeger.Reporter, error) {
-	sender, err := rc.newTransport()
+	sender, err := rc.newTransport(logger)
 	if err != nil {
 		return nil, err
 	}
@@ -399,15 +426,22 @@
 	return reporter, err
 }
 
-func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) {
+func (rc *ReporterConfig) newTransport(logger jaeger.Logger) (jaeger.Transport, error) {
 	switch {
 	case rc.CollectorEndpoint != "":
-		httpOptions := []transport.HTTPOption{transport.HTTPBatchSize(1), transport.HTTPHeaders(rc.HTTPHeaders)}
+		httpOptions := []transport.HTTPOption{transport.HTTPHeaders(rc.HTTPHeaders)}
 		if rc.User != "" && rc.Password != "" {
 			httpOptions = append(httpOptions, transport.HTTPBasicAuth(rc.User, rc.Password))
 		}
 		return transport.NewHTTPTransport(rc.CollectorEndpoint, httpOptions...), nil
 	default:
-		return jaeger.NewUDPTransport(rc.LocalAgentHostPort, 0)
+		return jaeger.NewUDPTransportWithParams(jaeger.UDPTransportParams{
+			AgentClientUDPParams: utils.AgentClientUDPParams{
+				HostPort:                   rc.LocalAgentHostPort,
+				Logger:                     logger,
+				DisableAttemptReconnecting: rc.DisableAttemptReconnecting,
+				AttemptReconnectInterval:   rc.AttemptReconnectInterval,
+			},
+		})
 	}
 }
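For reference, a hedged sketch of a programmatic `Configuration` that exercises the fields added in this file (`Gen128Bit`, `DisableAttemptReconnecting`, `AttemptReconnectInterval`). The service name, agent address, and sampler settings are illustrative assumptions, not values taken from this change:

```go
package main

import (
	"log"
	"time"

	jaegercfg "github.com/uber/jaeger-client-go/config"
)

func main() {
	cfg := jaegercfg.Configuration{
		ServiceName: "openolt-adapter", // illustrative service name
		Gen128Bit:   true,              // 128-bit trace IDs, compatible with W3C Trace Context
		Sampler: &jaegercfg.SamplerConfig{
			Type:  "const",
			Param: 1, // sample everything (illustrative)
		},
		Reporter: &jaegercfg.ReporterConfig{
			LocalAgentHostPort:         "jaeger-agent:6831", // illustrative agent address
			DisableAttemptReconnecting: false,
			AttemptReconnectInterval:   30 * time.Second,
			LogSpans:                   true,
		},
	}
	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatalf("could not create tracer: %v", err)
	}
	defer closer.Close()
	_ = tracer
}
```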
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
index a729bd8..0fc3c53 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/config_env.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
@@ -24,29 +24,32 @@
 
 	"github.com/opentracing/opentracing-go"
 	"github.com/pkg/errors"
-
 	"github.com/uber/jaeger-client-go"
 )
 
 const (
 	// environment variable names
-	envServiceName            = "JAEGER_SERVICE_NAME"
-	envDisabled               = "JAEGER_DISABLED"
-	envRPCMetrics             = "JAEGER_RPC_METRICS"
-	envTags                   = "JAEGER_TAGS"
-	envSamplerType            = "JAEGER_SAMPLER_TYPE"
-	envSamplerParam           = "JAEGER_SAMPLER_PARAM"
-	envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT"
-	envSamplerMaxOperations   = "JAEGER_SAMPLER_MAX_OPERATIONS"
-	envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL"
-	envReporterMaxQueueSize   = "JAEGER_REPORTER_MAX_QUEUE_SIZE"
-	envReporterFlushInterval  = "JAEGER_REPORTER_FLUSH_INTERVAL"
-	envReporterLogSpans       = "JAEGER_REPORTER_LOG_SPANS"
-	envEndpoint               = "JAEGER_ENDPOINT"
-	envUser                   = "JAEGER_USER"
-	envPassword               = "JAEGER_PASSWORD"
-	envAgentHost              = "JAEGER_AGENT_HOST"
-	envAgentPort              = "JAEGER_AGENT_PORT"
+	envServiceName                         = "JAEGER_SERVICE_NAME"
+	envDisabled                            = "JAEGER_DISABLED"
+	envRPCMetrics                          = "JAEGER_RPC_METRICS"
+	envTags                                = "JAEGER_TAGS"
+	envSamplerType                         = "JAEGER_SAMPLER_TYPE"
+	envSamplerParam                        = "JAEGER_SAMPLER_PARAM"
+	envSamplerManagerHostPort              = "JAEGER_SAMPLER_MANAGER_HOST_PORT" // Deprecated by envSamplingEndpoint
+	envSamplingEndpoint                    = "JAEGER_SAMPLING_ENDPOINT"
+	envSamplerMaxOperations                = "JAEGER_SAMPLER_MAX_OPERATIONS"
+	envSamplerRefreshInterval              = "JAEGER_SAMPLER_REFRESH_INTERVAL"
+	envReporterMaxQueueSize                = "JAEGER_REPORTER_MAX_QUEUE_SIZE"
+	envReporterFlushInterval               = "JAEGER_REPORTER_FLUSH_INTERVAL"
+	envReporterLogSpans                    = "JAEGER_REPORTER_LOG_SPANS"
+	envReporterAttemptReconnectingDisabled = "JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED"
+	envReporterAttemptReconnectInterval    = "JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL"
+	envEndpoint                            = "JAEGER_ENDPOINT"
+	envUser                                = "JAEGER_USER"
+	envPassword                            = "JAEGER_PASSWORD"
+	envAgentHost                           = "JAEGER_AGENT_HOST"
+	envAgentPort                           = "JAEGER_AGENT_PORT"
+	env128bit                              = "JAEGER_TRACEID_128BIT"
 )
 
 // FromEnv uses environment variables to set the tracer's Configuration
@@ -81,6 +84,14 @@
 		c.Tags = parseTags(e)
 	}
 
+	if e := os.Getenv(env128bit); e != "" {
+		if value, err := strconv.ParseBool(e); err == nil {
+			c.Gen128Bit = value
+		} else {
+			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", env128bit, e)
+		}
+	}
+
 	if c.Sampler == nil {
 		c.Sampler = &SamplerConfig{}
 	}
@@ -118,7 +129,9 @@
 		}
 	}
 
-	if e := os.Getenv(envSamplerManagerHostPort); e != "" {
+	if e := os.Getenv(envSamplingEndpoint); e != "" {
+		sc.SamplingServerURL = e
+	} else if e := os.Getenv(envSamplerManagerHostPort); e != "" {
 		sc.SamplingServerURL = e
 	} else if e := os.Getenv(envAgentHost); e != "" {
 		// Fallback if we know the agent host - try the sampling endpoint there
@@ -184,20 +197,43 @@
 		rc.User = user
 		rc.Password = pswd
 	} else {
+		useEnv := false
 		host := jaeger.DefaultUDPSpanServerHost
 		if e := os.Getenv(envAgentHost); e != "" {
 			host = e
+			useEnv = true
 		}
 
 		port := jaeger.DefaultUDPSpanServerPort
 		if e := os.Getenv(envAgentPort); e != "" {
 			if value, err := strconv.ParseInt(e, 10, 0); err == nil {
 				port = int(value)
+				useEnv = true
 			} else {
 				return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e)
 			}
 		}
-		rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port)
+		if useEnv || rc.LocalAgentHostPort == "" {
+			rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port)
+		}
+
+		if e := os.Getenv(envReporterAttemptReconnectingDisabled); e != "" {
+			if value, err := strconv.ParseBool(e); err == nil {
+				rc.DisableAttemptReconnecting = value
+			} else {
+				return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectingDisabled, e)
+			}
+		}
+
+		if !rc.DisableAttemptReconnecting {
+			if e := os.Getenv(envReporterAttemptReconnectInterval); e != "" {
+				if value, err := time.ParseDuration(e); err == nil {
+					rc.AttemptReconnectInterval = value
+				} else {
+					return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectInterval, e)
+				}
+			}
+		}
 	}
 
 	return rc, nil
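A small sketch of the sampling-endpoint precedence implemented above: when both variables are set, `JAEGER_SAMPLING_ENDPOINT` wins over the deprecated `JAEGER_SAMPLER_MANAGER_HOST_PORT`. The URLs and service name below are illustrative only:

```go
package main

import (
	"fmt"
	"log"
	"os"

	jaegercfg "github.com/uber/jaeger-client-go/config"
)

func main() {
	// Illustrative values only.
	os.Setenv("JAEGER_SERVICE_NAME", "openolt-adapter")
	os.Setenv("JAEGER_SAMPLER_MANAGER_HOST_PORT", "http://old-agent:5778/sampling") // deprecated variable
	os.Setenv("JAEGER_SAMPLING_ENDPOINT", "http://sampling-server:5778/sampling")   // takes precedence

	cfg, err := jaegercfg.FromEnv()
	if err != nil {
		log.Fatalf("FromEnv failed: %v", err)
	}
	// Prints the value of JAEGER_SAMPLING_ENDPOINT, not the deprecated variable.
	fmt.Println(cfg.Sampler.SamplingServerURL)
}
```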
diff --git a/vendor/github.com/uber/jaeger-client-go/config/options.go b/vendor/github.com/uber/jaeger-client-go/config/options.go
index e0e50e8..a2b9cbc 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/options.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/options.go
@@ -40,6 +40,7 @@
 	tags                        []opentracing.Tag
 	injectors                   map[interface{}]jaeger.Injector
 	extractors                  map[interface{}]jaeger.Extractor
+	randomNumber                func() uint64
 }
 
 // Metrics creates an Option that initializes Metrics in the tracer,
@@ -147,6 +148,13 @@
 	}
 }
 
+// WithRandomNumber supplies a random number generator function to the Tracer used to generate trace and span IDs.
+func WithRandomNumber(f func() uint64) Option {
+	return func(c *Options) {
+		c.randomNumber = f
+	}
+}
+
 func applyOptions(options ...Option) Options {
 	opts := Options{
 		injectors:  make(map[interface{}]jaeger.Injector),
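A hedged sketch of the new `WithRandomNumber` option: it supplies a custom trace/span ID generator when building the tracer. The fixed seed and service name are illustrative, and the mutex is only there because `rand.Rand` is not safe for concurrent use:

```go
package main

import (
	"log"
	"math/rand"
	"sync"

	jaegercfg "github.com/uber/jaeger-client-go/config"
)

func main() {
	// Deterministic source for illustration; a real generator must be
	// safe for concurrent use, hence the mutex around rand.Rand.
	var mu sync.Mutex
	rng := rand.New(rand.NewSource(42))
	nextID := func() uint64 {
		mu.Lock()
		defer mu.Unlock()
		return rng.Uint64()
	}

	cfg := jaegercfg.Configuration{ServiceName: "openolt-adapter"} // illustrative name
	tracer, closer, err := cfg.NewTracer(
		jaegercfg.WithRandomNumber(nextID), // option added in options.go above
	)
	if err != nil {
		log.Fatalf("could not create tracer: %v", err)
	}
	defer closer.Close()
	_ = tracer
}
```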
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
index 1f8578f..d8eb698 100644
--- a/vendor/github.com/uber/jaeger-client-go/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/constants.go
@@ -22,7 +22,7 @@
 
 const (
 	// JaegerClientVersion is the version of the client library reported as Span tag.
-	JaegerClientVersion = "Go-2.23.1"
+	JaegerClientVersion = "Go-2.29.1"
 
 	// JaegerClientVersionTagKey is the name of the tag used to report client version.
 	JaegerClientVersionTagKey = "jaeger.version"
@@ -102,5 +102,5 @@
 
 var (
 	// DefaultSamplingServerURL is the default url to fetch sampling config from, via http
-	DefaultSamplingServerURL = fmt.Sprintf("http://localhost:%d/sampling", DefaultSamplingServerPort)
+	DefaultSamplingServerURL = fmt.Sprintf("http://127.0.0.1:%d/sampling", DefaultSamplingServerPort)
 )
diff --git a/vendor/github.com/uber/jaeger-client-go/glide.lock b/vendor/github.com/uber/jaeger-client-go/glide.lock
index f4c05b2..c1ec339 100644
--- a/vendor/github.com/uber/jaeger-client-go/glide.lock
+++ b/vendor/github.com/uber/jaeger-client-go/glide.lock
@@ -1,11 +1,11 @@
-hash: a4a449cfc060c2d7be850a69b171e4382a3bd00d1a0a72cfc944facc3fe263bf
-updated: 2019-09-23T17:10:15.213856-04:00
+hash: 63bec420a22b7e5abac8c602c5cc9b66a33d6a1bfec8918eecc77fd344b759ed
+updated: 2020-07-31T13:30:37.242608-04:00
 imports:
 - name: github.com/beorn7/perks
-  version: 37c8de3658fcb183f997c4e13e8337516ab753e6
+  version: 3a771d992973f24aa725d07868b467d1ddfceafb
   subpackages:
   - quantile
-- name: github.com/codahale/hdrhistogram
+- name: github.com/HdrHistogram/hdrhistogram-go
   version: 3a0bb77429bd3a61596f5e8a3172445844342120
 - name: github.com/crossdock/crossdock-go
   version: 049aabb0122b03bc9bd30cab8f3f91fb60166361
@@ -13,11 +13,15 @@
   - assert
   - require
 - name: github.com/davecgh/go-spew
-  version: d8f796af33cc11cb798c1aaeb27a4ebc5099927d
+  version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73
   subpackages:
   - spew
+- name: github.com/golang/mock
+  version: 51421b967af1f557f93a59e0057aaf15ca02e29c
+  subpackages:
+  - gomock
 - name: github.com/golang/protobuf
-  version: 1680a479a2cfb3fa22b972af7e36d0a0fde47bf8
+  version: b5d812f8a3706043e23a9cd5babf2e5423744d30
   subpackages:
   - proto
 - name: github.com/matttproud/golang_protobuf_extensions
@@ -25,7 +29,7 @@
   subpackages:
   - pbutil
 - name: github.com/opentracing/opentracing-go
-  version: 659c90643e714681897ec2521c60567dd21da733
+  version: d34af3eaa63c4d08ab54863a4bdd0daa45212e12
   subpackages:
   - ext
   - harness
@@ -42,57 +46,60 @@
   - prometheus
   - prometheus/internal
 - name: github.com/prometheus/client_model
-  version: 14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016
+  version: fd36f4220a901265f90734c3183c5f0c91daa0b8
   subpackages:
   - go
 - name: github.com/prometheus/common
-  version: 287d3e634a1e550c9e463dd7e5a75a422c614505
+  version: 1ab4d74fc89940cfbc3c2b3a89821336cdefa119
   subpackages:
   - expfmt
   - internal/bitbucket.org/ww/goautoneg
   - model
 - name: github.com/prometheus/procfs
-  version: de25ac347ef9305868b04dc42425c973b863b18c
+  version: 8a055596020d692cf491851e47ba3e302d9f90ce
   subpackages:
   - internal/fs
   - internal/util
 - name: github.com/stretchr/testify
-  version: 85f2b59c4459e5bf57488796be8c3667cb8246d6
+  version: f654a9112bbeac49ca2cd45bfbe11533c4666cf8
   subpackages:
   - assert
+  - mock
   - require
   - suite
 - name: github.com/uber-go/atomic
-  version: df976f2515e274675050de7b3f42545de80594fd
+  version: 845920076a298bdb984fb0f1b86052e4ca0a281c
 - name: github.com/uber/jaeger-lib
-  version: a87ae9d84fb038a8d79266298970720be7c80fcd
+  version: 48cc1df63e6be0d63b95677f0d22beb880bce1e4
   subpackages:
   - metrics
   - metrics/metricstest
   - metrics/prometheus
 - name: go.uber.org/atomic
-  version: df976f2515e274675050de7b3f42545de80594fd
+  version: 845920076a298bdb984fb0f1b86052e4ca0a281c
 - name: go.uber.org/multierr
-  version: 3c4937480c32f4c13a875a1829af76c98ca3d40a
+  version: b587143a48b62b01d337824eab43700af6ffe222
 - name: go.uber.org/zap
-  version: 27376062155ad36be76b0f12cf1572a221d3a48c
+  version: feeb9a050b31b40eec6f2470e7599eeeadfe5bdd
   subpackages:
   - buffer
   - internal/bufferpool
   - internal/color
   - internal/exit
   - zapcore
+  - zaptest/observer
 - name: golang.org/x/net
-  version: aa69164e4478b84860dc6769c710c699c67058a3
+  version: addf6b3196f61cd44ce5a76657913698c73479d0
   subpackages:
   - context
   - context/ctxhttp
 - name: golang.org/x/sys
-  version: 0a153f010e6963173baba2306531d173aa843137
+  version: 3e129f6d46b10b0e1da36b3deffcb55e09631b64
   subpackages:
+  - internal/unsafeheader
   - windows
-- name: gopkg.in/yaml.v2
-  version: 51d6538a90f86fe93ac480b35f37b2be17fef232
-- name: github.com/golang/mock 
-  version: 3a35fb6e3e18b9dbfee291262260dee7372d2a92
-testImports: []
+- name: gopkg.in/yaml.v3
+  version: eeeca48fe7764f320e4870d231902bf9c1be2c08
+testImports:
+- name: github.com/stretchr/objx
+  version: 35313a95ee26395aa17d366c71a2ccf788fa69b6
diff --git a/vendor/github.com/uber/jaeger-client-go/glide.yaml b/vendor/github.com/uber/jaeger-client-go/glide.yaml
index eb58c67..295678c 100644
--- a/vendor/github.com/uber/jaeger-client-go/glide.yaml
+++ b/vendor/github.com/uber/jaeger-client-go/glide.yaml
@@ -1,13 +1,13 @@
 package: github.com/uber/jaeger-client-go
 import:
 - package: github.com/opentracing/opentracing-go
-  version: ^1.1
+  version: ^1.2
   subpackages:
   - ext
   - log
 - package: github.com/crossdock/crossdock-go
 - package: github.com/uber/jaeger-lib
-  version: ^2.0.0
+  version: ^2.3.0
   subpackages:
   - metrics
 - package: github.com/pkg/errors
@@ -18,7 +18,9 @@
 - package: github.com/uber-go/atomic
   version: ^1
 - package: github.com/prometheus/client_golang
-  version: ^1
+  version: 1.1
+- package: github.com/prometheus/procfs
+  version: 0.0.6
 testImport:
 - package: github.com/stretchr/testify
   subpackages:
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
index a56515a..2f58bb5 100644
--- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
+++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
@@ -15,6 +15,7 @@
 package remote
 
 import (
+	"context"
 	"fmt"
 	"net/url"
 	"sync"
@@ -37,7 +38,7 @@
 	}
 }
 
-func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(serviceName string) ([]*thrift.BaggageRestriction, error) {
+func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(context.Context, string) ([]*thrift.BaggageRestriction, error) {
 	var out []*thrift.BaggageRestriction
 	if err := utils.GetJSON(s.url, &out); err != nil {
 		return nil, err
@@ -134,7 +135,7 @@
 }
 
 func (m *RestrictionManager) updateRestrictions() error {
-	restrictions, err := m.thriftProxy.GetBaggageRestrictions(m.serviceName)
+	restrictions, err := m.thriftProxy.GetBaggageRestrictions(context.Background(), m.serviceName)
 	if err != nil {
 		m.metrics.BaggageRestrictionsUpdateFailure.Inc(1)
 		return err
diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go
index 42fd64b..e06459b 100644
--- a/vendor/github.com/uber/jaeger-client-go/propagation.go
+++ b/vendor/github.com/uber/jaeger-client-go/propagation.go
@@ -215,6 +215,12 @@
 	return nil
 }
 
+// W3C limits https://github.com/w3c/baggage/blob/master/baggage/HTTP_HEADER_FORMAT.md#limits
+const (
+	maxBinaryBaggage      = 180
+	maxBinaryNameValueLen = 4096
+)
+
 // Extract implements Extractor of BinaryPropagator
 func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
 	carrier, ok := abstractCarrier.(io.Reader)
@@ -245,6 +251,9 @@
 	if err := binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil {
 		return emptyContext, opentracing.ErrSpanContextCorrupted
 	}
+	if numBaggage > maxBinaryBaggage {
+		return emptyContext, opentracing.ErrSpanContextCorrupted
+	}
 	if iNumBaggage := int(numBaggage); iNumBaggage > 0 {
 		ctx.baggage = make(map[string]string, iNumBaggage)
 		buf := p.buffers.Get().(*bytes.Buffer)
@@ -265,6 +274,9 @@
 			if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil {
 				return emptyContext, opentracing.ErrSpanContextCorrupted
 			}
+			if keyLen+valLen > maxBinaryNameValueLen {
+				return emptyContext, opentracing.ErrSpanContextCorrupted
+			}
 			buf.Reset()
 			buf.Grow(int(valLen))
 			if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen {
@@ -292,7 +304,7 @@
 	for _, kvpair := range strings.Split(value, ",") {
 		kv := strings.Split(strings.TrimSpace(kvpair), "=")
 		if len(kv) == 2 {
-			baggage[kv[0]] = kv[1]
+			baggage[strings.TrimSpace(kv[0])] = kv[1]
 		} else {
 			log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader)
 		}
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
index 112e3e1..119f0a1 100644
--- a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
@@ -29,6 +29,7 @@
 )
 
 const (
+	defaultRemoteSamplingTimeout   = 10 * time.Second
 	defaultSamplingRefreshInterval = time.Minute
 )
 
@@ -64,7 +65,7 @@
 	// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
 	closed int64 // 0 - not closed, 1 - closed
 
-	sync.RWMutex
+	sync.RWMutex // used to serialize access to samplerOptions.sampler
 	samplerOptions
 
 	serviceName string
@@ -95,21 +96,29 @@
 
 // OnCreateSpan implements OnCreateSpan of SamplerV2.
 func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision {
+	s.RLock()
+	defer s.RUnlock()
 	return s.sampler.OnCreateSpan(span)
 }
 
 // OnSetOperationName implements OnSetOperationName of SamplerV2.
 func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+	s.RLock()
+	defer s.RUnlock()
 	return s.sampler.OnSetOperationName(span, operationName)
 }
 
 // OnSetTag implements OnSetTag of SamplerV2.
 func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+	s.RLock()
+	defer s.RUnlock()
 	return s.sampler.OnSetTag(span, key, value)
 }
 
 // OnFinishSpan implements OnFinishSpan of SamplerV2.
 func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision {
+	s.RLock()
+	defer s.RUnlock()
 	return s.sampler.OnFinishSpan(span)
 }
 
@@ -153,11 +162,10 @@
 
 // Sampler returns the currently active sampler.
 func (s *RemotelyControlledSampler) Sampler() SamplerV2 {
-	s.Lock()
-	defer s.Unlock()
+	s.RLock()
+	defer s.RUnlock()
 	return s.sampler
 }
-
 func (s *RemotelyControlledSampler) setSampler(sampler SamplerV2) {
 	s.Lock()
 	defer s.Unlock()
@@ -291,8 +299,22 @@
 // -----------------------
 
 type httpSamplingStrategyFetcher struct {
-	serverURL string
-	logger    log.DebugLogger
+	serverURL  string
+	logger     log.DebugLogger
+	httpClient http.Client
+}
+
+func newHTTPSamplingStrategyFetcher(serverURL string, logger log.DebugLogger) *httpSamplingStrategyFetcher {
+	customTransport := http.DefaultTransport.(*http.Transport).Clone()
+	customTransport.ResponseHeaderTimeout = defaultRemoteSamplingTimeout
+
+	return &httpSamplingStrategyFetcher{
+		serverURL: serverURL,
+		logger:    logger,
+		httpClient: http.Client{
+			Transport: customTransport,
+		},
+	}
 }
 
 func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) {
@@ -300,8 +322,7 @@
 	v.Set("service", serviceName)
 	uri := f.serverURL + "?" + v.Encode()
 
-	// TODO create and reuse http.Client with proper timeout settings, etc.
-	resp, err := http.Get(uri)
+	resp, err := f.httpClient.Get(uri)
 	if err != nil {
 		return nil, err
 	}
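The fetcher above replaces the bare `http.Get` with a client whose transport is a clone of `http.DefaultTransport` carrying a bounded `ResponseHeaderTimeout`. A standalone sketch of the same pattern (the URL is illustrative; the 10s value mirrors `defaultRemoteSamplingTimeout`):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Clone the default transport so proxy/TLS settings are preserved,
	// then bound how long we wait for response headers.
	t := http.DefaultTransport.(*http.Transport).Clone()
	t.ResponseHeaderTimeout = 10 * time.Second

	client := http.Client{Transport: t}
	resp, err := client.Get("http://127.0.0.1:5778/sampling?service=openolt-adapter") // illustrative URL
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```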
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
index e4a6108..64b028b 100644
--- a/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
@@ -140,10 +140,7 @@
 		o.samplingRefreshInterval = defaultSamplingRefreshInterval
 	}
 	if o.samplingFetcher == nil {
-		o.samplingFetcher = &httpSamplingStrategyFetcher{
-			serverURL: o.samplingServerURL,
-			logger:    o.logger,
-		}
+		o.samplingFetcher = newHTTPSamplingStrategyFetcher(o.samplingServerURL, o.logger)
 	}
 	if o.samplingParser == nil {
 		o.samplingParser = new(samplingStrategyParser)
diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go
index 42c9112..997cffd 100644
--- a/vendor/github.com/uber/jaeger-client-go/span.go
+++ b/vendor/github.com/uber/jaeger-client-go/span.go
@@ -85,8 +85,9 @@
 func (s *Span) SetOperationName(operationName string) opentracing.Span {
 	s.Lock()
 	s.operationName = operationName
+	ctx := s.context
 	s.Unlock()
-	if !s.isSamplingFinalized() {
+	if !ctx.isSamplingFinalized() {
 		decision := s.tracer.sampler.OnSetOperationName(s, operationName)
 		s.applySamplingDecision(decision, true)
 	}
@@ -100,15 +101,25 @@
 }
 
 func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span {
+	var ctx SpanContext
+	var operationName string
+	if lock {
+		ctx = s.SpanContext()
+		operationName = s.OperationName()
+	} else {
+		ctx = s.context
+		operationName = s.operationName
+	}
+
 	s.observer.OnSetTag(key, value)
-	if key == string(ext.SamplingPriority) && !setSamplingPriority(s, value) {
+	if key == string(ext.SamplingPriority) && !setSamplingPriority(ctx.samplingState, operationName, s.tracer, value) {
 		return s
 	}
-	if !s.isSamplingFinalized() {
+	if !ctx.isSamplingFinalized() {
 		decision := s.tracer.sampler.OnSetTag(s, key, value)
 		s.applySamplingDecision(decision, lock)
 	}
-	if s.isWriteable() {
+	if ctx.isWriteable() {
 		if lock {
 			s.Lock()
 			defer s.Unlock()
@@ -303,7 +314,14 @@
 	s.numDroppedLogs = 0
 }
 
-// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext
+// SetBaggageItem implements SetBaggageItem() of opentracing.Span.
+// The call is proxied via tracer.baggageSetter to allow policies to be applied
+// before allowing to set/replace baggage keys.
+// The setter eventually stores a new SpanContext with extended baggage:
+//
+//     span.context = span.context.WithBaggageItem(key, value)
+//
+// See SpanContext.WithBaggageItem() for an explanation of why it's done this way.
 func (s *Span) SetBaggageItem(key, value string) opentracing.Span {
 	s.Lock()
 	defer s.Unlock()
@@ -333,12 +351,13 @@
 	s.observer.OnFinish(options)
 	s.Lock()
 	s.duration = options.FinishTime.Sub(s.startTime)
+	ctx := s.context
 	s.Unlock()
-	if !s.isSamplingFinalized() {
+	if !ctx.isSamplingFinalized() {
 		decision := s.tracer.sampler.OnFinishSpan(s)
 		s.applySamplingDecision(decision, true)
 	}
-	if s.context.IsSampled() {
+	if ctx.IsSampled() {
 		s.Lock()
 		s.fixLogsIfDropped()
 		if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 {
@@ -419,11 +438,18 @@
 }
 
 func (s *Span) applySamplingDecision(decision SamplingDecision, lock bool) {
+	var ctx SpanContext
+	if lock {
+		ctx = s.SpanContext()
+	} else {
+		ctx = s.context
+	}
+
 	if !decision.Retryable {
-		s.context.samplingState.setFinal()
+		ctx.samplingState.setFinal()
 	}
 	if decision.Sample {
-		s.context.samplingState.setSampled()
+		ctx.samplingState.setSampled()
 		if len(decision.Tags) > 0 {
 			if lock {
 				s.Lock()
@@ -436,44 +462,34 @@
 	}
 }
 
-// Span can be written to if it is sampled or the sampling decision has not been finalized.
-func (s *Span) isWriteable() bool {
-	state := s.context.samplingState
-	return !state.isFinal() || state.isSampled()
-}
-
-func (s *Span) isSamplingFinalized() bool {
-	return s.context.samplingState.isFinal()
-}
-
 // setSamplingPriority returns true if the flag was updated successfully, false otherwise.
 // The behavior of setSamplingPriority is surprising
 // If noDebugFlagOnForcedSampling is set
-//     setSamplingPriority(span, 1) always sets only flagSampled
+//     setSamplingPriority(..., 1) always sets only flagSampled
 // If noDebugFlagOnForcedSampling is unset, and isDebugAllowed passes
-//     setSamplingPriority(span, 1) sets both flagSampled and flagDebug
+//     setSamplingPriority(..., 1) sets both flagSampled and flagDebug
 // However,
-//     setSamplingPriority(span, 0) always only resets flagSampled
+//     setSamplingPriority(..., 0) always only resets flagSampled
 //
-// This means that doing a setSamplingPriority(span, 1) followed by setSamplingPriority(span, 0) can
+// This means that doing a setSamplingPriority(..., 1) followed by setSamplingPriority(..., 0) can
 // leave flagDebug set
-func setSamplingPriority(s *Span, value interface{}) bool {
+func setSamplingPriority(state *samplingState, operationName string, tracer *Tracer, value interface{}) bool {
 	val, ok := value.(uint16)
 	if !ok {
 		return false
 	}
 	if val == 0 {
-		s.context.samplingState.unsetSampled()
-		s.context.samplingState.setFinal()
+		state.unsetSampled()
+		state.setFinal()
 		return true
 	}
-	if s.tracer.options.noDebugFlagOnForcedSampling {
-		s.context.samplingState.setSampled()
-		s.context.samplingState.setFinal()
+	if tracer.options.noDebugFlagOnForcedSampling {
+		state.setSampled()
+		state.setFinal()
 		return true
-	} else if s.tracer.isDebugAllowed(s.operationName) {
-		s.context.samplingState.setDebugAndSampled()
-		s.context.samplingState.setFinal()
+	} else if tracer.isDebugAllowed(operationName) {
+		state.setDebugAndSampled()
+		state.setFinal()
 		return true
 	}
 	return false
diff --git a/vendor/github.com/uber/jaeger-client-go/span_context.go b/vendor/github.com/uber/jaeger-client-go/span_context.go
index 1b44f3f..5b2307b 100644
--- a/vendor/github.com/uber/jaeger-client-go/span_context.go
+++ b/vendor/github.com/uber/jaeger-client-go/span_context.go
@@ -212,10 +212,14 @@
 }
 
 func (c SpanContext) String() string {
-	if c.traceID.High == 0 {
-		return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
+	var flags int32
+	if c.samplingState != nil {
+		flags = c.samplingState.stateFlags.Load()
 	}
-	return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
+	if c.traceID.High == 0 {
+		return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags)
+	}
+	return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags)
 }
 
 // ContextFromString reconstructs the Context encoded in a string
@@ -267,6 +271,16 @@
 	return c.samplingState.flags()
 }
 
+// Span can be written to if it is sampled or the sampling decision has not been finalized.
+func (c SpanContext) isWriteable() bool {
+	state := c.samplingState
+	return !state.isFinal() || state.isSampled()
+}
+
+func (c SpanContext) isSamplingFinalized() bool {
+	return c.samplingState.isFinal()
+}
+
 // NewSpanContext creates a new instance of SpanContext
 func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext {
 	samplingState := &samplingState{}
@@ -300,8 +314,29 @@
 }
 
 // WithBaggageItem creates a new context with an extra baggage item.
+// Passing a blank value deletes the baggage item.
+//
+// The SpanContext is designed to be immutable and passed by value. As such,
+// it cannot contain any locks, and should only hold immutable data, including baggage.
+// Another reason for why baggage is immutable is when the span context is passed
+// as a parent when starting a new span. The new span's baggage cannot affect the parent
+// span's baggage, so the child span either needs to take a copy of the parent baggage
+// (which is expensive and unnecessary since baggage rarely changes in the life span of
+// a trace), or it needs to do a copy-on-write, which is the approach taken here.
 func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
 	var newBaggage map[string]string
+	// unset baggage item
+	if value == "" {
+		if _, ok := c.baggage[key]; !ok {
+			return c
+		}
+		newBaggage = make(map[string]string, len(c.baggage))
+		for k, v := range c.baggage {
+			newBaggage[k] = v
+		}
+		delete(newBaggage, key)
+		return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote}
+	}
 	if c.baggage == nil {
 		newBaggage = map[string]string{key: value}
 	} else {
@@ -332,9 +367,9 @@
 
 func (t TraceID) String() string {
 	if t.High == 0 {
-		return fmt.Sprintf("%x", t.Low)
+		return fmt.Sprintf("%016x", t.Low)
 	}
-	return fmt.Sprintf("%x%016x", t.High, t.Low)
+	return fmt.Sprintf("%016x%016x", t.High, t.Low)
 }
 
 // TraceIDFromString creates a TraceID from a hexadecimal string
@@ -367,7 +402,7 @@
 // ------- SpanID -------
 
 func (s SpanID) String() string {
-	return fmt.Sprintf("%x", uint64(s))
+	return fmt.Sprintf("%016x", uint64(s))
 }
 
 // SpanIDFromString creates a SpanID from a hexadecimal string
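A small sketch of the copy-on-write baggage semantics described in the `WithBaggageItem` comment above; the trace and span IDs are arbitrary illustrative values:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go"
)

func main() {
	// Arbitrary IDs, for illustration only.
	parent := jaeger.NewSpanContext(jaeger.TraceID{Low: 1}, jaeger.SpanID(2), jaeger.SpanID(0), true, nil)

	child := parent.WithBaggageItem("tenant", "acme") // returns a new context; parent is untouched
	cleared := child.WithBaggageItem("tenant", "")    // a blank value removes the key

	dump := func(name string, c jaeger.SpanContext) {
		fmt.Printf("%s baggage:", name)
		c.ForeachBaggageItem(func(k, v string) bool {
			fmt.Printf(" %s=%s", k, v)
			return true
		})
		fmt.Println()
	}
	dump("parent", parent)   // no baggage
	dump("child", child)     // tenant=acme
	dump("cleared", cleared) // no baggage
}
```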
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go
new file mode 100644
index 0000000..54cd3b0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package agent
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go
similarity index 75%
rename from vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go
rename to vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go
index aa9857b..a0df507 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go
@@ -1,19 +1,23 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
 
 package agent
 
-import (
+import(
 	"bytes"
+	"context"
 	"fmt"
+	"time"
 	"github.com/uber/jaeger-client-go/thrift"
 	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
 	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+
 )
 
 // (needed to ensure safety because of naive import list construction.)
 var _ = thrift.ZERO
 var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
 var _ = bytes.Equal
 
 var _ = jaeger.GoUnusedProtection__
@@ -21,3 +25,4 @@
 
 func init() {
 }
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
index e48811c..6472e84 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
@@ -1,411 +1,396 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
 
 package agent
 
-import (
+import(
 	"bytes"
+	"context"
 	"fmt"
+	"time"
 	"github.com/uber/jaeger-client-go/thrift"
 	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
 	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+
 )
 
 // (needed to ensure safety because of naive import list construction.)
 var _ = thrift.ZERO
 var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
 var _ = bytes.Equal
 
 var _ = jaeger.GoUnusedProtection__
 var _ = zipkincore.GoUnusedProtection__
-
 type Agent interface {
-	// Parameters:
-	//  - Spans
-	EmitZipkinBatch(spans []*zipkincore.Span) (err error)
-	// Parameters:
-	//  - Batch
-	EmitBatch(batch *jaeger.Batch) (err error)
+  // Parameters:
+  //  - Spans
+  EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error)
+  // Parameters:
+  //  - Batch
+  EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error)
 }
 
 type AgentClient struct {
-	Transport       thrift.TTransport
-	ProtocolFactory thrift.TProtocolFactory
-	InputProtocol   thrift.TProtocol
-	OutputProtocol  thrift.TProtocol
-	SeqId           int32
+  c thrift.TClient
+  meta thrift.ResponseMeta
 }
 
 func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
-	return &AgentClient{Transport: t,
-		ProtocolFactory: f,
-		InputProtocol:   f.GetProtocol(t),
-		OutputProtocol:  f.GetProtocol(t),
-		SeqId:           0,
-	}
+  return &AgentClient{
+    c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+  }
 }
 
 func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
-	return &AgentClient{Transport: t,
-		ProtocolFactory: nil,
-		InputProtocol:   iprot,
-		OutputProtocol:  oprot,
-		SeqId:           0,
-	}
+  return &AgentClient{
+    c: thrift.NewTStandardClient(iprot, oprot),
+  }
+}
+
+func NewAgentClient(c thrift.TClient) *AgentClient {
+  return &AgentClient{
+    c: c,
+  }
+}
+
+func (p *AgentClient) Client_() thrift.TClient {
+  return p.c
+}
+
+func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta {
+  return p.meta
+}
+
+func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+  p.meta = meta
 }
 
 // Parameters:
 //  - Spans
-func (p *AgentClient) EmitZipkinBatch(spans []*zipkincore.Span) (err error) {
-	if err = p.sendEmitZipkinBatch(spans); err != nil {
-		return
-	}
-	return
-}
-
-func (p *AgentClient) sendEmitZipkinBatch(spans []*zipkincore.Span) (err error) {
-	oprot := p.OutputProtocol
-	if oprot == nil {
-		oprot = p.ProtocolFactory.GetProtocol(p.Transport)
-		p.OutputProtocol = oprot
-	}
-	p.SeqId++
-	if err = oprot.WriteMessageBegin("emitZipkinBatch", thrift.ONEWAY, p.SeqId); err != nil {
-		return
-	}
-	args := AgentEmitZipkinBatchArgs{
-		Spans: spans,
-	}
-	if err = args.Write(oprot); err != nil {
-		return
-	}
-	if err = oprot.WriteMessageEnd(); err != nil {
-		return
-	}
-	return oprot.Flush()
+func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) {
+  var _args0 AgentEmitZipkinBatchArgs
+  _args0.Spans = spans
+  p.SetLastResponseMeta_(thrift.ResponseMeta{})
+  if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil {
+    return err
+  }
+  return nil
 }
 
 // Parameters:
 //  - Batch
-func (p *AgentClient) EmitBatch(batch *jaeger.Batch) (err error) {
-	if err = p.sendEmitBatch(batch); err != nil {
-		return
-	}
-	return
-}
-
-func (p *AgentClient) sendEmitBatch(batch *jaeger.Batch) (err error) {
-	oprot := p.OutputProtocol
-	if oprot == nil {
-		oprot = p.ProtocolFactory.GetProtocol(p.Transport)
-		p.OutputProtocol = oprot
-	}
-	p.SeqId++
-	if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil {
-		return
-	}
-	args := AgentEmitBatchArgs{
-		Batch: batch,
-	}
-	if err = args.Write(oprot); err != nil {
-		return
-	}
-	if err = oprot.WriteMessageEnd(); err != nil {
-		return
-	}
-	return oprot.Flush()
+func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) {
+  var _args1 AgentEmitBatchArgs
+  _args1.Batch = batch
+  p.SetLastResponseMeta_(thrift.ResponseMeta{})
+  if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil {
+    return err
+  }
+  return nil
 }
 
 type AgentProcessor struct {
-	processorMap map[string]thrift.TProcessorFunction
-	handler      Agent
+  processorMap map[string]thrift.TProcessorFunction
+  handler Agent
 }
 
 func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
-	p.processorMap[key] = processor
+  p.processorMap[key] = processor
 }
 
 func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
-	processor, ok = p.processorMap[key]
-	return processor, ok
+  processor, ok = p.processorMap[key]
+  return processor, ok
 }
 
 func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
-	return p.processorMap
+  return p.processorMap
 }
 
 func NewAgentProcessor(handler Agent) *AgentProcessor {
 
-	self0 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
-	self0.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler}
-	self0.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
-	return self0
+  self2 := &AgentProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
+  self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler:handler}
+  self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler:handler}
+return self2
 }
 
-func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	name, _, seqId, err := iprot.ReadMessageBegin()
-	if err != nil {
-		return false, err
-	}
-	if processor, ok := p.GetProcessorFunction(name); ok {
-		return processor.Process(seqId, iprot, oprot)
-	}
-	iprot.Skip(thrift.STRUCT)
-	iprot.ReadMessageEnd()
-	x1 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
-	oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
-	x1.Write(oprot)
-	oprot.WriteMessageEnd()
-	oprot.Flush()
-	return false, x1
+func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+  if err2 != nil { return false, thrift.WrapTException(err2) }
+  if processor, ok := p.GetProcessorFunction(name); ok {
+    return processor.Process(ctx, seqId, iprot, oprot)
+  }
+  iprot.Skip(ctx, thrift.STRUCT)
+  iprot.ReadMessageEnd(ctx)
+  x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+  oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+  x3.Write(ctx, oprot)
+  oprot.WriteMessageEnd(ctx)
+  oprot.Flush(ctx)
+  return false, x3
 
 }
 
 type agentProcessorEmitZipkinBatch struct {
-	handler Agent
+  handler Agent
 }
 
-func (p *agentProcessorEmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	args := AgentEmitZipkinBatchArgs{}
-	if err = args.Read(iprot); err != nil {
-		iprot.ReadMessageEnd()
-		return false, err
-	}
+func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  args := AgentEmitZipkinBatchArgs{}
+  var err2 error
+  if err2 = args.Read(ctx, iprot); err2 != nil {
+    iprot.ReadMessageEnd(ctx)
+    return false, thrift.WrapTException(err2)
+  }
+  iprot.ReadMessageEnd(ctx)
 
-	iprot.ReadMessageEnd()
-	var err2 error
-	if err2 = p.handler.EmitZipkinBatch(args.Spans); err2 != nil {
-		return true, err2
-	}
-	return true, nil
+  tickerCancel := func() {}
+  _ = tickerCancel
+
+  if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil {
+    tickerCancel()
+    return true, thrift.WrapTException(err2)
+  }
+  tickerCancel()
+  return true, nil
 }
 
 type agentProcessorEmitBatch struct {
-	handler Agent
+  handler Agent
 }
 
-func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	args := AgentEmitBatchArgs{}
-	if err = args.Read(iprot); err != nil {
-		iprot.ReadMessageEnd()
-		return false, err
-	}
+func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  args := AgentEmitBatchArgs{}
+  var err2 error
+  if err2 = args.Read(ctx, iprot); err2 != nil {
+    iprot.ReadMessageEnd(ctx)
+    return false, thrift.WrapTException(err2)
+  }
+  iprot.ReadMessageEnd(ctx)
 
-	iprot.ReadMessageEnd()
-	var err2 error
-	if err2 = p.handler.EmitBatch(args.Batch); err2 != nil {
-		return true, err2
-	}
-	return true, nil
+  tickerCancel := func() {}
+  _ = tickerCancel
+
+  if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil {
+    tickerCancel()
+    return true, thrift.WrapTException(err2)
+  }
+  tickerCancel()
+  return true, nil
 }
 
+
 // HELPER FUNCTIONS AND STRUCTURES
 
 // Attributes:
 //  - Spans
 type AgentEmitZipkinBatchArgs struct {
-	Spans []*zipkincore.Span `thrift:"spans,1" json:"spans"`
+  Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"`
 }
 
 func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
-	return &AgentEmitZipkinBatchArgs{}
+  return &AgentEmitZipkinBatchArgs{}
 }
 
+
 func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
-	return p.Spans
+  return p.Spans
 }
-func (p *AgentEmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
+func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
 
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
 }
 
-func (p *AgentEmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin()
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*zipkincore.Span, 0, size)
-	p.Spans = tSlice
-	for i := 0; i < size; i++ {
-		_elem2 := &zipkincore.Span{}
-		if err := _elem2.Read(iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
-		}
-		p.Spans = append(p.Spans, _elem2)
-	}
-	if err := iprot.ReadListEnd(); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
+func (p *AgentEmitZipkinBatchArgs)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*zipkincore.Span, 0, size)
+  p.Spans =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem4 := &zipkincore.Span{}
+    if err := _elem4.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+    }
+    p.Spans = append(p.Spans, _elem4)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
 }
 
-func (p *AgentEmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("emitZipkinBatch_args"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
+func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
 }
 
-func (p *AgentEmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
-	}
-	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
-		return thrift.PrependError("error writing list begin: ", err)
-	}
-	for _, v := range p.Spans {
-		if err := v.Write(oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-		}
-	}
-	if err := oprot.WriteListEnd(); err != nil {
-		return thrift.PrependError("error writing list end: ", err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
-	}
-	return err
+func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Spans {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) }
+  return err
 }
 
 func (p *AgentEmitZipkinBatchArgs) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
 }
 
 // Attributes:
 //  - Batch
 type AgentEmitBatchArgs struct {
-	Batch *jaeger.Batch `thrift:"batch,1" json:"batch"`
+  Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"`
 }
 
 func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
-	return &AgentEmitBatchArgs{}
+  return &AgentEmitBatchArgs{}
 }
 
 var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch
-
 func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
-	if !p.IsSetBatch() {
-		return AgentEmitBatchArgs_Batch_DEFAULT
-	}
-	return p.Batch
+  if !p.IsSetBatch() {
+    return AgentEmitBatchArgs_Batch_DEFAULT
+  }
+  return p.Batch
 }
 func (p *AgentEmitBatchArgs) IsSetBatch() bool {
-	return p.Batch != nil
+  return p.Batch != nil
 }
 
-func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
+func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
 
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
 }
 
-func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error {
-	p.Batch = &jaeger.Batch{}
-	if err := p.Batch.Read(iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
-	}
-	return nil
+func (p *AgentEmitBatchArgs)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Batch = &jaeger.Batch{}
+  if err := p.Batch.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
+  }
+  return nil
 }
 
-func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("emitBatch_args"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
+func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
 }
 
-func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
-	}
-	if err := p.Batch.Write(oprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
-	}
-	return err
+func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) }
+  if err := p.Batch.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) }
+  return err
 }
 
 func (p *AgentEmitBatchArgs) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
 }
+
+
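
The change to this generated agent file is purely mechanical: every Read/Write/ReadFieldN/writeFieldN now takes a leading context.Context, matching the Thrift 0.14.1 generator. Call sites have to change in lock-step; the sketch below is not part of this change and only illustrates the new call shape, assuming oprot is whatever already-configured thrift.TProtocol the caller holds.

```go
// Hypothetical call site, not part of this change: it only illustrates the
// context-aware signatures produced by Thrift 0.14.1.
package example

import (
	"context"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/agent"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

// writeBatchArgs serializes an emitBatch argument struct with the regenerated
// API; oprot is assumed to be an already-configured thrift.TProtocol.
func writeBatchArgs(ctx context.Context, oprot thrift.TProtocol, batch *jaeger.Batch) error {
	args := agent.AgentEmitBatchArgs{Batch: batch}
	// Before the regeneration this was args.Write(oprot); every generated
	// serialization method now threads a context through to the protocol.
	return args.Write(ctx, oprot)
}
```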
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go
deleted file mode 100644
index 9c28f11..0000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package agent
-
-import (
-	"bytes"
-	"fmt"
-	"github.com/uber/jaeger-client-go/thrift"
-	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
-	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-var _ = jaeger.GoUnusedProtection__
-var _ = zipkincore.GoUnusedProtection__
-var GoUnusedProtection__ int
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go
new file mode 100644
index 0000000..712b6a9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package baggage
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go
similarity index 63%
rename from vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go
rename to vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go
index ed35ce9..39b5a7e 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go
@@ -1,18 +1,23 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
 
 package baggage
 
-import (
+import(
 	"bytes"
+	"context"
 	"fmt"
+	"time"
 	"github.com/uber/jaeger-client-go/thrift"
 )
 
 // (needed to ensure safety because of naive import list construction.)
 var _ = thrift.ZERO
 var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
 var _ = bytes.Equal
 
+
 func init() {
 }
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go
new file mode 100644
index 0000000..e4d89d5
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go
@@ -0,0 +1,565 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package baggage
+
+import(
+	"bytes"
+	"context"
+	"fmt"
+	"time"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+// Attributes:
+//  - BaggageKey
+//  - MaxValueLength
+type BaggageRestriction struct {
+  BaggageKey string `thrift:"baggageKey,1,required" db:"baggageKey" json:"baggageKey"`
+  MaxValueLength int32 `thrift:"maxValueLength,2,required" db:"maxValueLength" json:"maxValueLength"`
+}
+
+func NewBaggageRestriction() *BaggageRestriction {
+  return &BaggageRestriction{}
+}
+
+
+func (p *BaggageRestriction) GetBaggageKey() string {
+  return p.BaggageKey
+}
+
+func (p *BaggageRestriction) GetMaxValueLength() int32 {
+  return p.MaxValueLength
+}
+func (p *BaggageRestriction) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetBaggageKey bool = false;
+  var issetMaxValueLength bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetBaggageKey = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetMaxValueLength = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetBaggageKey{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set"));
+  }
+  if !issetMaxValueLength{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set"));
+  }
+  return nil
+}
+
+func (p *BaggageRestriction) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.BaggageKey = v
+  }
+  return nil
+}
+
+func (p *BaggageRestriction) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+    return thrift.PrependError("error reading field 2: ", err)
+  } else {
+    p.MaxValueLength = v
+  }
+  return nil
+}
+
+func (p *BaggageRestriction) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "BaggageRestriction"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *BaggageRestriction) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "baggageKey", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.BaggageKey)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err) }
+  return err
+}
+
+func (p *BaggageRestriction) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "maxValueLength", thrift.I32, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.MaxValueLength)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err) }
+  return err
+}
+
+func (p *BaggageRestriction) Equals(other *BaggageRestriction) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.BaggageKey != other.BaggageKey { return false }
+  if p.MaxValueLength != other.MaxValueLength { return false }
+  return true
+}
+
+func (p *BaggageRestriction) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("BaggageRestriction(%+v)", *p)
+}
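
Besides the context plumbing, the 0.14.1 generator also emits value helpers such as Equals on the plain structs. A purely illustrative sketch of using the regenerated BaggageRestriction type:

```go
// Purely illustrative, not part of this change.
package example

import "github.com/uber/jaeger-client-go/thrift-gen/baggage"

func restrictionsMatch() bool {
	a := &baggage.BaggageRestriction{BaggageKey: "session-id", MaxValueLength: 256}
	b := baggage.NewBaggageRestriction()
	b.BaggageKey = "session-id"
	b.MaxValueLength = 256
	// Equals does a field-by-field comparison and is new in the 0.14.1 output.
	return a.Equals(b)
}
```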
+
+type BaggageRestrictionManager interface {
+  // getBaggageRestrictions retrieves the baggage restrictions for a specific service.
+  // Usually, baggageRestrictions apply to all services however there may be situations
+  // where a baggageKey might only be allowed to be set by a specific service.
+  // 
+  // Parameters:
+  //  - ServiceName
+  GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error)
+}
+
+type BaggageRestrictionManagerClient struct {
+  c thrift.TClient
+  meta thrift.ResponseMeta
+}
+
+func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient {
+  return &BaggageRestrictionManagerClient{
+    c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+  }
+}
+
+func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient {
+  return &BaggageRestrictionManagerClient{
+    c: thrift.NewTStandardClient(iprot, oprot),
+  }
+}
+
+func NewBaggageRestrictionManagerClient(c thrift.TClient) *BaggageRestrictionManagerClient {
+  return &BaggageRestrictionManagerClient{
+    c: c,
+  }
+}
+
+func (p *BaggageRestrictionManagerClient) Client_() thrift.TClient {
+  return p.c
+}
+
+func (p *BaggageRestrictionManagerClient) LastResponseMeta_() thrift.ResponseMeta {
+  return p.meta
+}
+
+func (p *BaggageRestrictionManagerClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+  p.meta = meta
+}
+
+// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
+// Usually, baggageRestrictions apply to all services however there may be situations
+// where a baggageKey might only be allowed to be set by a specific service.
+// 
+// Parameters:
+//  - ServiceName
+func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error) {
+  var _args0 BaggageRestrictionManagerGetBaggageRestrictionsArgs
+  _args0.ServiceName = serviceName
+  var _result2 BaggageRestrictionManagerGetBaggageRestrictionsResult
+  var _meta1 thrift.ResponseMeta
+  _meta1, _err = p.Client_().Call(ctx, "getBaggageRestrictions", &_args0, &_result2)
+  p.SetLastResponseMeta_(_meta1)
+  if _err != nil {
+    return
+  }
+  return _result2.GetSuccess(), nil
+}
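
The doc comment above describes the only RPC on this service; with the regenerated code the client is built from a bare thrift.TClient and every call takes a context. A minimal usage sketch, assuming the caller has already wired up a thrift.TClient (transport and protocol setup is out of scope here):

```go
// Hypothetical caller, not part of this change; c is assumed to be a
// thrift.TClient the surrounding code has already connected to an agent.
package example

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/baggage"
)

func fetchRestrictions(ctx context.Context, c thrift.TClient, service string) error {
	client := baggage.NewBaggageRestrictionManagerClient(c)
	restrictions, err := client.GetBaggageRestrictions(ctx, service)
	if err != nil {
		return err
	}
	for _, r := range restrictions {
		fmt.Printf("key=%s maxValueLength=%d\n", r.GetBaggageKey(), r.GetMaxValueLength())
	}
	return nil
}
```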
+
+type BaggageRestrictionManagerProcessor struct {
+  processorMap map[string]thrift.TProcessorFunction
+  handler BaggageRestrictionManager
+}
+
+func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+  p.processorMap[key] = processor
+}
+
+func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+  processor, ok = p.processorMap[key]
+  return processor, ok
+}
+
+func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+  return p.processorMap
+}
+
+func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor {
+
+  self3 := &BaggageRestrictionManagerProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
+  self3.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler:handler}
+return self3
+}
+
+func (p *BaggageRestrictionManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+  if err2 != nil { return false, thrift.WrapTException(err2) }
+  if processor, ok := p.GetProcessorFunction(name); ok {
+    return processor.Process(ctx, seqId, iprot, oprot)
+  }
+  iprot.Skip(ctx, thrift.STRUCT)
+  iprot.ReadMessageEnd(ctx)
+  x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+  oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+  x4.Write(ctx, oprot)
+  oprot.WriteMessageEnd(ctx)
+  oprot.Flush(ctx)
+  return false, x4
+
+}
+
+type baggageRestrictionManagerProcessorGetBaggageRestrictions struct {
+  handler BaggageRestrictionManager
+}
+
+func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
+  var err2 error
+  if err2 = args.Read(ctx, iprot); err2 != nil {
+    iprot.ReadMessageEnd(ctx)
+    x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+    oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return false, thrift.WrapTException(err2)
+  }
+  iprot.ReadMessageEnd(ctx)
+
+  tickerCancel := func() {}
+  // Start a goroutine to do server side connectivity check.
+  if thrift.ServerConnectivityCheckInterval > 0 {
+    var cancel context.CancelFunc
+    ctx, cancel = context.WithCancel(ctx)
+    defer cancel()
+    var tickerCtx context.Context
+    tickerCtx, tickerCancel = context.WithCancel(context.Background())
+    defer tickerCancel()
+    go func(ctx context.Context, cancel context.CancelFunc) {
+      ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+      defer ticker.Stop()
+      for {
+        select {
+        case <-ctx.Done():
+          return
+        case <-ticker.C:
+          if !iprot.Transport().IsOpen() {
+            cancel()
+            return
+          }
+        }
+      }
+    }(tickerCtx, cancel)
+  }
+
+  result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
+  var retval []*BaggageRestriction
+  if retval, err2 = p.handler.GetBaggageRestrictions(ctx, args.ServiceName); err2 != nil {
+    tickerCancel()
+    if err2 == thrift.ErrAbandonRequest {
+      return false, thrift.WrapTException(err2)
+    }
+    x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: " + err2.Error())
+    oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return true, thrift.WrapTException(err2)
+  } else {
+    result.Success = retval
+  }
+  tickerCancel()
+  if err2 = oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err != nil {
+    return
+  }
+  return true, err
+}
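
The goroutine above is new in 0.14.1-generated processors: when thrift.ServerConnectivityCheckInterval is a positive duration, the transport is polled and the request context is cancelled once the client connection drops. A handler implementation can honour that cancellation; a hypothetical handler sketch, not part of this change:

```go
// Hypothetical handler, not part of this change; it only shows that the
// request context handed to the service implementation is cancellable.
package example

import (
	"context"

	"github.com/uber/jaeger-client-go/thrift-gen/baggage"
)

type staticRestrictionManager struct {
	restrictions []*baggage.BaggageRestriction
}

func (m *staticRestrictionManager) GetBaggageRestrictions(ctx context.Context, serviceName string) ([]*baggage.BaggageRestriction, error) {
	select {
	case <-ctx.Done():
		// The generated processor cancels ctx when the connectivity check
		// notices the client transport is no longer open.
		return nil, ctx.Err()
	default:
		return m.restrictions, nil
	}
}
```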
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - ServiceName
+type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct {
+  ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"`
+}
+
+func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs {
+  return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
+}
+
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string {
+  return p.ServiceName
+}
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.ServiceName = v
+  }
+  return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_args"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) }
+  return err
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Success
+type BaggageRestrictionManagerGetBaggageRestrictionsResult struct {
+  Success []*BaggageRestriction `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult {
+  return &BaggageRestrictionManagerGetBaggageRestrictionsResult{}
+}
+
+var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction {
+  return p.Success
+}
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool {
+  return p.Success != nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 0:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField0(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult)  ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*BaggageRestriction, 0, size)
+  p.Success =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem5 := &BaggageRestriction{}
+    if err := _elem5.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err)
+    }
+    p.Success = append(p.Success, _elem5)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_result"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField0(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSuccess() {
+    if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.Success {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+  }
+  return err
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p)
+}
+
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go
deleted file mode 100644
index 1f79c12..0000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go
+++ /dev/null
@@ -1,435 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package baggage
-
-import (
-	"bytes"
-	"fmt"
-	"github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-type BaggageRestrictionManager interface {
-	// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
-	// Usually, baggageRestrictions apply to all services however there may be situations
-	// where a baggageKey might only be allowed to be set by a specific service.
-	//
-	// Parameters:
-	//  - ServiceName
-	GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error)
-}
-
-type BaggageRestrictionManagerClient struct {
-	Transport       thrift.TTransport
-	ProtocolFactory thrift.TProtocolFactory
-	InputProtocol   thrift.TProtocol
-	OutputProtocol  thrift.TProtocol
-	SeqId           int32
-}
-
-func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient {
-	return &BaggageRestrictionManagerClient{Transport: t,
-		ProtocolFactory: f,
-		InputProtocol:   f.GetProtocol(t),
-		OutputProtocol:  f.GetProtocol(t),
-		SeqId:           0,
-	}
-}
-
-func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient {
-	return &BaggageRestrictionManagerClient{Transport: t,
-		ProtocolFactory: nil,
-		InputProtocol:   iprot,
-		OutputProtocol:  oprot,
-		SeqId:           0,
-	}
-}
-
-// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
-// Usually, baggageRestrictions apply to all services however there may be situations
-// where a baggageKey might only be allowed to be set by a specific service.
-//
-// Parameters:
-//  - ServiceName
-func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error) {
-	if err = p.sendGetBaggageRestrictions(serviceName); err != nil {
-		return
-	}
-	return p.recvGetBaggageRestrictions()
-}
-
-func (p *BaggageRestrictionManagerClient) sendGetBaggageRestrictions(serviceName string) (err error) {
-	oprot := p.OutputProtocol
-	if oprot == nil {
-		oprot = p.ProtocolFactory.GetProtocol(p.Transport)
-		p.OutputProtocol = oprot
-	}
-	p.SeqId++
-	if err = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.CALL, p.SeqId); err != nil {
-		return
-	}
-	args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{
-		ServiceName: serviceName,
-	}
-	if err = args.Write(oprot); err != nil {
-		return
-	}
-	if err = oprot.WriteMessageEnd(); err != nil {
-		return
-	}
-	return oprot.Flush()
-}
-
-func (p *BaggageRestrictionManagerClient) recvGetBaggageRestrictions() (value []*BaggageRestriction, err error) {
-	iprot := p.InputProtocol
-	if iprot == nil {
-		iprot = p.ProtocolFactory.GetProtocol(p.Transport)
-		p.InputProtocol = iprot
-	}
-	method, mTypeId, seqId, err := iprot.ReadMessageBegin()
-	if err != nil {
-		return
-	}
-	if method != "getBaggageRestrictions" {
-		err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getBaggageRestrictions failed: wrong method name")
-		return
-	}
-	if p.SeqId != seqId {
-		err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getBaggageRestrictions failed: out of sequence response")
-		return
-	}
-	if mTypeId == thrift.EXCEPTION {
-		error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
-		var error1 error
-		error1, err = error0.Read(iprot)
-		if err != nil {
-			return
-		}
-		if err = iprot.ReadMessageEnd(); err != nil {
-			return
-		}
-		err = error1
-		return
-	}
-	if mTypeId != thrift.REPLY {
-		err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getBaggageRestrictions failed: invalid message type")
-		return
-	}
-	result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
-	if err = result.Read(iprot); err != nil {
-		return
-	}
-	if err = iprot.ReadMessageEnd(); err != nil {
-		return
-	}
-	value = result.GetSuccess()
-	return
-}
-
-type BaggageRestrictionManagerProcessor struct {
-	processorMap map[string]thrift.TProcessorFunction
-	handler      BaggageRestrictionManager
-}
-
-func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
-	p.processorMap[key] = processor
-}
-
-func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
-	processor, ok = p.processorMap[key]
-	return processor, ok
-}
-
-func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
-	return p.processorMap
-}
-
-func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor {
-
-	self2 := &BaggageRestrictionManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
-	self2.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler: handler}
-	return self2
-}
-
-func (p *BaggageRestrictionManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	name, _, seqId, err := iprot.ReadMessageBegin()
-	if err != nil {
-		return false, err
-	}
-	if processor, ok := p.GetProcessorFunction(name); ok {
-		return processor.Process(seqId, iprot, oprot)
-	}
-	iprot.Skip(thrift.STRUCT)
-	iprot.ReadMessageEnd()
-	x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
-	oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
-	x3.Write(oprot)
-	oprot.WriteMessageEnd()
-	oprot.Flush()
-	return false, x3
-
-}
-
-type baggageRestrictionManagerProcessorGetBaggageRestrictions struct {
-	handler BaggageRestrictionManager
-}
-
-func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
-	if err = args.Read(iprot); err != nil {
-		iprot.ReadMessageEnd()
-		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
-		oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId)
-		x.Write(oprot)
-		oprot.WriteMessageEnd()
-		oprot.Flush()
-		return false, err
-	}
-
-	iprot.ReadMessageEnd()
-	result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
-	var retval []*BaggageRestriction
-	var err2 error
-	if retval, err2 = p.handler.GetBaggageRestrictions(args.ServiceName); err2 != nil {
-		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: "+err2.Error())
-		oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId)
-		x.Write(oprot)
-		oprot.WriteMessageEnd()
-		oprot.Flush()
-		return true, err2
-	} else {
-		result.Success = retval
-	}
-	if err2 = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil {
-		err = err2
-	}
-	if err2 = result.Write(oprot); err == nil && err2 != nil {
-		err = err2
-	}
-	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
-		err = err2
-	}
-	if err2 = oprot.Flush(); err == nil && err2 != nil {
-		err = err2
-	}
-	if err != nil {
-		return
-	}
-	return true, err
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-//  - ServiceName
-type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct {
-	ServiceName string `thrift:"serviceName,1" json:"serviceName"`
-}
-
-func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs {
-	return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string {
-	return p.ServiceName
-}
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.ServiceName = v
-	}
-	return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("getBaggageRestrictions_args"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
-	}
-	if err := oprot.WriteString(string(p.ServiceName)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
-	}
-	return err
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p)
-}
-
-// Attributes:
-//  - Success
-type BaggageRestrictionManagerGetBaggageRestrictionsResult struct {
-	Success []*BaggageRestriction `thrift:"success,0" json:"success,omitempty"`
-}
-
-func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult {
-	return &BaggageRestrictionManagerGetBaggageRestrictionsResult{}
-}
-
-var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction {
-	return p.Success
-}
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool {
-	return p.Success != nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 0:
-			if err := p.readField0(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) readField0(iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin()
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*BaggageRestriction, 0, size)
-	p.Success = tSlice
-	for i := 0; i < size; i++ {
-		_elem4 := &BaggageRestriction{}
-		if err := _elem4.Read(iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
-		}
-		p.Success = append(p.Success, _elem4)
-	}
-	if err := iprot.ReadListEnd(); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("getBaggageRestrictions_result"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField0(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(oprot thrift.TProtocol) (err error) {
-	if p.IsSetSuccess() {
-		if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
-		}
-		if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil {
-			return thrift.PrependError("error writing list begin: ", err)
-		}
-		for _, v := range p.Success {
-			if err := v.Write(oprot); err != nil {
-				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-			}
-		}
-		if err := oprot.WriteListEnd(); err != nil {
-			return thrift.PrependError("error writing list end: ", err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go
deleted file mode 100644
index 7888892..0000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package baggage
-
-import (
-	"bytes"
-	"fmt"
-	"github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-var GoUnusedProtection__ int
-
-// Attributes:
-//  - BaggageKey
-//  - MaxValueLength
-type BaggageRestriction struct {
-	BaggageKey     string `thrift:"baggageKey,1,required" json:"baggageKey"`
-	MaxValueLength int32  `thrift:"maxValueLength,2,required" json:"maxValueLength"`
-}
-
-func NewBaggageRestriction() *BaggageRestriction {
-	return &BaggageRestriction{}
-}
-
-func (p *BaggageRestriction) GetBaggageKey() string {
-	return p.BaggageKey
-}
-
-func (p *BaggageRestriction) GetMaxValueLength() int32 {
-	return p.MaxValueLength
-}
-func (p *BaggageRestriction) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetBaggageKey bool = false
-	var issetMaxValueLength bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetBaggageKey = true
-		case 2:
-			if err := p.readField2(iprot); err != nil {
-				return err
-			}
-			issetMaxValueLength = true
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetBaggageKey {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set"))
-	}
-	if !issetMaxValueLength {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set"))
-	}
-	return nil
-}
-
-func (p *BaggageRestriction) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.BaggageKey = v
-	}
-	return nil
-}
-
-func (p *BaggageRestriction) readField2(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI32(); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		p.MaxValueLength = v
-	}
-	return nil
-}
-
-func (p *BaggageRestriction) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("BaggageRestriction"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField2(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *BaggageRestriction) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("baggageKey", thrift.STRING, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err)
-	}
-	if err := oprot.WriteString(string(p.BaggageKey)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err)
-	}
-	return err
-}
-
-func (p *BaggageRestriction) writeField2(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("maxValueLength", thrift.I32, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err)
-	}
-	if err := oprot.WriteI32(int32(p.MaxValueLength)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err)
-	}
-	return err
-}
-
-func (p *BaggageRestriction) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("BaggageRestriction(%+v)", *p)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go
new file mode 100644
index 0000000..fe45a9f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package jaeger
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go
deleted file mode 100644
index b32c37d..0000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package jaeger
-
-import (
-	"bytes"
-	"fmt"
-	"github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-type Agent interface {
-	// Parameters:
-	//  - Batch
-	EmitBatch(batch *Batch) (err error)
-}
-
-type AgentClient struct {
-	Transport       thrift.TTransport
-	ProtocolFactory thrift.TProtocolFactory
-	InputProtocol   thrift.TProtocol
-	OutputProtocol  thrift.TProtocol
-	SeqId           int32
-}
-
-func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
-	return &AgentClient{Transport: t,
-		ProtocolFactory: f,
-		InputProtocol:   f.GetProtocol(t),
-		OutputProtocol:  f.GetProtocol(t),
-		SeqId:           0,
-	}
-}
-
-func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
-	return &AgentClient{Transport: t,
-		ProtocolFactory: nil,
-		InputProtocol:   iprot,
-		OutputProtocol:  oprot,
-		SeqId:           0,
-	}
-}
-
-// Parameters:
-//  - Batch
-func (p *AgentClient) EmitBatch(batch *Batch) (err error) {
-	if err = p.sendEmitBatch(batch); err != nil {
-		return
-	}
-	return
-}
-
-func (p *AgentClient) sendEmitBatch(batch *Batch) (err error) {
-	oprot := p.OutputProtocol
-	if oprot == nil {
-		oprot = p.ProtocolFactory.GetProtocol(p.Transport)
-		p.OutputProtocol = oprot
-	}
-	p.SeqId++
-	if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil {
-		return
-	}
-	args := AgentEmitBatchArgs{
-		Batch: batch,
-	}
-	if err = args.Write(oprot); err != nil {
-		return
-	}
-	if err = oprot.WriteMessageEnd(); err != nil {
-		return
-	}
-	return oprot.Flush()
-}
-
-type AgentProcessor struct {
-	processorMap map[string]thrift.TProcessorFunction
-	handler      Agent
-}
-
-func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
-	p.processorMap[key] = processor
-}
-
-func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
-	processor, ok = p.processorMap[key]
-	return processor, ok
-}
-
-func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
-	return p.processorMap
-}
-
-func NewAgentProcessor(handler Agent) *AgentProcessor {
-
-	self6 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
-	self6.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
-	return self6
-}
-
-func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	name, _, seqId, err := iprot.ReadMessageBegin()
-	if err != nil {
-		return false, err
-	}
-	if processor, ok := p.GetProcessorFunction(name); ok {
-		return processor.Process(seqId, iprot, oprot)
-	}
-	iprot.Skip(thrift.STRUCT)
-	iprot.ReadMessageEnd()
-	x7 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
-	oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
-	x7.Write(oprot)
-	oprot.WriteMessageEnd()
-	oprot.Flush()
-	return false, x7
-
-}
-
-type agentProcessorEmitBatch struct {
-	handler Agent
-}
-
-func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	args := AgentEmitBatchArgs{}
-	if err = args.Read(iprot); err != nil {
-		iprot.ReadMessageEnd()
-		return false, err
-	}
-
-	iprot.ReadMessageEnd()
-	var err2 error
-	if err2 = p.handler.EmitBatch(args.Batch); err2 != nil {
-		return true, err2
-	}
-	return true, nil
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-//  - Batch
-type AgentEmitBatchArgs struct {
-	Batch *Batch `thrift:"batch,1" json:"batch"`
-}
-
-func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
-	return &AgentEmitBatchArgs{}
-}
-
-var AgentEmitBatchArgs_Batch_DEFAULT *Batch
-
-func (p *AgentEmitBatchArgs) GetBatch() *Batch {
-	if !p.IsSetBatch() {
-		return AgentEmitBatchArgs_Batch_DEFAULT
-	}
-	return p.Batch
-}
-func (p *AgentEmitBatchArgs) IsSetBatch() bool {
-	return p.Batch != nil
-}
-
-func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error {
-	p.Batch = &Batch{}
-	if err := p.Batch.Read(iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
-	}
-	return nil
-}
-
-func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("emitBatch_args"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
-	}
-	if err := p.Batch.Write(oprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
-	}
-	return err
-}
-
-func (p *AgentEmitBatchArgs) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
-}
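
The hunk above removes the tail of the Thrift 0.9.3-generated agent.go (the AgentProcessor dispatch map and the emitBatch argument struct); the 0.14.1 regeneration elsewhere in this change keeps the same dispatch shape but threads a context.Context through every Process/Read/Write call, as the struct methods in the new jaeger.go below illustrate. For orientation only, a minimal sketch of how the removed, context-free processor was wired up — the noopAgent handler is hypothetical and compiles only against the pre-migration package:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

// noopAgent is a hypothetical handler for the old, context-free Agent
// interface, whose only method is the oneway emitBatch removed above.
type noopAgent struct{}

func (noopAgent) EmitBatch(batch *jaeger.Batch) error { return nil }

func main() {
	// NewAgentProcessor registers exactly one entry, "emitBatch", in its
	// name -> TProcessorFunction map; Process answers unknown method names
	// with a TApplicationException(UNKNOWN_METHOD), as shown in the
	// deleted code above.
	p := jaeger.NewAgentProcessor(noopAgent{})
	_, ok := p.GetProcessorFunction("emitBatch")
	fmt.Println("emitBatch registered:", ok)
}
```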
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go
similarity index 63%
rename from vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go
rename to vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go
index 621b8b1..b6ce855 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go
@@ -1,18 +1,23 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
 
 package jaeger
 
-import (
+import(
 	"bytes"
+	"context"
 	"fmt"
+	"time"
 	"github.com/uber/jaeger-client-go/thrift"
 )
 
 // (needed to ensure safety because of naive import list construction.)
 var _ = thrift.ZERO
 var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
 var _ = bytes.Equal
 
+
 func init() {
 }
+
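
The regenerated jaeger.go added below exposes the same Tag/Log/Span/Process structs, but every Read/Write method now takes a leading context.Context and each struct gains an Equals helper. A minimal round-trip sketch of the new API follows; the Tag methods are the generated ones in the new file, while NewTMemoryBuffer and NewTBinaryProtocolConf are assumed to be provided by the vendored Apache Thrift 0.14 package (those constructor names are an assumption, not shown in this diff):

```go
package main

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

func main() {
	ctx := context.Background()

	val := "olt-adapter"
	tag := jaeger.NewTag()
	tag.Key = "component"             // required field 1
	tag.VType = jaeger.TagType_STRING // required field 2
	tag.VStr = &val                   // optional field 3

	// Encode into an in-memory transport with the binary protocol
	// (constructors assumed from the vendored Thrift 0.14 library).
	buf := thrift.NewTMemoryBuffer()
	oprot := thrift.NewTBinaryProtocolConf(buf, nil)
	if err := tag.Write(ctx, oprot); err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", buf.Len())

	// Decode from the same buffer and compare with the generated Equals.
	decoded := jaeger.NewTag()
	iprot := thrift.NewTBinaryProtocolConf(buf, nil)
	if err := decoded.Read(ctx, iprot); err != nil {
		panic(err)
	}
	fmt.Println("round-trip equal:", decoded.Equals(tag))
}
```

Because TMemoryBuffer wraps a bytes.Buffer, the bytes written by Write can be read back immediately without an explicit flush, which keeps the sketch free of any network transport.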
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go
new file mode 100644
index 0000000..d55cca0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go
@@ -0,0 +1,2698 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package jaeger
+
+import(
+	"bytes"
+	"context"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"time"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+type TagType int64
+const (
+  TagType_STRING TagType = 0
+  TagType_DOUBLE TagType = 1
+  TagType_BOOL TagType = 2
+  TagType_LONG TagType = 3
+  TagType_BINARY TagType = 4
+)
+
+func (p TagType) String() string {
+  switch p {
+  case TagType_STRING: return "STRING"
+  case TagType_DOUBLE: return "DOUBLE"
+  case TagType_BOOL: return "BOOL"
+  case TagType_LONG: return "LONG"
+  case TagType_BINARY: return "BINARY"
+  }
+  return "<UNSET>"
+}
+
+func TagTypeFromString(s string) (TagType, error) {
+  switch s {
+  case "STRING": return TagType_STRING, nil 
+  case "DOUBLE": return TagType_DOUBLE, nil 
+  case "BOOL": return TagType_BOOL, nil 
+  case "LONG": return TagType_LONG, nil 
+  case "BINARY": return TagType_BINARY, nil 
+  }
+  return TagType(0), fmt.Errorf("not a valid TagType string")
+}
+
+
+func TagTypePtr(v TagType) *TagType { return &v }
+
+func (p TagType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *TagType) UnmarshalText(text []byte) error {
+q, err := TagTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *TagType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = TagType(v)
+return nil
+}
+
+func (p * TagType) Value() (driver.Value, error) {
+  if p == nil {
+    return nil, nil
+  }
+return int64(*p), nil
+}
+type SpanRefType int64
+const (
+  SpanRefType_CHILD_OF SpanRefType = 0
+  SpanRefType_FOLLOWS_FROM SpanRefType = 1
+)
+
+func (p SpanRefType) String() string {
+  switch p {
+  case SpanRefType_CHILD_OF: return "CHILD_OF"
+  case SpanRefType_FOLLOWS_FROM: return "FOLLOWS_FROM"
+  }
+  return "<UNSET>"
+}
+
+func SpanRefTypeFromString(s string) (SpanRefType, error) {
+  switch s {
+  case "CHILD_OF": return SpanRefType_CHILD_OF, nil 
+  case "FOLLOWS_FROM": return SpanRefType_FOLLOWS_FROM, nil 
+  }
+  return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string")
+}
+
+
+func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v }
+
+func (p SpanRefType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *SpanRefType) UnmarshalText(text []byte) error {
+q, err := SpanRefTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *SpanRefType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = SpanRefType(v)
+return nil
+}
+
+func (p * SpanRefType) Value() (driver.Value, error) {
+  if p == nil {
+    return nil, nil
+  }
+return int64(*p), nil
+}
+// Attributes:
+//  - Key
+//  - VType
+//  - VStr
+//  - VDouble
+//  - VBool
+//  - VLong
+//  - VBinary
+type Tag struct {
+  Key string `thrift:"key,1,required" db:"key" json:"key"`
+  VType TagType `thrift:"vType,2,required" db:"vType" json:"vType"`
+  VStr *string `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"`
+  VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"`
+  VBool *bool `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"`
+  VLong *int64 `thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"`
+  VBinary []byte `thrift:"vBinary,7" db:"vBinary" json:"vBinary,omitempty"`
+}
+
+func NewTag() *Tag {
+  return &Tag{}
+}
+
+
+func (p *Tag) GetKey() string {
+  return p.Key
+}
+
+func (p *Tag) GetVType() TagType {
+  return p.VType
+}
+var Tag_VStr_DEFAULT string
+func (p *Tag) GetVStr() string {
+  if !p.IsSetVStr() {
+    return Tag_VStr_DEFAULT
+  }
+return *p.VStr
+}
+var Tag_VDouble_DEFAULT float64
+func (p *Tag) GetVDouble() float64 {
+  if !p.IsSetVDouble() {
+    return Tag_VDouble_DEFAULT
+  }
+return *p.VDouble
+}
+var Tag_VBool_DEFAULT bool
+func (p *Tag) GetVBool() bool {
+  if !p.IsSetVBool() {
+    return Tag_VBool_DEFAULT
+  }
+return *p.VBool
+}
+var Tag_VLong_DEFAULT int64
+func (p *Tag) GetVLong() int64 {
+  if !p.IsSetVLong() {
+    return Tag_VLong_DEFAULT
+  }
+return *p.VLong
+}
+var Tag_VBinary_DEFAULT []byte
+
+func (p *Tag) GetVBinary() []byte {
+  return p.VBinary
+}
+func (p *Tag) IsSetVStr() bool {
+  return p.VStr != nil
+}
+
+func (p *Tag) IsSetVDouble() bool {
+  return p.VDouble != nil
+}
+
+func (p *Tag) IsSetVBool() bool {
+  return p.VBool != nil
+}
+
+func (p *Tag) IsSetVLong() bool {
+  return p.VLong != nil
+}
+
+func (p *Tag) IsSetVBinary() bool {
+  return p.VBinary != nil
+}
+
+func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetKey bool = false;
+  var issetVType bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetKey = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetVType = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 5:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField5(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 6:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField6(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 7:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField7(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetKey{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set"));
+  }
+  if !issetVType{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set"));
+  }
+  return nil
+}
+
+func (p *Tag)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Key = v
+}
+  return nil
+}
+
+func (p *Tag)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  temp := TagType(v)
+  p.VType = temp
+}
+  return nil
+}
+
+func (p *Tag)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.VStr = &v
+}
+  return nil
+}
+
+func (p *Tag)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+  return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.VDouble = &v
+}
+  return nil
+}
+
+func (p *Tag)  ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 5: ", err)
+} else {
+  p.VBool = &v
+}
+  return nil
+}
+
+func (p *Tag)  ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 6: ", err)
+} else {
+  p.VLong = &v
+}
+  return nil
+}
+
+func (p *Tag)  ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBinary(ctx); err != nil {
+  return thrift.PrependError("error reading field 7: ", err)
+} else {
+  p.VBinary = v
+}
+  return nil
+}
+
+func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+    if err := p.writeField5(ctx, oprot); err != nil { return err }
+    if err := p.writeField6(ctx, oprot); err != nil { return err }
+    if err := p.writeField7(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) }
+  return err
+}
+
+func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) }
+  return err
+}
+
+func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetVStr() {
+    if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) }
+  }
+  return err
+}
+
+func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetVDouble() {
+    if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) }
+    if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) }
+  }
+  return err
+}
+
+func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetVBool() {
+    if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) }
+  }
+  return err
+}
+
+func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetVLong() {
+    if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) }
+  }
+  return err
+}
+
+func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetVBinary() {
+    if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) }
+    if err := oprot.WriteBinary(ctx, p.VBinary); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) }
+  }
+  return err
+}
+
+func (p *Tag) Equals(other *Tag) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Key != other.Key { return false }
+  if p.VType != other.VType { return false }
+  if p.VStr != other.VStr {
+    if p.VStr == nil || other.VStr == nil {
+      return false
+    }
+    if (*p.VStr) != (*other.VStr) { return false }
+  }
+  if p.VDouble != other.VDouble {
+    if p.VDouble == nil || other.VDouble == nil {
+      return false
+    }
+    if (*p.VDouble) != (*other.VDouble) { return false }
+  }
+  if p.VBool != other.VBool {
+    if p.VBool == nil || other.VBool == nil {
+      return false
+    }
+    if (*p.VBool) != (*other.VBool) { return false }
+  }
+  if p.VLong != other.VLong {
+    if p.VLong == nil || other.VLong == nil {
+      return false
+    }
+    if (*p.VLong) != (*other.VLong) { return false }
+  }
+  if bytes.Compare(p.VBinary, other.VBinary) != 0 { return false }
+  return true
+}
+
+func (p *Tag) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Tag(%+v)", *p)
+}
+
+// Attributes:
+//  - Timestamp
+//  - Fields
+type Log struct {
+  Timestamp int64 `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"`
+  Fields []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"`
+}
+
+func NewLog() *Log {
+  return &Log{}
+}
+
+
+func (p *Log) GetTimestamp() int64 {
+  return p.Timestamp
+}
+
+func (p *Log) GetFields() []*Tag {
+  return p.Fields
+}
+func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetTimestamp bool = false;
+  var issetFields bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetTimestamp = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetFields = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetTimestamp{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set"));
+  }
+  if !issetFields{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set"));
+  }
+  return nil
+}
+
+func (p *Log)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Timestamp = v
+}
+  return nil
+}
+
+func (p *Log)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Tag, 0, size)
+  p.Fields =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem0 := &Tag{}
+    if err := _elem0.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+    }
+    p.Fields = append(p.Fields, _elem0)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Log"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) }
+  return err
+}
+
+func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Fields {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) }
+  return err
+}
+
+func (p *Log) Equals(other *Log) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Timestamp != other.Timestamp { return false }
+  if len(p.Fields) != len(other.Fields) { return false }
+  for i, _tgt := range p.Fields {
+    _src1 := other.Fields[i]
+    if !_tgt.Equals(_src1) { return false }
+  }
+  return true
+}
+
+func (p *Log) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Log(%+v)", *p)
+}
+
+// Attributes:
+//  - RefType
+//  - TraceIdLow
+//  - TraceIdHigh
+//  - SpanId
+type SpanRef struct {
+  RefType SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"`
+  TraceIdLow int64 `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"`
+  TraceIdHigh int64 `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"`
+  SpanId int64 `thrift:"spanId,4,required" db:"spanId" json:"spanId"`
+}
+
+func NewSpanRef() *SpanRef {
+  return &SpanRef{}
+}
+
+
+func (p *SpanRef) GetRefType() SpanRefType {
+  return p.RefType
+}
+
+func (p *SpanRef) GetTraceIdLow() int64 {
+  return p.TraceIdLow
+}
+
+func (p *SpanRef) GetTraceIdHigh() int64 {
+  return p.TraceIdHigh
+}
+
+func (p *SpanRef) GetSpanId() int64 {
+  return p.SpanId
+}
+func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetRefType bool = false;
+  var issetTraceIdLow bool = false;
+  var issetTraceIdHigh bool = false;
+  var issetSpanId bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetRefType = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetTraceIdLow = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+        issetTraceIdHigh = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+        issetSpanId = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetRefType{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set"));
+  }
+  if !issetTraceIdLow{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"));
+  }
+  if !issetTraceIdHigh{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"));
+  }
+  if !issetSpanId{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"));
+  }
+  return nil
+}
+
+func (p *SpanRef)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  temp := SpanRefType(v)
+  p.RefType = temp
+}
+  return nil
+}
+
+func (p *SpanRef)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.TraceIdLow = v
+}
+  return nil
+}
+
+func (p *SpanRef)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.TraceIdHigh = v
+}
+  return nil
+}
+
+func (p *SpanRef)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.SpanId = v
+}
+  return nil
+}
+
+func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) }
+  return err
+}
+
+func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) }
+  return err
+}
+
+func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) }
+  return err
+}
+
+func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) }
+  return err
+}
+
+func (p *SpanRef) Equals(other *SpanRef) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.RefType != other.RefType { return false }
+  if p.TraceIdLow != other.TraceIdLow { return false }
+  if p.TraceIdHigh != other.TraceIdHigh { return false }
+  if p.SpanId != other.SpanId { return false }
+  return true
+}
+
+func (p *SpanRef) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("SpanRef(%+v)", *p)
+}
+
+// Attributes:
+//  - TraceIdLow
+//  - TraceIdHigh
+//  - SpanId
+//  - ParentSpanId
+//  - OperationName
+//  - References
+//  - Flags
+//  - StartTime
+//  - Duration
+//  - Tags
+//  - Logs
+type Span struct {
+  TraceIdLow int64 `thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"`
+  TraceIdHigh int64 `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"`
+  SpanId int64 `thrift:"spanId,3,required" db:"spanId" json:"spanId"`
+  ParentSpanId int64 `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"`
+  OperationName string `thrift:"operationName,5,required" db:"operationName" json:"operationName"`
+  References []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"`
+  Flags int32 `thrift:"flags,7,required" db:"flags" json:"flags"`
+  StartTime int64 `thrift:"startTime,8,required" db:"startTime" json:"startTime"`
+  Duration int64 `thrift:"duration,9,required" db:"duration" json:"duration"`
+  Tags []*Tag `thrift:"tags,10" db:"tags" json:"tags,omitempty"`
+  Logs []*Log `thrift:"logs,11" db:"logs" json:"logs,omitempty"`
+}
+
+func NewSpan() *Span {
+  return &Span{}
+}
+
+
+func (p *Span) GetTraceIdLow() int64 {
+  return p.TraceIdLow
+}
+
+func (p *Span) GetTraceIdHigh() int64 {
+  return p.TraceIdHigh
+}
+
+func (p *Span) GetSpanId() int64 {
+  return p.SpanId
+}
+
+func (p *Span) GetParentSpanId() int64 {
+  return p.ParentSpanId
+}
+
+func (p *Span) GetOperationName() string {
+  return p.OperationName
+}
+var Span_References_DEFAULT []*SpanRef
+
+func (p *Span) GetReferences() []*SpanRef {
+  return p.References
+}
+
+func (p *Span) GetFlags() int32 {
+  return p.Flags
+}
+
+func (p *Span) GetStartTime() int64 {
+  return p.StartTime
+}
+
+func (p *Span) GetDuration() int64 {
+  return p.Duration
+}
+var Span_Tags_DEFAULT []*Tag
+
+func (p *Span) GetTags() []*Tag {
+  return p.Tags
+}
+var Span_Logs_DEFAULT []*Log
+
+func (p *Span) GetLogs() []*Log {
+  return p.Logs
+}
+func (p *Span) IsSetReferences() bool {
+  return p.References != nil
+}
+
+func (p *Span) IsSetTags() bool {
+  return p.Tags != nil
+}
+
+func (p *Span) IsSetLogs() bool {
+  return p.Logs != nil
+}
+
+func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetTraceIdLow bool = false;
+  var issetTraceIdHigh bool = false;
+  var issetSpanId bool = false;
+  var issetParentSpanId bool = false;
+  var issetOperationName bool = false;
+  var issetFlags bool = false;
+  var issetStartTime bool = false;
+  var issetDuration bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetTraceIdLow = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetTraceIdHigh = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+        issetSpanId = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+        issetParentSpanId = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 5:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField5(ctx, iprot); err != nil {
+          return err
+        }
+        issetOperationName = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 6:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField6(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 7:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField7(ctx, iprot); err != nil {
+          return err
+        }
+        issetFlags = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 8:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField8(ctx, iprot); err != nil {
+          return err
+        }
+        issetStartTime = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 9:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField9(ctx, iprot); err != nil {
+          return err
+        }
+        issetDuration = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 10:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField10(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 11:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField11(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetTraceIdLow{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"));
+  }
+  if !issetTraceIdHigh{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"));
+  }
+  if !issetSpanId{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"));
+  }
+  if !issetParentSpanId{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set"));
+  }
+  if !issetOperationName{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set"));
+  }
+  if !issetFlags{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set"));
+  }
+  if !issetStartTime{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set"));
+  }
+  if !issetDuration{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set"));
+  }
+  return nil
+}
+
+func (p *Span)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.TraceIdLow = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.TraceIdHigh = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.SpanId = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.ParentSpanId = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 5: ", err)
+} else {
+  p.OperationName = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*SpanRef, 0, size)
+  p.References =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem2 := &SpanRef{}
+    if err := _elem2.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
+    }
+    p.References = append(p.References, _elem2)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Span)  ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 7: ", err)
+} else {
+  p.Flags = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 8: ", err)
+} else {
+  p.StartTime = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 9: ", err)
+} else {
+  p.Duration = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Tag, 0, size)
+  p.Tags =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem3 := &Tag{}
+    if err := _elem3.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
+    }
+    p.Tags = append(p.Tags, _elem3)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Span)  ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Log, 0, size)
+  p.Logs =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem4 := &Log{}
+    if err := _elem4.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+    }
+    p.Logs = append(p.Logs, _elem4)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+    if err := p.writeField5(ctx, oprot); err != nil { return err }
+    if err := p.writeField6(ctx, oprot); err != nil { return err }
+    if err := p.writeField7(ctx, oprot); err != nil { return err }
+    if err := p.writeField8(ctx, oprot); err != nil { return err }
+    if err := p.writeField9(ctx, oprot); err != nil { return err }
+    if err := p.writeField10(ctx, oprot); err != nil { return err }
+    if err := p.writeField11(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetReferences() {
+    if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.References {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetTags() {
+    if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.Tags {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetLogs() {
+    if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.Logs {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) Equals(other *Span) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.TraceIdLow != other.TraceIdLow { return false }
+  if p.TraceIdHigh != other.TraceIdHigh { return false }
+  if p.SpanId != other.SpanId { return false }
+  if p.ParentSpanId != other.ParentSpanId { return false }
+  if p.OperationName != other.OperationName { return false }
+  if len(p.References) != len(other.References) { return false }
+  for i, _tgt := range p.References {
+    _src5 := other.References[i]
+    if !_tgt.Equals(_src5) { return false }
+  }
+  if p.Flags != other.Flags { return false }
+  if p.StartTime != other.StartTime { return false }
+  if p.Duration != other.Duration { return false }
+  if len(p.Tags) != len(other.Tags) { return false }
+  for i, _tgt := range p.Tags {
+    _src6 := other.Tags[i]
+    if !_tgt.Equals(_src6) { return false }
+  }
+  if len(p.Logs) != len(other.Logs) { return false }
+  for i, _tgt := range p.Logs {
+    _src7 := other.Logs[i]
+    if !_tgt.Equals(_src7) { return false }
+  }
+  return true
+}
+
+func (p *Span) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Span(%+v)", *p)
+}
+
+// Attributes:
+//  - ServiceName
+//  - Tags
+type Process struct {
+  ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"`
+  Tags []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"`
+}
+
+func NewProcess() *Process {
+  return &Process{}
+}
+
+
+func (p *Process) GetServiceName() string {
+  return p.ServiceName
+}
+var Process_Tags_DEFAULT []*Tag
+
+func (p *Process) GetTags() []*Tag {
+  return p.Tags
+}
+func (p *Process) IsSetTags() bool {
+  return p.Tags != nil
+}
+
+func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetServiceName bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetServiceName = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetServiceName{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set"));
+  }
+  return nil
+}
+
+func (p *Process)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.ServiceName = v
+}
+  return nil
+}
+
+func (p *Process)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Tag, 0, size)
+  p.Tags =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem8 := &Tag{}
+    if err := _elem8.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err)
+    }
+    p.Tags = append(p.Tags, _elem8)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Process"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) }
+  return err
+}
+
+func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetTags() {
+    if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.Tags {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err) }
+  }
+  return err
+}
+
+func (p *Process) Equals(other *Process) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.ServiceName != other.ServiceName { return false }
+  if len(p.Tags) != len(other.Tags) { return false }
+  for i, _tgt := range p.Tags {
+    _src9 := other.Tags[i]
+    if !_tgt.Equals(_src9) { return false }
+  }
+  return true
+}
+
+func (p *Process) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Process(%+v)", *p)
+}
+
+// Attributes:
+//  - FullQueueDroppedSpans
+//  - TooLargeDroppedSpans
+//  - FailedToEmitSpans
+type ClientStats struct {
+  FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"`
+  TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"`
+  FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"`
+}
+
+func NewClientStats() *ClientStats {
+  return &ClientStats{}
+}
+
+
+func (p *ClientStats) GetFullQueueDroppedSpans() int64 {
+  return p.FullQueueDroppedSpans
+}
+
+func (p *ClientStats) GetTooLargeDroppedSpans() int64 {
+  return p.TooLargeDroppedSpans
+}
+
+func (p *ClientStats) GetFailedToEmitSpans() int64 {
+  return p.FailedToEmitSpans
+}
+func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetFullQueueDroppedSpans bool = false;
+  var issetTooLargeDroppedSpans bool = false;
+  var issetFailedToEmitSpans bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetFullQueueDroppedSpans = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetTooLargeDroppedSpans = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+        issetFailedToEmitSpans = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetFullQueueDroppedSpans{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set"));
+  }
+  if !issetTooLargeDroppedSpans{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set"));
+  }
+  if !issetFailedToEmitSpans{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set"));
+  }
+  return nil
+}
+
+func (p *ClientStats)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.FullQueueDroppedSpans = v
+}
+  return nil
+}
+
+func (p *ClientStats)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.TooLargeDroppedSpans = v
+}
+  return nil
+}
+
+func (p *ClientStats)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.FailedToEmitSpans = v
+}
+  return nil
+}
+
+func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err) }
+  return err
+}
+
+func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err) }
+  return err
+}
+
+func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err) }
+  return err
+}
+
+func (p *ClientStats) Equals(other *ClientStats) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.FullQueueDroppedSpans != other.FullQueueDroppedSpans { return false }
+  if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans { return false }
+  if p.FailedToEmitSpans != other.FailedToEmitSpans { return false }
+  return true
+}
+
+func (p *ClientStats) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("ClientStats(%+v)", *p)
+}
+
+// Attributes:
+//  - Process
+//  - Spans
+//  - SeqNo
+//  - Stats
+type Batch struct {
+  Process *Process `thrift:"process,1,required" db:"process" json:"process"`
+  Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"`
+  SeqNo *int64 `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"`
+  Stats *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"`
+}
+
+func NewBatch() *Batch {
+  return &Batch{}
+}
+
+var Batch_Process_DEFAULT *Process
+func (p *Batch) GetProcess() *Process {
+  if !p.IsSetProcess() {
+    return Batch_Process_DEFAULT
+  }
+return p.Process
+}
+
+func (p *Batch) GetSpans() []*Span {
+  return p.Spans
+}
+var Batch_SeqNo_DEFAULT int64
+func (p *Batch) GetSeqNo() int64 {
+  if !p.IsSetSeqNo() {
+    return Batch_SeqNo_DEFAULT
+  }
+return *p.SeqNo
+}
+var Batch_Stats_DEFAULT *ClientStats
+func (p *Batch) GetStats() *ClientStats {
+  if !p.IsSetStats() {
+    return Batch_Stats_DEFAULT
+  }
+return p.Stats
+}
+func (p *Batch) IsSetProcess() bool {
+  return p.Process != nil
+}
+
+func (p *Batch) IsSetSeqNo() bool {
+  return p.SeqNo != nil
+}
+
+func (p *Batch) IsSetStats() bool {
+  return p.Stats != nil
+}
+
+func (p *Batch) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetProcess bool = false;
+  var issetSpans bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetProcess = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetSpans = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetProcess{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set"));
+  }
+  if !issetSpans{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set"));
+  }
+  return nil
+}
+
+func (p *Batch)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Process = &Process{}
+  if err := p.Process.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err)
+  }
+  return nil
+}
+
+func (p *Batch)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Span, 0, size)
+  p.Spans =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem10 := &Span{}
+    if err := _elem10.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
+    }
+    p.Spans = append(p.Spans, _elem10)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Batch)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.SeqNo = &v
+}
+  return nil
+}
+
+func (p *Batch)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Stats = &ClientStats{}
+  if err := p.Stats.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err)
+  }
+  return nil
+}
+
+func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) }
+  if err := p.Process.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) }
+  return err
+}
+
+func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Spans {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) }
+  return err
+}
+
+func (p *Batch) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSeqNo() {
+    if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err) }
+  }
+  return err
+}
+
+func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetStats() {
+    if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err) }
+    if err := p.Stats.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err) }
+  }
+  return err
+}
+
+func (p *Batch) Equals(other *Batch) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.Process.Equals(other.Process) { return false }
+  if len(p.Spans) != len(other.Spans) { return false }
+  for i, _tgt := range p.Spans {
+    _src11 := other.Spans[i]
+    if !_tgt.Equals(_src11) { return false }
+  }
+  if p.SeqNo != other.SeqNo {
+    if p.SeqNo == nil || other.SeqNo == nil {
+      return false
+    }
+    if (*p.SeqNo) != (*other.SeqNo) { return false }
+  }
+  if !p.Stats.Equals(other.Stats) { return false }
+  return true
+}
+
+func (p *Batch) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Batch(%+v)", *p)
+}
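
For orientation, here is a minimal sketch (outside the patch itself) of how a caller might assemble the generated Batch/Process/Span types before submission. Every value is an illustrative placeholder rather than something taken from this change, and the span's trace/span identifiers, tags and logs are elided for brevity.

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

// buildBatch assembles a Batch from the generated types above; all field
// values are placeholders, not values used anywhere in this change.
func buildBatch() *jaeger.Batch {
	process := &jaeger.Process{
		ServiceName: "example-service", // required field
	}
	span := &jaeger.Span{
		OperationName: "example-operation",
		StartTime:     1600000000000000, // microseconds, placeholder
		Duration:      1000,
		// Trace/span identifiers, tags and logs omitted for brevity.
	}
	seq := int64(1)
	return &jaeger.Batch{
		Process: process,              // required
		Spans:   []*jaeger.Span{span}, // required
		SeqNo:   &seq,                 // optional
		// Stats is optional and may stay nil.
	}
}

func main() {
	fmt.Println(buildBatch()) // prints via the generated String() method
}
```
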
+
+// Attributes:
+//  - Ok
+type BatchSubmitResponse struct {
+  Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
+}
+
+func NewBatchSubmitResponse() *BatchSubmitResponse {
+  return &BatchSubmitResponse{}
+}
+
+
+func (p *BatchSubmitResponse) GetOk() bool {
+  return p.Ok
+}
+func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetOk bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetOk = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetOk{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"));
+  }
+  return nil
+}
+
+func (p *BatchSubmitResponse)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Ok = v
+}
+  return nil
+}
+
+func (p *BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) }
+  if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) }
+  return err
+}
+
+func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Ok != other.Ok { return false }
+  return true
+}
+
+func (p *BatchSubmitResponse) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("BatchSubmitResponse(%+v)", *p)
+}
+
+type Collector interface {
+  // Parameters:
+  //  - Batches
+  SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error)
+}
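
A server-side handler only has to satisfy this generated interface. The sketch below is a hypothetical implementation (not part of this change) that simply acknowledges every submitted batch.

```go
package collectorexample

import (
	"context"

	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

// Compile-time check that the hypothetical handler satisfies the interface.
var _ jaeger.Collector = ackCollector{}

// ackCollector marks every submitted batch as accepted.
type ackCollector struct{}

func (ackCollector) SubmitBatches(ctx context.Context, batches []*jaeger.Batch) ([]*jaeger.BatchSubmitResponse, error) {
	responses := make([]*jaeger.BatchSubmitResponse, 0, len(batches))
	for range batches {
		responses = append(responses, &jaeger.BatchSubmitResponse{Ok: true})
	}
	return responses, nil
}
```
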
+
+type CollectorClient struct {
+  c thrift.TClient
+  meta thrift.ResponseMeta
+}
+
+func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient {
+  return &CollectorClient{
+    c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+  }
+}
+
+func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient {
+  return &CollectorClient{
+    c: thrift.NewTStandardClient(iprot, oprot),
+  }
+}
+
+func NewCollectorClient(c thrift.TClient) *CollectorClient {
+  return &CollectorClient{
+    c: c,
+  }
+}
+
+func (p *CollectorClient) Client_() thrift.TClient {
+  return p.c
+}
+
+func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta {
+  return p.meta
+}
+
+func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+  p.meta = meta
+}
+
+// Parameters:
+//  - Batches
+func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) {
+  var _args12 CollectorSubmitBatchesArgs
+  _args12.Batches = batches
+  var _result14 CollectorSubmitBatchesResult
+  var _meta13 thrift.ResponseMeta
+  _meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, &_result14)
+  p.SetLastResponseMeta_(_meta13)
+  if _err != nil {
+    return
+  }
+  return _result14.GetSuccess(), nil
+}
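
On the calling side, submitBatches is reached through the generated client. The sketch below assumes the thrift.TTransport and thrift.TProtocolFactory are created by the surrounding code and passed in; nothing here opens a connection, it only exercises the wiring defined in this file.

```go
package collectorexample

import (
	"context"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

// submitOneBatch sketches the client-side call path. Transport and protocol
// factory are assumed to come from the caller's transport setup.
func submitOneBatch(ctx context.Context, trans thrift.TTransport, pf thrift.TProtocolFactory, batch *jaeger.Batch) ([]*jaeger.BatchSubmitResponse, error) {
	client := jaeger.NewCollectorClientFactory(trans, pf)
	return client.SubmitBatches(ctx, []*jaeger.Batch{batch})
}
```
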
+
+type CollectorProcessor struct {
+  processorMap map[string]thrift.TProcessorFunction
+  handler Collector
+}
+
+func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+  p.processorMap[key] = processor
+}
+
+func (p *CollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+  processor, ok = p.processorMap[key]
+  return processor, ok
+}
+
+func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+  return p.processorMap
+}
+
+func NewCollectorProcessor(handler Collector) *CollectorProcessor {
+
+  self15 := &CollectorProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
+  self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler:handler}
+return self15
+}
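
The processor built here maps the "submitBatches" method name to the handler. A hedged sketch of dispatching a single incoming message, assuming the input and output protocols are supplied by whatever server loop hosts the processor:

```go
package collectorexample

import (
	"context"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

// serveOneMessage reads one RPC from iprot and writes the reply to oprot via
// the generated processor. The handler and protocols come from the caller;
// no server loop or transport handling is shown here.
func serveOneMessage(ctx context.Context, handler jaeger.Collector, iprot, oprot thrift.TProtocol) (bool, thrift.TException) {
	processor := jaeger.NewCollectorProcessor(handler)
	return processor.Process(ctx, iprot, oprot)
}
```
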
+
+func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+  if err2 != nil { return false, thrift.WrapTException(err2) }
+  if processor, ok := p.GetProcessorFunction(name); ok {
+    return processor.Process(ctx, seqId, iprot, oprot)
+  }
+  iprot.Skip(ctx, thrift.STRUCT)
+  iprot.ReadMessageEnd(ctx)
+  x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+  oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+  x16.Write(ctx, oprot)
+  oprot.WriteMessageEnd(ctx)
+  oprot.Flush(ctx)
+  return false, x16
+
+}
+
+type collectorProcessorSubmitBatches struct {
+  handler Collector
+}
+
+func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  args := CollectorSubmitBatchesArgs{}
+  var err2 error
+  if err2 = args.Read(ctx, iprot); err2 != nil {
+    iprot.ReadMessageEnd(ctx)
+    x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+    oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return false, thrift.WrapTException(err2)
+  }
+  iprot.ReadMessageEnd(ctx)
+
+  tickerCancel := func() {}
+  // Start a goroutine to do server side connectivity check.
+  if thrift.ServerConnectivityCheckInterval > 0 {
+    var cancel context.CancelFunc
+    ctx, cancel = context.WithCancel(ctx)
+    defer cancel()
+    var tickerCtx context.Context
+    tickerCtx, tickerCancel = context.WithCancel(context.Background())
+    defer tickerCancel()
+    go func(ctx context.Context, cancel context.CancelFunc) {
+      ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+      defer ticker.Stop()
+      for {
+        select {
+        case <-ctx.Done():
+          return
+        case <-ticker.C:
+          if !iprot.Transport().IsOpen() {
+            cancel()
+            return
+          }
+        }
+      }
+    }(tickerCtx, cancel)
+  }
+
+  result := CollectorSubmitBatchesResult{}
+  var retval []*BatchSubmitResponse
+  if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil {
+    tickerCancel()
+    if err2 == thrift.ErrAbandonRequest {
+      return false, thrift.WrapTException(err2)
+    }
+    x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: " + err2.Error())
+    oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return true, thrift.WrapTException(err2)
+  } else {
+    result.Success = retval
+  }
+  tickerCancel()
+  if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err != nil {
+    return
+  }
+  return true, err
+}
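
The connectivity-check goroutine above cancels ctx once the client transport is closed, and the err2 == thrift.ErrAbandonRequest branch lets a handler opt out of a reply in that case. A hypothetical handler fragment that makes use of this, shown only as a sketch:

```go
package collectorexample

import (
	"context"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

// cancelAwareCollector abandons work once the request context has been
// cancelled (for example by the connectivity check above).
type cancelAwareCollector struct{}

func (cancelAwareCollector) SubmitBatches(ctx context.Context, batches []*jaeger.Batch) ([]*jaeger.BatchSubmitResponse, error) {
	if ctx.Err() != nil {
		// Client is gone: tell the processor not to write a reply.
		return nil, thrift.ErrAbandonRequest
	}
	out := make([]*jaeger.BatchSubmitResponse, 0, len(batches))
	for range batches {
		out = append(out, &jaeger.BatchSubmitResponse{Ok: true})
	}
	return out, nil
}
```
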
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - Batches
+type CollectorSubmitBatchesArgs struct {
+  Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"`
+}
+
+func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs {
+  return &CollectorSubmitBatchesArgs{}
+}
+
+
+func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch {
+  return p.Batches
+}
+func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *CollectorSubmitBatchesArgs)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Batch, 0, size)
+  p.Batches =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem17 := &Batch{}
+    if err := _elem17.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err)
+    }
+    p.Batches = append(p.Batches, _elem17)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Batches {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err) }
+  return err
+}
+
+func (p *CollectorSubmitBatchesArgs) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Success
+type CollectorSubmitBatchesResult struct {
+  Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult {
+  return &CollectorSubmitBatchesResult{}
+}
+
+var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse
+
+func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse {
+  return p.Success
+}
+func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool {
+  return p.Success != nil
+}
+
+func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 0:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField0(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *CollectorSubmitBatchesResult)  ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*BatchSubmitResponse, 0, size)
+  p.Success =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem18 := &BatchSubmitResponse{}
+    if err := _elem18.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err)
+    }
+    p.Success = append(p.Success, _elem18)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField0(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSuccess() {
+    if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.Success {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+  }
+  return err
+}
+
+func (p *CollectorSubmitBatchesResult) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p)
+}
+
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go
deleted file mode 100644
index e69c6d6..0000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go
+++ /dev/null
@@ -1,2106 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package jaeger
-
-import (
-	"bytes"
-	"fmt"
-	"github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-var GoUnusedProtection__ int
-
-type TagType int64
-
-const (
-	TagType_STRING TagType = 0
-	TagType_DOUBLE TagType = 1
-	TagType_BOOL   TagType = 2
-	TagType_LONG   TagType = 3
-	TagType_BINARY TagType = 4
-)
-
-func (p TagType) String() string {
-	switch p {
-	case TagType_STRING:
-		return "STRING"
-	case TagType_DOUBLE:
-		return "DOUBLE"
-	case TagType_BOOL:
-		return "BOOL"
-	case TagType_LONG:
-		return "LONG"
-	case TagType_BINARY:
-		return "BINARY"
-	}
-	return "<UNSET>"
-}
-
-func TagTypeFromString(s string) (TagType, error) {
-	switch s {
-	case "STRING":
-		return TagType_STRING, nil
-	case "DOUBLE":
-		return TagType_DOUBLE, nil
-	case "BOOL":
-		return TagType_BOOL, nil
-	case "LONG":
-		return TagType_LONG, nil
-	case "BINARY":
-		return TagType_BINARY, nil
-	}
-	return TagType(0), fmt.Errorf("not a valid TagType string")
-}
-
-func TagTypePtr(v TagType) *TagType { return &v }
-
-func (p TagType) MarshalText() ([]byte, error) {
-	return []byte(p.String()), nil
-}
-
-func (p *TagType) UnmarshalText(text []byte) error {
-	q, err := TagTypeFromString(string(text))
-	if err != nil {
-		return err
-	}
-	*p = q
-	return nil
-}
-
-type SpanRefType int64
-
-const (
-	SpanRefType_CHILD_OF     SpanRefType = 0
-	SpanRefType_FOLLOWS_FROM SpanRefType = 1
-)
-
-func (p SpanRefType) String() string {
-	switch p {
-	case SpanRefType_CHILD_OF:
-		return "CHILD_OF"
-	case SpanRefType_FOLLOWS_FROM:
-		return "FOLLOWS_FROM"
-	}
-	return "<UNSET>"
-}
-
-func SpanRefTypeFromString(s string) (SpanRefType, error) {
-	switch s {
-	case "CHILD_OF":
-		return SpanRefType_CHILD_OF, nil
-	case "FOLLOWS_FROM":
-		return SpanRefType_FOLLOWS_FROM, nil
-	}
-	return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string")
-}
-
-func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v }
-
-func (p SpanRefType) MarshalText() ([]byte, error) {
-	return []byte(p.String()), nil
-}
-
-func (p *SpanRefType) UnmarshalText(text []byte) error {
-	q, err := SpanRefTypeFromString(string(text))
-	if err != nil {
-		return err
-	}
-	*p = q
-	return nil
-}
-
-// Attributes:
-//  - Key
-//  - VType
-//  - VStr
-//  - VDouble
-//  - VBool
-//  - VLong
-//  - VBinary
-type Tag struct {
-	Key     string   `thrift:"key,1,required" json:"key"`
-	VType   TagType  `thrift:"vType,2,required" json:"vType"`
-	VStr    *string  `thrift:"vStr,3" json:"vStr,omitempty"`
-	VDouble *float64 `thrift:"vDouble,4" json:"vDouble,omitempty"`
-	VBool   *bool    `thrift:"vBool,5" json:"vBool,omitempty"`
-	VLong   *int64   `thrift:"vLong,6" json:"vLong,omitempty"`
-	VBinary []byte   `thrift:"vBinary,7" json:"vBinary,omitempty"`
-}
-
-func NewTag() *Tag {
-	return &Tag{}
-}
-
-func (p *Tag) GetKey() string {
-	return p.Key
-}
-
-func (p *Tag) GetVType() TagType {
-	return p.VType
-}
-
-var Tag_VStr_DEFAULT string
-
-func (p *Tag) GetVStr() string {
-	if !p.IsSetVStr() {
-		return Tag_VStr_DEFAULT
-	}
-	return *p.VStr
-}
-
-var Tag_VDouble_DEFAULT float64
-
-func (p *Tag) GetVDouble() float64 {
-	if !p.IsSetVDouble() {
-		return Tag_VDouble_DEFAULT
-	}
-	return *p.VDouble
-}
-
-var Tag_VBool_DEFAULT bool
-
-func (p *Tag) GetVBool() bool {
-	if !p.IsSetVBool() {
-		return Tag_VBool_DEFAULT
-	}
-	return *p.VBool
-}
-
-var Tag_VLong_DEFAULT int64
-
-func (p *Tag) GetVLong() int64 {
-	if !p.IsSetVLong() {
-		return Tag_VLong_DEFAULT
-	}
-	return *p.VLong
-}
-
-var Tag_VBinary_DEFAULT []byte
-
-func (p *Tag) GetVBinary() []byte {
-	return p.VBinary
-}
-func (p *Tag) IsSetVStr() bool {
-	return p.VStr != nil
-}
-
-func (p *Tag) IsSetVDouble() bool {
-	return p.VDouble != nil
-}
-
-func (p *Tag) IsSetVBool() bool {
-	return p.VBool != nil
-}
-
-func (p *Tag) IsSetVLong() bool {
-	return p.VLong != nil
-}
-
-func (p *Tag) IsSetVBinary() bool {
-	return p.VBinary != nil
-}
-
-func (p *Tag) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetKey bool = false
-	var issetVType bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetKey = true
-		case 2:
-			if err := p.readField2(iprot); err != nil {
-				return err
-			}
-			issetVType = true
-		case 3:
-			if err := p.readField3(iprot); err != nil {
-				return err
-			}
-		case 4:
-			if err := p.readField4(iprot); err != nil {
-				return err
-			}
-		case 5:
-			if err := p.readField5(iprot); err != nil {
-				return err
-			}
-		case 6:
-			if err := p.readField6(iprot); err != nil {
-				return err
-			}
-		case 7:
-			if err := p.readField7(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetKey {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set"))
-	}
-	if !issetVType {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set"))
-	}
-	return nil
-}
-
-func (p *Tag) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Key = v
-	}
-	return nil
-}
-
-func (p *Tag) readField2(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI32(); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		temp := TagType(v)
-		p.VType = temp
-	}
-	return nil
-}
-
-func (p *Tag) readField3(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		p.VStr = &v
-	}
-	return nil
-}
-
-func (p *Tag) readField4(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadDouble(); err != nil {
-		return thrift.PrependError("error reading field 4: ", err)
-	} else {
-		p.VDouble = &v
-	}
-	return nil
-}
-
-func (p *Tag) readField5(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadBool(); err != nil {
-		return thrift.PrependError("error reading field 5: ", err)
-	} else {
-		p.VBool = &v
-	}
-	return nil
-}
-
-func (p *Tag) readField6(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 6: ", err)
-	} else {
-		p.VLong = &v
-	}
-	return nil
-}
-
-func (p *Tag) readField7(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadBinary(); err != nil {
-		return thrift.PrependError("error reading field 7: ", err)
-	} else {
-		p.VBinary = v
-	}
-	return nil
-}
-
-func (p *Tag) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("Tag"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField2(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField3(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField4(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField5(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField6(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField7(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Tag) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
-	}
-	if err := oprot.WriteString(string(p.Key)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
-	}
-	return err
-}
-
-func (p *Tag) writeField2(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("vType", thrift.I32, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err)
-	}
-	if err := oprot.WriteI32(int32(p.VType)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err)
-	}
-	return err
-}
-
-func (p *Tag) writeField3(oprot thrift.TProtocol) (err error) {
-	if p.IsSetVStr() {
-		if err := oprot.WriteFieldBegin("vStr", thrift.STRING, 3); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err)
-		}
-		if err := oprot.WriteString(string(*p.VStr)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Tag) writeField4(oprot thrift.TProtocol) (err error) {
-	if p.IsSetVDouble() {
-		if err := oprot.WriteFieldBegin("vDouble", thrift.DOUBLE, 4); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err)
-		}
-		if err := oprot.WriteDouble(float64(*p.VDouble)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Tag) writeField5(oprot thrift.TProtocol) (err error) {
-	if p.IsSetVBool() {
-		if err := oprot.WriteFieldBegin("vBool", thrift.BOOL, 5); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err)
-		}
-		if err := oprot.WriteBool(bool(*p.VBool)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Tag) writeField6(oprot thrift.TProtocol) (err error) {
-	if p.IsSetVLong() {
-		if err := oprot.WriteFieldBegin("vLong", thrift.I64, 6); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err)
-		}
-		if err := oprot.WriteI64(int64(*p.VLong)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Tag) writeField7(oprot thrift.TProtocol) (err error) {
-	if p.IsSetVBinary() {
-		if err := oprot.WriteFieldBegin("vBinary", thrift.STRING, 7); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err)
-		}
-		if err := oprot.WriteBinary(p.VBinary); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Tag) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Tag(%+v)", *p)
-}
-
-// Attributes:
-//  - Timestamp
-//  - Fields
-type Log struct {
-	Timestamp int64  `thrift:"timestamp,1,required" json:"timestamp"`
-	Fields    []*Tag `thrift:"fields,2,required" json:"fields"`
-}
-
-func NewLog() *Log {
-	return &Log{}
-}
-
-func (p *Log) GetTimestamp() int64 {
-	return p.Timestamp
-}
-
-func (p *Log) GetFields() []*Tag {
-	return p.Fields
-}
-func (p *Log) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetTimestamp bool = false
-	var issetFields bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetTimestamp = true
-		case 2:
-			if err := p.readField2(iprot); err != nil {
-				return err
-			}
-			issetFields = true
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetTimestamp {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set"))
-	}
-	if !issetFields {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set"))
-	}
-	return nil
-}
-
-func (p *Log) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Timestamp = v
-	}
-	return nil
-}
-
-func (p *Log) readField2(iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin()
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Tag, 0, size)
-	p.Fields = tSlice
-	for i := 0; i < size; i++ {
-		_elem0 := &Tag{}
-		if err := _elem0.Read(iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
-		}
-		p.Fields = append(p.Fields, _elem0)
-	}
-	if err := iprot.ReadListEnd(); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Log) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("Log"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField2(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Log) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.Timestamp)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
-	}
-	return err
-}
-
-func (p *Log) writeField2(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("fields", thrift.LIST, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err)
-	}
-	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Fields)); err != nil {
-		return thrift.PrependError("error writing list begin: ", err)
-	}
-	for _, v := range p.Fields {
-		if err := v.Write(oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-		}
-	}
-	if err := oprot.WriteListEnd(); err != nil {
-		return thrift.PrependError("error writing list end: ", err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err)
-	}
-	return err
-}
-
-func (p *Log) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Log(%+v)", *p)
-}
-
-// Attributes:
-//  - RefType
-//  - TraceIdLow
-//  - TraceIdHigh
-//  - SpanId
-type SpanRef struct {
-	RefType     SpanRefType `thrift:"refType,1,required" json:"refType"`
-	TraceIdLow  int64       `thrift:"traceIdLow,2,required" json:"traceIdLow"`
-	TraceIdHigh int64       `thrift:"traceIdHigh,3,required" json:"traceIdHigh"`
-	SpanId      int64       `thrift:"spanId,4,required" json:"spanId"`
-}
-
-func NewSpanRef() *SpanRef {
-	return &SpanRef{}
-}
-
-func (p *SpanRef) GetRefType() SpanRefType {
-	return p.RefType
-}
-
-func (p *SpanRef) GetTraceIdLow() int64 {
-	return p.TraceIdLow
-}
-
-func (p *SpanRef) GetTraceIdHigh() int64 {
-	return p.TraceIdHigh
-}
-
-func (p *SpanRef) GetSpanId() int64 {
-	return p.SpanId
-}
-func (p *SpanRef) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetRefType bool = false
-	var issetTraceIdLow bool = false
-	var issetTraceIdHigh bool = false
-	var issetSpanId bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetRefType = true
-		case 2:
-			if err := p.readField2(iprot); err != nil {
-				return err
-			}
-			issetTraceIdLow = true
-		case 3:
-			if err := p.readField3(iprot); err != nil {
-				return err
-			}
-			issetTraceIdHigh = true
-		case 4:
-			if err := p.readField4(iprot); err != nil {
-				return err
-			}
-			issetSpanId = true
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetRefType {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set"))
-	}
-	if !issetTraceIdLow {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
-	}
-	if !issetTraceIdHigh {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
-	}
-	if !issetSpanId {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
-	}
-	return nil
-}
-
-func (p *SpanRef) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI32(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		temp := SpanRefType(v)
-		p.RefType = temp
-	}
-	return nil
-}
-
-func (p *SpanRef) readField2(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		p.TraceIdLow = v
-	}
-	return nil
-}
-
-func (p *SpanRef) readField3(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		p.TraceIdHigh = v
-	}
-	return nil
-}
-
-func (p *SpanRef) readField4(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 4: ", err)
-	} else {
-		p.SpanId = v
-	}
-	return nil
-}
-
-func (p *SpanRef) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("SpanRef"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField2(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField3(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField4(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *SpanRef) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("refType", thrift.I32, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err)
-	}
-	if err := oprot.WriteI32(int32(p.RefType)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err)
-	}
-	return err
-}
-
-func (p *SpanRef) writeField2(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err)
-	}
-	return err
-}
-
-func (p *SpanRef) writeField3(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 3); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err)
-	}
-	return err
-}
-
-func (p *SpanRef) writeField4(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("spanId", thrift.I64, 4); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.SpanId)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err)
-	}
-	return err
-}
-
-func (p *SpanRef) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("SpanRef(%+v)", *p)
-}
-
-// Attributes:
-//  - TraceIdLow
-//  - TraceIdHigh
-//  - SpanId
-//  - ParentSpanId
-//  - OperationName
-//  - References
-//  - Flags
-//  - StartTime
-//  - Duration
-//  - Tags
-//  - Logs
-type Span struct {
-	TraceIdLow    int64      `thrift:"traceIdLow,1,required" json:"traceIdLow"`
-	TraceIdHigh   int64      `thrift:"traceIdHigh,2,required" json:"traceIdHigh"`
-	SpanId        int64      `thrift:"spanId,3,required" json:"spanId"`
-	ParentSpanId  int64      `thrift:"parentSpanId,4,required" json:"parentSpanId"`
-	OperationName string     `thrift:"operationName,5,required" json:"operationName"`
-	References    []*SpanRef `thrift:"references,6" json:"references,omitempty"`
-	Flags         int32      `thrift:"flags,7,required" json:"flags"`
-	StartTime     int64      `thrift:"startTime,8,required" json:"startTime"`
-	Duration      int64      `thrift:"duration,9,required" json:"duration"`
-	Tags          []*Tag     `thrift:"tags,10" json:"tags,omitempty"`
-	Logs          []*Log     `thrift:"logs,11" json:"logs,omitempty"`
-}
-
-func NewSpan() *Span {
-	return &Span{}
-}
-
-func (p *Span) GetTraceIdLow() int64 {
-	return p.TraceIdLow
-}
-
-func (p *Span) GetTraceIdHigh() int64 {
-	return p.TraceIdHigh
-}
-
-func (p *Span) GetSpanId() int64 {
-	return p.SpanId
-}
-
-func (p *Span) GetParentSpanId() int64 {
-	return p.ParentSpanId
-}
-
-func (p *Span) GetOperationName() string {
-	return p.OperationName
-}
-
-var Span_References_DEFAULT []*SpanRef
-
-func (p *Span) GetReferences() []*SpanRef {
-	return p.References
-}
-
-func (p *Span) GetFlags() int32 {
-	return p.Flags
-}
-
-func (p *Span) GetStartTime() int64 {
-	return p.StartTime
-}
-
-func (p *Span) GetDuration() int64 {
-	return p.Duration
-}
-
-var Span_Tags_DEFAULT []*Tag
-
-func (p *Span) GetTags() []*Tag {
-	return p.Tags
-}
-
-var Span_Logs_DEFAULT []*Log
-
-func (p *Span) GetLogs() []*Log {
-	return p.Logs
-}
-func (p *Span) IsSetReferences() bool {
-	return p.References != nil
-}
-
-func (p *Span) IsSetTags() bool {
-	return p.Tags != nil
-}
-
-func (p *Span) IsSetLogs() bool {
-	return p.Logs != nil
-}
-
-func (p *Span) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetTraceIdLow bool = false
-	var issetTraceIdHigh bool = false
-	var issetSpanId bool = false
-	var issetParentSpanId bool = false
-	var issetOperationName bool = false
-	var issetFlags bool = false
-	var issetStartTime bool = false
-	var issetDuration bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetTraceIdLow = true
-		case 2:
-			if err := p.readField2(iprot); err != nil {
-				return err
-			}
-			issetTraceIdHigh = true
-		case 3:
-			if err := p.readField3(iprot); err != nil {
-				return err
-			}
-			issetSpanId = true
-		case 4:
-			if err := p.readField4(iprot); err != nil {
-				return err
-			}
-			issetParentSpanId = true
-		case 5:
-			if err := p.readField5(iprot); err != nil {
-				return err
-			}
-			issetOperationName = true
-		case 6:
-			if err := p.readField6(iprot); err != nil {
-				return err
-			}
-		case 7:
-			if err := p.readField7(iprot); err != nil {
-				return err
-			}
-			issetFlags = true
-		case 8:
-			if err := p.readField8(iprot); err != nil {
-				return err
-			}
-			issetStartTime = true
-		case 9:
-			if err := p.readField9(iprot); err != nil {
-				return err
-			}
-			issetDuration = true
-		case 10:
-			if err := p.readField10(iprot); err != nil {
-				return err
-			}
-		case 11:
-			if err := p.readField11(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetTraceIdLow {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
-	}
-	if !issetTraceIdHigh {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
-	}
-	if !issetSpanId {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
-	}
-	if !issetParentSpanId {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set"))
-	}
-	if !issetOperationName {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set"))
-	}
-	if !issetFlags {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set"))
-	}
-	if !issetStartTime {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set"))
-	}
-	if !issetDuration {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set"))
-	}
-	return nil
-}
-
-func (p *Span) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.TraceIdLow = v
-	}
-	return nil
-}
-
-func (p *Span) readField2(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		p.TraceIdHigh = v
-	}
-	return nil
-}
-
-func (p *Span) readField3(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		p.SpanId = v
-	}
-	return nil
-}
-
-func (p *Span) readField4(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 4: ", err)
-	} else {
-		p.ParentSpanId = v
-	}
-	return nil
-}
-
-func (p *Span) readField5(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(); err != nil {
-		return thrift.PrependError("error reading field 5: ", err)
-	} else {
-		p.OperationName = v
-	}
-	return nil
-}
-
-func (p *Span) readField6(iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin()
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*SpanRef, 0, size)
-	p.References = tSlice
-	for i := 0; i < size; i++ {
-		_elem1 := &SpanRef{}
-		if err := _elem1.Read(iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
-		}
-		p.References = append(p.References, _elem1)
-	}
-	if err := iprot.ReadListEnd(); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Span) readField7(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI32(); err != nil {
-		return thrift.PrependError("error reading field 7: ", err)
-	} else {
-		p.Flags = v
-	}
-	return nil
-}
-
-func (p *Span) readField8(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 8: ", err)
-	} else {
-		p.StartTime = v
-	}
-	return nil
-}
-
-func (p *Span) readField9(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 9: ", err)
-	} else {
-		p.Duration = v
-	}
-	return nil
-}
-
-func (p *Span) readField10(iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin()
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Tag, 0, size)
-	p.Tags = tSlice
-	for i := 0; i < size; i++ {
-		_elem2 := &Tag{}
-		if err := _elem2.Read(iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
-		}
-		p.Tags = append(p.Tags, _elem2)
-	}
-	if err := iprot.ReadListEnd(); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Span) readField11(iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin()
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Log, 0, size)
-	p.Logs = tSlice
-	for i := 0; i < size; i++ {
-		_elem3 := &Log{}
-		if err := _elem3.Read(iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
-		}
-		p.Logs = append(p.Logs, _elem3)
-	}
-	if err := iprot.ReadListEnd(); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Span) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("Span"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField2(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField3(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField4(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField5(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField6(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField7(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField8(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField9(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField10(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField11(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Span) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField2(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField3(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("spanId", thrift.I64, 3); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.SpanId)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField4(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("parentSpanId", thrift.I64, 4); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.ParentSpanId)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField5(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("operationName", thrift.STRING, 5); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err)
-	}
-	if err := oprot.WriteString(string(p.OperationName)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField6(oprot thrift.TProtocol) (err error) {
-	if p.IsSetReferences() {
-		if err := oprot.WriteFieldBegin("references", thrift.LIST, 6); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err)
-		}
-		if err := oprot.WriteListBegin(thrift.STRUCT, len(p.References)); err != nil {
-			return thrift.PrependError("error writing list begin: ", err)
-		}
-		for _, v := range p.References {
-			if err := v.Write(oprot); err != nil {
-				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-			}
-		}
-		if err := oprot.WriteListEnd(); err != nil {
-			return thrift.PrependError("error writing list end: ", err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) writeField7(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("flags", thrift.I32, 7); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err)
-	}
-	if err := oprot.WriteI32(int32(p.Flags)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField8(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("startTime", thrift.I64, 8); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.StartTime)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField9(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("duration", thrift.I64, 9); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.Duration)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField10(oprot thrift.TProtocol) (err error) {
-	if p.IsSetTags() {
-		if err := oprot.WriteFieldBegin("tags", thrift.LIST, 10); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err)
-		}
-		if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil {
-			return thrift.PrependError("error writing list begin: ", err)
-		}
-		for _, v := range p.Tags {
-			if err := v.Write(oprot); err != nil {
-				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-			}
-		}
-		if err := oprot.WriteListEnd(); err != nil {
-			return thrift.PrependError("error writing list end: ", err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) writeField11(oprot thrift.TProtocol) (err error) {
-	if p.IsSetLogs() {
-		if err := oprot.WriteFieldBegin("logs", thrift.LIST, 11); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err)
-		}
-		if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Logs)); err != nil {
-			return thrift.PrependError("error writing list begin: ", err)
-		}
-		for _, v := range p.Logs {
-			if err := v.Write(oprot); err != nil {
-				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-			}
-		}
-		if err := oprot.WriteListEnd(); err != nil {
-			return thrift.PrependError("error writing list end: ", err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Span(%+v)", *p)
-}
-
-// Attributes:
-//  - ServiceName
-//  - Tags
-type Process struct {
-	ServiceName string `thrift:"serviceName,1,required" json:"serviceName"`
-	Tags        []*Tag `thrift:"tags,2" json:"tags,omitempty"`
-}
-
-func NewProcess() *Process {
-	return &Process{}
-}
-
-func (p *Process) GetServiceName() string {
-	return p.ServiceName
-}
-
-var Process_Tags_DEFAULT []*Tag
-
-func (p *Process) GetTags() []*Tag {
-	return p.Tags
-}
-func (p *Process) IsSetTags() bool {
-	return p.Tags != nil
-}
-
-func (p *Process) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetServiceName bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetServiceName = true
-		case 2:
-			if err := p.readField2(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetServiceName {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set"))
-	}
-	return nil
-}
-
-func (p *Process) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.ServiceName = v
-	}
-	return nil
-}
-
-func (p *Process) readField2(iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin()
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Tag, 0, size)
-	p.Tags = tSlice
-	for i := 0; i < size; i++ {
-		_elem4 := &Tag{}
-		if err := _elem4.Read(iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
-		}
-		p.Tags = append(p.Tags, _elem4)
-	}
-	if err := iprot.ReadListEnd(); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Process) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("Process"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField2(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Process) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
-	}
-	if err := oprot.WriteString(string(p.ServiceName)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
-	}
-	return err
-}
-
-func (p *Process) writeField2(oprot thrift.TProtocol) (err error) {
-	if p.IsSetTags() {
-		if err := oprot.WriteFieldBegin("tags", thrift.LIST, 2); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err)
-		}
-		if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil {
-			return thrift.PrependError("error writing list begin: ", err)
-		}
-		for _, v := range p.Tags {
-			if err := v.Write(oprot); err != nil {
-				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-			}
-		}
-		if err := oprot.WriteListEnd(); err != nil {
-			return thrift.PrependError("error writing list end: ", err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Process) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Process(%+v)", *p)
-}
-
-// Attributes:
-//  - FullQueueDroppedSpans
-//  - TooLargeDroppedSpans
-//  - FailedToEmitSpans
-type ClientStats struct {
-	FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" json:"fullQueueDroppedSpans"`
-	TooLargeDroppedSpans  int64 `thrift:"tooLargeDroppedSpans,2,required" json:"tooLargeDroppedSpans"`
-	FailedToEmitSpans     int64 `thrift:"failedToEmitSpans,3,required" json:"failedToEmitSpans"`
-}
-
-func NewClientStats() *ClientStats {
-	return &ClientStats{}
-}
-
-func (p *ClientStats) GetFullQueueDroppedSpans() int64 {
-	return p.FullQueueDroppedSpans
-}
-
-func (p *ClientStats) GetTooLargeDroppedSpans() int64 {
-	return p.TooLargeDroppedSpans
-}
-
-func (p *ClientStats) GetFailedToEmitSpans() int64 {
-	return p.FailedToEmitSpans
-}
-func (p *ClientStats) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetFullQueueDroppedSpans bool = false
-	var issetTooLargeDroppedSpans bool = false
-	var issetFailedToEmitSpans bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetFullQueueDroppedSpans = true
-		case 2:
-			if err := p.readField2(iprot); err != nil {
-				return err
-			}
-			issetTooLargeDroppedSpans = true
-		case 3:
-			if err := p.readField3(iprot); err != nil {
-				return err
-			}
-			issetFailedToEmitSpans = true
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetFullQueueDroppedSpans {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set"))
-	}
-	if !issetTooLargeDroppedSpans {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set"))
-	}
-	if !issetFailedToEmitSpans {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set"))
-	}
-	return nil
-}
-
-func (p *ClientStats) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.FullQueueDroppedSpans = v
-	}
-	return nil
-}
-
-func (p *ClientStats) readField2(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		p.TooLargeDroppedSpans = v
-	}
-	return nil
-}
-
-func (p *ClientStats) readField3(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		p.FailedToEmitSpans = v
-	}
-	return nil
-}
-
-func (p *ClientStats) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("ClientStats"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField2(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField3(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *ClientStats) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("fullQueueDroppedSpans", thrift.I64, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.FullQueueDroppedSpans)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err)
-	}
-	return err
-}
-
-func (p *ClientStats) writeField2(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("tooLargeDroppedSpans", thrift.I64, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.TooLargeDroppedSpans)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err)
-	}
-	return err
-}
-
-func (p *ClientStats) writeField3(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("failedToEmitSpans", thrift.I64, 3); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.FailedToEmitSpans)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err)
-	}
-	return err
-}
-
-func (p *ClientStats) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("ClientStats(%+v)", *p)
-}
-
-// Attributes:
-//  - Process
-//  - Spans
-//  - SeqNo
-//  - Stats
-type Batch struct {
-	Process *Process     `thrift:"process,1,required" json:"process"`
-	Spans   []*Span      `thrift:"spans,2,required" json:"spans"`
-	SeqNo   *int64       `thrift:"seqNo,3" json:"seqNo,omitempty"`
-	Stats   *ClientStats `thrift:"stats,4" json:"stats,omitempty"`
-}
-
-func NewBatch() *Batch {
-	return &Batch{}
-}
-
-var Batch_Process_DEFAULT *Process
-
-func (p *Batch) GetProcess() *Process {
-	if !p.IsSetProcess() {
-		return Batch_Process_DEFAULT
-	}
-	return p.Process
-}
-
-func (p *Batch) GetSpans() []*Span {
-	return p.Spans
-}
-
-var Batch_SeqNo_DEFAULT int64
-
-func (p *Batch) GetSeqNo() int64 {
-	if !p.IsSetSeqNo() {
-		return Batch_SeqNo_DEFAULT
-	}
-	return *p.SeqNo
-}
-
-var Batch_Stats_DEFAULT *ClientStats
-
-func (p *Batch) GetStats() *ClientStats {
-	if !p.IsSetStats() {
-		return Batch_Stats_DEFAULT
-	}
-	return p.Stats
-}
-func (p *Batch) IsSetProcess() bool {
-	return p.Process != nil
-}
-
-func (p *Batch) IsSetSeqNo() bool {
-	return p.SeqNo != nil
-}
-
-func (p *Batch) IsSetStats() bool {
-	return p.Stats != nil
-}
-
-func (p *Batch) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetProcess bool = false
-	var issetSpans bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetProcess = true
-		case 2:
-			if err := p.readField2(iprot); err != nil {
-				return err
-			}
-			issetSpans = true
-		case 3:
-			if err := p.readField3(iprot); err != nil {
-				return err
-			}
-		case 4:
-			if err := p.readField4(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetProcess {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set"))
-	}
-	if !issetSpans {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set"))
-	}
-	return nil
-}
-
-func (p *Batch) readField1(iprot thrift.TProtocol) error {
-	p.Process = &Process{}
-	if err := p.Process.Read(iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err)
-	}
-	return nil
-}
-
-func (p *Batch) readField2(iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin()
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Span, 0, size)
-	p.Spans = tSlice
-	for i := 0; i < size; i++ {
-		_elem5 := &Span{}
-		if err := _elem5.Read(iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err)
-		}
-		p.Spans = append(p.Spans, _elem5)
-	}
-	if err := iprot.ReadListEnd(); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Batch) readField3(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		p.SeqNo = &v
-	}
-	return nil
-}
-
-func (p *Batch) readField4(iprot thrift.TProtocol) error {
-	p.Stats = &ClientStats{}
-	if err := p.Stats.Read(iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err)
-	}
-	return nil
-}
-
-func (p *Batch) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("Batch"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField2(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField3(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField4(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Batch) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("process", thrift.STRUCT, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err)
-	}
-	if err := p.Process.Write(oprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err)
-	}
-	return err
-}
-
-func (p *Batch) writeField2(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("spans", thrift.LIST, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err)
-	}
-	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
-		return thrift.PrependError("error writing list begin: ", err)
-	}
-	for _, v := range p.Spans {
-		if err := v.Write(oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-		}
-	}
-	if err := oprot.WriteListEnd(); err != nil {
-		return thrift.PrependError("error writing list end: ", err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err)
-	}
-	return err
-}
-
-func (p *Batch) writeField3(oprot thrift.TProtocol) (err error) {
-	if p.IsSetSeqNo() {
-		if err := oprot.WriteFieldBegin("seqNo", thrift.I64, 3); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err)
-		}
-		if err := oprot.WriteI64(int64(*p.SeqNo)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Batch) writeField4(oprot thrift.TProtocol) (err error) {
-	if p.IsSetStats() {
-		if err := oprot.WriteFieldBegin("stats", thrift.STRUCT, 4); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err)
-		}
-		if err := p.Stats.Write(oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Batch) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Batch(%+v)", *p)
-}
-
-// Attributes:
-//  - Ok
-type BatchSubmitResponse struct {
-	Ok bool `thrift:"ok,1,required" json:"ok"`
-}
-
-func NewBatchSubmitResponse() *BatchSubmitResponse {
-	return &BatchSubmitResponse{}
-}
-
-func (p *BatchSubmitResponse) GetOk() bool {
-	return p.Ok
-}
-func (p *BatchSubmitResponse) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetOk bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetOk = true
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetOk {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
-	}
-	return nil
-}
-
-func (p *BatchSubmitResponse) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadBool(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Ok = v
-	}
-	return nil
-}
-
-func (p *BatchSubmitResponse) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("BatchSubmitResponse"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *BatchSubmitResponse) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
-	}
-	if err := oprot.WriteBool(bool(p.Ok)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
-	}
-	return err
-}
-
-func (p *BatchSubmitResponse) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("BatchSubmitResponse(%+v)", *p)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go
new file mode 100644
index 0000000..015ad4b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package sampling
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go
similarity index 63%
rename from vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go
rename to vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go
index 0f6e3a8..5cc7628 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go
@@ -1,18 +1,23 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
 
 package sampling
 
-import (
+import(
 	"bytes"
+	"context"
 	"fmt"
+	"time"
 	"github.com/uber/jaeger-client-go/thrift"
 )
 
 // (needed to ensure safety because of naive import list construction.)
 var _ = thrift.ZERO
 var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
 var _ = bytes.Equal
 
+
 func init() {
 }
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go
new file mode 100644
index 0000000..3bffa5b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go
@@ -0,0 +1,1323 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package sampling
+
+import(
+	"bytes"
+	"context"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"time"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+type SamplingStrategyType int64
+const (
+  SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0
+  SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1
+)
+
+func (p SamplingStrategyType) String() string {
+  switch p {
+  case SamplingStrategyType_PROBABILISTIC: return "PROBABILISTIC"
+  case SamplingStrategyType_RATE_LIMITING: return "RATE_LIMITING"
+  }
+  return "<UNSET>"
+}
+
+func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) {
+  switch s {
+  case "PROBABILISTIC": return SamplingStrategyType_PROBABILISTIC, nil 
+  case "RATE_LIMITING": return SamplingStrategyType_RATE_LIMITING, nil 
+  }
+  return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string")
+}
+
+
+func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v }
+
+func (p SamplingStrategyType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *SamplingStrategyType) UnmarshalText(text []byte) error {
+q, err := SamplingStrategyTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *SamplingStrategyType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = SamplingStrategyType(v)
+return nil
+}
+
+func (p * SamplingStrategyType) Value() (driver.Value, error) {
+  if p == nil {
+    return nil, nil
+  }
+return int64(*p), nil
+}
+// Attributes:
+//  - SamplingRate
+type ProbabilisticSamplingStrategy struct {
+  SamplingRate float64 `thrift:"samplingRate,1,required" db:"samplingRate" json:"samplingRate"`
+}
+
+func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy {
+  return &ProbabilisticSamplingStrategy{}
+}
+
+
+func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 {
+  return p.SamplingRate
+}
+func (p *ProbabilisticSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetSamplingRate bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetSamplingRate = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetSamplingRate{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set"));
+  }
+  return nil
+}
+
+func (p *ProbabilisticSamplingStrategy)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.SamplingRate = v
+}
+  return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "ProbabilisticSamplingStrategy"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "samplingRate", thrift.DOUBLE, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err) }
+  if err := oprot.WriteDouble(ctx, float64(p.SamplingRate)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err) }
+  return err
+}
+
+func (p *ProbabilisticSamplingStrategy) Equals(other *ProbabilisticSamplingStrategy) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.SamplingRate != other.SamplingRate { return false }
+  return true
+}
+
+func (p *ProbabilisticSamplingStrategy) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+//  - MaxTracesPerSecond
+type RateLimitingSamplingStrategy struct {
+  MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" db:"maxTracesPerSecond" json:"maxTracesPerSecond"`
+}
+
+func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy {
+  return &RateLimitingSamplingStrategy{}
+}
+
+
+func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 {
+  return p.MaxTracesPerSecond
+}
+func (p *RateLimitingSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetMaxTracesPerSecond bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I16 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetMaxTracesPerSecond = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetMaxTracesPerSecond{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set"));
+  }
+  return nil
+}
+
+func (p *RateLimitingSamplingStrategy)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI16(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.MaxTracesPerSecond = v
+}
+  return nil
+}
+
+func (p *RateLimitingSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "RateLimitingSamplingStrategy"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *RateLimitingSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "maxTracesPerSecond", thrift.I16, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err) }
+  if err := oprot.WriteI16(ctx, int16(p.MaxTracesPerSecond)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err) }
+  return err
+}
+
+func (p *RateLimitingSamplingStrategy) Equals(other *RateLimitingSamplingStrategy) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.MaxTracesPerSecond != other.MaxTracesPerSecond { return false }
+  return true
+}
+
+func (p *RateLimitingSamplingStrategy) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+//  - Operation
+//  - ProbabilisticSampling
+type OperationSamplingStrategy struct {
+  Operation string `thrift:"operation,1,required" db:"operation" json:"operation"`
+  ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" db:"probabilisticSampling" json:"probabilisticSampling"`
+}
+
+func NewOperationSamplingStrategy() *OperationSamplingStrategy {
+  return &OperationSamplingStrategy{}
+}
+
+
+func (p *OperationSamplingStrategy) GetOperation() string {
+  return p.Operation
+}
+var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
+func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
+  if !p.IsSetProbabilisticSampling() {
+    return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT
+  }
+return p.ProbabilisticSampling
+}
+func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool {
+  return p.ProbabilisticSampling != nil
+}
+
+func (p *OperationSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetOperation bool = false;
+  var issetProbabilisticSampling bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetOperation = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetProbabilisticSampling = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetOperation{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set"));
+  }
+  if !issetProbabilisticSampling{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set"));
+  }
+  return nil
+}
+
+func (p *OperationSamplingStrategy)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Operation = v
+}
+  return nil
+}
+
+func (p *OperationSamplingStrategy)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
+  if err := p.ProbabilisticSampling.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
+  }
+  return nil
+}
+
+func (p *OperationSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "OperationSamplingStrategy"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *OperationSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "operation", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.Operation)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err) }
+  return err
+}
+
+func (p *OperationSamplingStrategy) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "probabilisticSampling", thrift.STRUCT, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) }
+  if err := p.ProbabilisticSampling.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) }
+  return err
+}
+
+func (p *OperationSamplingStrategy) Equals(other *OperationSamplingStrategy) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Operation != other.Operation { return false }
+  if !p.ProbabilisticSampling.Equals(other.ProbabilisticSampling) { return false }
+  return true
+}
+
+func (p *OperationSamplingStrategy) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+//  - DefaultSamplingProbability
+//  - DefaultLowerBoundTracesPerSecond
+//  - PerOperationStrategies
+//  - DefaultUpperBoundTracesPerSecond
+type PerOperationSamplingStrategies struct {
+  DefaultSamplingProbability float64 `thrift:"defaultSamplingProbability,1,required" db:"defaultSamplingProbability" json:"defaultSamplingProbability"`
+  DefaultLowerBoundTracesPerSecond float64 `thrift:"defaultLowerBoundTracesPerSecond,2,required" db:"defaultLowerBoundTracesPerSecond" json:"defaultLowerBoundTracesPerSecond"`
+  PerOperationStrategies []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" db:"perOperationStrategies" json:"perOperationStrategies"`
+  DefaultUpperBoundTracesPerSecond *float64 `thrift:"defaultUpperBoundTracesPerSecond,4" db:"defaultUpperBoundTracesPerSecond" json:"defaultUpperBoundTracesPerSecond,omitempty"`
+}
+
+func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies {
+  return &PerOperationSamplingStrategies{}
+}
+
+
+func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 {
+  return p.DefaultSamplingProbability
+}
+
+func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 {
+  return p.DefaultLowerBoundTracesPerSecond
+}
+
+func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy {
+  return p.PerOperationStrategies
+}
+var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64
+func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 {
+  if !p.IsSetDefaultUpperBoundTracesPerSecond() {
+    return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT
+  }
+return *p.DefaultUpperBoundTracesPerSecond
+}
+func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool {
+  return p.DefaultUpperBoundTracesPerSecond != nil
+}
+
+func (p *PerOperationSamplingStrategies) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetDefaultSamplingProbability bool = false;
+  var issetDefaultLowerBoundTracesPerSecond bool = false;
+  var issetPerOperationStrategies bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetDefaultSamplingProbability = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetDefaultLowerBoundTracesPerSecond = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+        issetPerOperationStrategies = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetDefaultSamplingProbability{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set"));
+  }
+  if !issetDefaultLowerBoundTracesPerSecond{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set"));
+  }
+  if !issetPerOperationStrategies{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set"));
+  }
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.DefaultSamplingProbability = v
+}
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.DefaultLowerBoundTracesPerSecond = v
+}
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*OperationSamplingStrategy, 0, size)
+  p.PerOperationStrategies =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem0 := &OperationSamplingStrategy{}
+    if err := _elem0.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+    }
+    p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+  return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.DefaultUpperBoundTracesPerSecond = &v
+}
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "PerOperationSamplingStrategies"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "defaultSamplingProbability", thrift.DOUBLE, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err) }
+  if err := oprot.WriteDouble(ctx, float64(p.DefaultSamplingProbability)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err) }
+  return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err) }
+  if err := oprot.WriteDouble(ctx, float64(p.DefaultLowerBoundTracesPerSecond)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err) }
+  return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "perOperationStrategies", thrift.LIST, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.PerOperationStrategies)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.PerOperationStrategies {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err) }
+  return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetDefaultUpperBoundTracesPerSecond() {
+    if err := oprot.WriteFieldBegin(ctx, "defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err) }
+    if err := oprot.WriteDouble(ctx, float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err) }
+  }
+  return err
+}
+
+func (p *PerOperationSamplingStrategies) Equals(other *PerOperationSamplingStrategies) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.DefaultSamplingProbability != other.DefaultSamplingProbability { return false }
+  if p.DefaultLowerBoundTracesPerSecond != other.DefaultLowerBoundTracesPerSecond { return false }
+  if len(p.PerOperationStrategies) != len(other.PerOperationStrategies) { return false }
+  for i, _tgt := range p.PerOperationStrategies {
+    _src1 := other.PerOperationStrategies[i]
+    if !_tgt.Equals(_src1) { return false }
+  }
+  if p.DefaultUpperBoundTracesPerSecond != other.DefaultUpperBoundTracesPerSecond {
+    if p.DefaultUpperBoundTracesPerSecond == nil || other.DefaultUpperBoundTracesPerSecond == nil {
+      return false
+    }
+    if (*p.DefaultUpperBoundTracesPerSecond) != (*other.DefaultUpperBoundTracesPerSecond) { return false }
+  }
+  return true
+}
+
+func (p *PerOperationSamplingStrategies) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p)
+}
+
+// Attributes:
+//  - StrategyType
+//  - ProbabilisticSampling
+//  - RateLimitingSampling
+//  - OperationSampling
+type SamplingStrategyResponse struct {
+  StrategyType SamplingStrategyType `thrift:"strategyType,1,required" db:"strategyType" json:"strategyType"`
+  ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2" db:"probabilisticSampling" json:"probabilisticSampling,omitempty"`
+  RateLimitingSampling *RateLimitingSamplingStrategy `thrift:"rateLimitingSampling,3" db:"rateLimitingSampling" json:"rateLimitingSampling,omitempty"`
+  OperationSampling *PerOperationSamplingStrategies `thrift:"operationSampling,4" db:"operationSampling" json:"operationSampling,omitempty"`
+}
+
+func NewSamplingStrategyResponse() *SamplingStrategyResponse {
+  return &SamplingStrategyResponse{}
+}
+
+
+func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType {
+  return p.StrategyType
+}
+var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
+func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
+  if !p.IsSetProbabilisticSampling() {
+    return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT
+  }
+return p.ProbabilisticSampling
+}
+var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy
+func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy {
+  if !p.IsSetRateLimitingSampling() {
+    return SamplingStrategyResponse_RateLimitingSampling_DEFAULT
+  }
+return p.RateLimitingSampling
+}
+var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies
+func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies {
+  if !p.IsSetOperationSampling() {
+    return SamplingStrategyResponse_OperationSampling_DEFAULT
+  }
+return p.OperationSampling
+}
+func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool {
+  return p.ProbabilisticSampling != nil
+}
+
+func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool {
+  return p.RateLimitingSampling != nil
+}
+
+func (p *SamplingStrategyResponse) IsSetOperationSampling() bool {
+  return p.OperationSampling != nil
+}
+
+func (p *SamplingStrategyResponse) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetStrategyType bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetStrategyType = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetStrategyType{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set"));
+  }
+  return nil
+}
+
+func (p *SamplingStrategyResponse)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  temp := SamplingStrategyType(v)
+  p.StrategyType = temp
+}
+  return nil
+}
+
+func (p *SamplingStrategyResponse)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
+  if err := p.ProbabilisticSampling.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
+  }
+  return nil
+}
+
+func (p *SamplingStrategyResponse)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  p.RateLimitingSampling = &RateLimitingSamplingStrategy{}
+  if err := p.RateLimitingSampling.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err)
+  }
+  return nil
+}
+
+func (p *SamplingStrategyResponse)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  p.OperationSampling = &PerOperationSamplingStrategies{}
+  if err := p.OperationSampling.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err)
+  }
+  return nil
+}
+
+func (p *SamplingStrategyResponse) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "SamplingStrategyResponse"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *SamplingStrategyResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "strategyType", thrift.I32, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.StrategyType)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err) }
+  return err
+}
+
+func (p *SamplingStrategyResponse) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetProbabilisticSampling() {
+    if err := oprot.WriteFieldBegin(ctx, "probabilisticSampling", thrift.STRUCT, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) }
+    if err := p.ProbabilisticSampling.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) }
+  }
+  return err
+}
+
+func (p *SamplingStrategyResponse) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetRateLimitingSampling() {
+    if err := oprot.WriteFieldBegin(ctx, "rateLimitingSampling", thrift.STRUCT, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err) }
+    if err := p.RateLimitingSampling.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err) }
+  }
+  return err
+}
+
+func (p *SamplingStrategyResponse) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetOperationSampling() {
+    if err := oprot.WriteFieldBegin(ctx, "operationSampling", thrift.STRUCT, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err) }
+    if err := p.OperationSampling.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err) }
+  }
+  return err
+}
+
+func (p *SamplingStrategyResponse) Equals(other *SamplingStrategyResponse) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.StrategyType != other.StrategyType { return false }
+  if !p.ProbabilisticSampling.Equals(other.ProbabilisticSampling) { return false }
+  if !p.RateLimitingSampling.Equals(other.RateLimitingSampling) { return false }
+  if !p.OperationSampling.Equals(other.OperationSampling) { return false }
+  return true
+}
+
+func (p *SamplingStrategyResponse) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p)
+}
+
+type SamplingManager interface {
+  // Parameters:
+  //  - ServiceName
+  GetSamplingStrategy(ctx context.Context, serviceName string) (_r *SamplingStrategyResponse, _err error)
+}
+
+type SamplingManagerClient struct {
+  c thrift.TClient
+  meta thrift.ResponseMeta
+}
+
+func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient {
+  return &SamplingManagerClient{
+    c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+  }
+}
+
+func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient {
+  return &SamplingManagerClient{
+    c: thrift.NewTStandardClient(iprot, oprot),
+  }
+}
+
+func NewSamplingManagerClient(c thrift.TClient) *SamplingManagerClient {
+  return &SamplingManagerClient{
+    c: c,
+  }
+}
+
+func (p *SamplingManagerClient) Client_() thrift.TClient {
+  return p.c
+}
+
+func (p *SamplingManagerClient) LastResponseMeta_() thrift.ResponseMeta {
+  return p.meta
+}
+
+func (p *SamplingManagerClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+  p.meta = meta
+}
+
+// Parameters:
+//  - ServiceName
+func (p *SamplingManagerClient) GetSamplingStrategy(ctx context.Context, serviceName string) (_r *SamplingStrategyResponse, _err error) {
+  var _args2 SamplingManagerGetSamplingStrategyArgs
+  _args2.ServiceName = serviceName
+  var _result4 SamplingManagerGetSamplingStrategyResult
+  var _meta3 thrift.ResponseMeta
+  _meta3, _err = p.Client_().Call(ctx, "getSamplingStrategy", &_args2, &_result4)
+  p.SetLastResponseMeta_(_meta3)
+  if _err != nil {
+    return
+  }
+  return _result4.GetSuccess(), nil
+}
+
+type SamplingManagerProcessor struct {
+  processorMap map[string]thrift.TProcessorFunction
+  handler SamplingManager
+}
+
+func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+  p.processorMap[key] = processor
+}
+
+func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+  processor, ok = p.processorMap[key]
+  return processor, ok
+}
+
+func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+  return p.processorMap
+}
+
+func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor {
+
+  self5 := &SamplingManagerProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
+  self5.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler:handler}
+return self5
+}
+
+func (p *SamplingManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+  if err2 != nil { return false, thrift.WrapTException(err2) }
+  if processor, ok := p.GetProcessorFunction(name); ok {
+    return processor.Process(ctx, seqId, iprot, oprot)
+  }
+  iprot.Skip(ctx, thrift.STRUCT)
+  iprot.ReadMessageEnd(ctx)
+  x6 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+  oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+  x6.Write(ctx, oprot)
+  oprot.WriteMessageEnd(ctx)
+  oprot.Flush(ctx)
+  return false, x6
+
+}
+
+type samplingManagerProcessorGetSamplingStrategy struct {
+  handler SamplingManager
+}
+
+func (p *samplingManagerProcessorGetSamplingStrategy) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  args := SamplingManagerGetSamplingStrategyArgs{}
+  var err2 error
+  if err2 = args.Read(ctx, iprot); err2 != nil {
+    iprot.ReadMessageEnd(ctx)
+    x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+    oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return false, thrift.WrapTException(err2)
+  }
+  iprot.ReadMessageEnd(ctx)
+
+  tickerCancel := func() {}
+  // Start a goroutine to do server side connectivity check.
+  if thrift.ServerConnectivityCheckInterval > 0 {
+    var cancel context.CancelFunc
+    ctx, cancel = context.WithCancel(ctx)
+    defer cancel()
+    var tickerCtx context.Context
+    tickerCtx, tickerCancel = context.WithCancel(context.Background())
+    defer tickerCancel()
+    go func(ctx context.Context, cancel context.CancelFunc) {
+      ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+      defer ticker.Stop()
+      for {
+        select {
+        case <-ctx.Done():
+          return
+        case <-ticker.C:
+          if !iprot.Transport().IsOpen() {
+            cancel()
+            return
+          }
+        }
+      }
+    }(tickerCtx, cancel)
+  }
+
+  result := SamplingManagerGetSamplingStrategyResult{}
+  var retval *SamplingStrategyResponse
+  if retval, err2 = p.handler.GetSamplingStrategy(ctx, args.ServiceName); err2 != nil {
+    tickerCancel()
+    if err2 == thrift.ErrAbandonRequest {
+      return false, thrift.WrapTException(err2)
+    }
+    x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: " + err2.Error())
+    oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return true, thrift.WrapTException(err2)
+  } else {
+    result.Success = retval
+  }
+  tickerCancel()
+  if err2 = oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.REPLY, seqId); err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err != nil {
+    return
+  }
+  return true, err
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - ServiceName
+type SamplingManagerGetSamplingStrategyArgs struct {
+  ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"`
+}
+
+func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs {
+  return &SamplingManagerGetSamplingStrategyArgs{}
+}
+
+
+func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string {
+  return p.ServiceName
+}
+func (p *SamplingManagerGetSamplingStrategyArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.ServiceName = v
+}
+  return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "getSamplingStrategy_args"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) }
+  return err
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Success
+type SamplingManagerGetSamplingStrategyResult struct {
+  Success *SamplingStrategyResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult {
+  return &SamplingManagerGetSamplingStrategyResult{}
+}
+
+var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse
+func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse {
+  if !p.IsSetSuccess() {
+    return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT
+  }
+return p.Success
+}
+func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool {
+  return p.Success != nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 0:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField0(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult)  ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Success = &SamplingStrategyResponse{}
+  if err := p.Success.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
+  }
+  return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "getSamplingStrategy_result"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField0(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSuccess() {
+    if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+    if err := p.Success.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+  }
+  return err
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p)
+}
+
+
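The hunk above is the regenerated, context-aware Thrift client/server code for the Jaeger sampling service. The sketch below is a minimal usage example and not part of the change: it assumes the regenerated package is still vendored under `github.com/uber/jaeger-client-go/thrift-gen/sampling` (the path of the files deleted next in this diff) and that the caller already holds a `thrift.TClient` (for example one built with `thrift.NewTStandardClient` over an input/output protocol pair). The package and function names are hypothetical.

```go
package samplingexample

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/sampling" // assumed vendored path
)

// fetchStrategy (hypothetical helper) asks the sampling service which
// strategy applies to `service` and prints whichever optional strategy
// struct the response carries.
func fetchStrategy(ctx context.Context, c thrift.TClient, service string) (*sampling.SamplingStrategyResponse, error) {
	client := sampling.NewSamplingManagerClient(c)

	resp, err := client.GetSamplingStrategy(ctx, service)
	if err != nil {
		return nil, err
	}

	// The generated IsSet*/Get* accessors distinguish which optional field
	// the agent populated; only strategyType is required.
	if resp.IsSetProbabilisticSampling() {
		fmt.Println("probabilistic:", resp.GetProbabilisticSampling())
	} else if resp.IsSetRateLimitingSampling() {
		fmt.Println("rate limiting:", resp.GetRateLimitingSampling())
	}
	return resp, nil
}
```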
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go
deleted file mode 100644
index 33179cf..0000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go
+++ /dev/null
@@ -1,410 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package sampling
-
-import (
-	"bytes"
-	"fmt"
-	"github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-type SamplingManager interface {
-	// Parameters:
-	//  - ServiceName
-	GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error)
-}
-
-type SamplingManagerClient struct {
-	Transport       thrift.TTransport
-	ProtocolFactory thrift.TProtocolFactory
-	InputProtocol   thrift.TProtocol
-	OutputProtocol  thrift.TProtocol
-	SeqId           int32
-}
-
-func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient {
-	return &SamplingManagerClient{Transport: t,
-		ProtocolFactory: f,
-		InputProtocol:   f.GetProtocol(t),
-		OutputProtocol:  f.GetProtocol(t),
-		SeqId:           0,
-	}
-}
-
-func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient {
-	return &SamplingManagerClient{Transport: t,
-		ProtocolFactory: nil,
-		InputProtocol:   iprot,
-		OutputProtocol:  oprot,
-		SeqId:           0,
-	}
-}
-
-// Parameters:
-//  - ServiceName
-func (p *SamplingManagerClient) GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error) {
-	if err = p.sendGetSamplingStrategy(serviceName); err != nil {
-		return
-	}
-	return p.recvGetSamplingStrategy()
-}
-
-func (p *SamplingManagerClient) sendGetSamplingStrategy(serviceName string) (err error) {
-	oprot := p.OutputProtocol
-	if oprot == nil {
-		oprot = p.ProtocolFactory.GetProtocol(p.Transport)
-		p.OutputProtocol = oprot
-	}
-	p.SeqId++
-	if err = oprot.WriteMessageBegin("getSamplingStrategy", thrift.CALL, p.SeqId); err != nil {
-		return
-	}
-	args := SamplingManagerGetSamplingStrategyArgs{
-		ServiceName: serviceName,
-	}
-	if err = args.Write(oprot); err != nil {
-		return
-	}
-	if err = oprot.WriteMessageEnd(); err != nil {
-		return
-	}
-	return oprot.Flush()
-}
-
-func (p *SamplingManagerClient) recvGetSamplingStrategy() (value *SamplingStrategyResponse, err error) {
-	iprot := p.InputProtocol
-	if iprot == nil {
-		iprot = p.ProtocolFactory.GetProtocol(p.Transport)
-		p.InputProtocol = iprot
-	}
-	method, mTypeId, seqId, err := iprot.ReadMessageBegin()
-	if err != nil {
-		return
-	}
-	if method != "getSamplingStrategy" {
-		err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getSamplingStrategy failed: wrong method name")
-		return
-	}
-	if p.SeqId != seqId {
-		err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getSamplingStrategy failed: out of sequence response")
-		return
-	}
-	if mTypeId == thrift.EXCEPTION {
-		error1 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
-		var error2 error
-		error2, err = error1.Read(iprot)
-		if err != nil {
-			return
-		}
-		if err = iprot.ReadMessageEnd(); err != nil {
-			return
-		}
-		err = error2
-		return
-	}
-	if mTypeId != thrift.REPLY {
-		err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getSamplingStrategy failed: invalid message type")
-		return
-	}
-	result := SamplingManagerGetSamplingStrategyResult{}
-	if err = result.Read(iprot); err != nil {
-		return
-	}
-	if err = iprot.ReadMessageEnd(); err != nil {
-		return
-	}
-	value = result.GetSuccess()
-	return
-}
-
-type SamplingManagerProcessor struct {
-	processorMap map[string]thrift.TProcessorFunction
-	handler      SamplingManager
-}
-
-func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
-	p.processorMap[key] = processor
-}
-
-func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
-	processor, ok = p.processorMap[key]
-	return processor, ok
-}
-
-func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
-	return p.processorMap
-}
-
-func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor {
-
-	self3 := &SamplingManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
-	self3.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler: handler}
-	return self3
-}
-
-func (p *SamplingManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	name, _, seqId, err := iprot.ReadMessageBegin()
-	if err != nil {
-		return false, err
-	}
-	if processor, ok := p.GetProcessorFunction(name); ok {
-		return processor.Process(seqId, iprot, oprot)
-	}
-	iprot.Skip(thrift.STRUCT)
-	iprot.ReadMessageEnd()
-	x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
-	oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
-	x4.Write(oprot)
-	oprot.WriteMessageEnd()
-	oprot.Flush()
-	return false, x4
-
-}
-
-type samplingManagerProcessorGetSamplingStrategy struct {
-	handler SamplingManager
-}
-
-func (p *samplingManagerProcessorGetSamplingStrategy) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	args := SamplingManagerGetSamplingStrategyArgs{}
-	if err = args.Read(iprot); err != nil {
-		iprot.ReadMessageEnd()
-		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
-		oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId)
-		x.Write(oprot)
-		oprot.WriteMessageEnd()
-		oprot.Flush()
-		return false, err
-	}
-
-	iprot.ReadMessageEnd()
-	result := SamplingManagerGetSamplingStrategyResult{}
-	var retval *SamplingStrategyResponse
-	var err2 error
-	if retval, err2 = p.handler.GetSamplingStrategy(args.ServiceName); err2 != nil {
-		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: "+err2.Error())
-		oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId)
-		x.Write(oprot)
-		oprot.WriteMessageEnd()
-		oprot.Flush()
-		return true, err2
-	} else {
-		result.Success = retval
-	}
-	if err2 = oprot.WriteMessageBegin("getSamplingStrategy", thrift.REPLY, seqId); err2 != nil {
-		err = err2
-	}
-	if err2 = result.Write(oprot); err == nil && err2 != nil {
-		err = err2
-	}
-	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
-		err = err2
-	}
-	if err2 = oprot.Flush(); err == nil && err2 != nil {
-		err = err2
-	}
-	if err != nil {
-		return
-	}
-	return true, err
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-//  - ServiceName
-type SamplingManagerGetSamplingStrategyArgs struct {
-	ServiceName string `thrift:"serviceName,1" json:"serviceName"`
-}
-
-func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs {
-	return &SamplingManagerGetSamplingStrategyArgs{}
-}
-
-func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string {
-	return p.ServiceName
-}
-func (p *SamplingManagerGetSamplingStrategyArgs) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyArgs) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.ServiceName = v
-	}
-	return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyArgs) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("getSamplingStrategy_args"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
-	}
-	if err := oprot.WriteString(string(p.ServiceName)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
-	}
-	return err
-}
-
-func (p *SamplingManagerGetSamplingStrategyArgs) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p)
-}
-
-// Attributes:
-//  - Success
-type SamplingManagerGetSamplingStrategyResult struct {
-	Success *SamplingStrategyResponse `thrift:"success,0" json:"success,omitempty"`
-}
-
-func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult {
-	return &SamplingManagerGetSamplingStrategyResult{}
-}
-
-var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse
-
-func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse {
-	if !p.IsSetSuccess() {
-		return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT
-	}
-	return p.Success
-}
-func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool {
-	return p.Success != nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 0:
-			if err := p.readField0(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) readField0(iprot thrift.TProtocol) error {
-	p.Success = &SamplingStrategyResponse{}
-	if err := p.Success.Read(iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
-	}
-	return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("getSamplingStrategy_result"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField0(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) writeField0(oprot thrift.TProtocol) (err error) {
-	if p.IsSetSuccess() {
-		if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
-		}
-		if err := p.Success.Write(oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p)
-}
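For call sites migrating off the deleted Thrift 0.9.3 code above: the old client method was `GetSamplingStrategy(serviceName string)` and the old processor's `Process(iprot, oprot)` took no context, whereas the regenerated API threads a `context.Context` through both. Below is a minimal sketch of a handler written against the new interface; `stubManager` and the commented registration lines are illustrative only, and the import path is assumed as in the previous example.

```go
package samplingexample

import (
	"context"

	"github.com/uber/jaeger-client-go/thrift-gen/sampling" // assumed vendored path
)

// stubManager is a hypothetical handler implementing the regenerated,
// context-aware SamplingManager interface.
type stubManager struct{}

func (stubManager) GetSamplingStrategy(ctx context.Context, serviceName string) (*sampling.SamplingStrategyResponse, error) {
	// A real handler would populate StrategyType and one of the optional
	// strategy structs based on serviceName; this stub returns an empty
	// response just to satisfy the interface.
	return sampling.NewSamplingStrategyResponse(), nil
}

// Server-side registration is unchanged apart from the context parameter:
//   proc := sampling.NewSamplingManagerProcessor(stubManager{})
//   ok, err := proc.Process(context.Background(), iprot, oprot)
```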
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go
deleted file mode 100644
index 9abaf05..0000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go
+++ /dev/null
@@ -1,873 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package sampling
-
-import (
-	"bytes"
-	"fmt"
-	"github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-var GoUnusedProtection__ int
-
-type SamplingStrategyType int64
-
-const (
-	SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0
-	SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1
-)
-
-func (p SamplingStrategyType) String() string {
-	switch p {
-	case SamplingStrategyType_PROBABILISTIC:
-		return "PROBABILISTIC"
-	case SamplingStrategyType_RATE_LIMITING:
-		return "RATE_LIMITING"
-	}
-	return "<UNSET>"
-}
-
-func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) {
-	switch s {
-	case "PROBABILISTIC":
-		return SamplingStrategyType_PROBABILISTIC, nil
-	case "RATE_LIMITING":
-		return SamplingStrategyType_RATE_LIMITING, nil
-	}
-	return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string")
-}
-
-func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v }
-
-func (p SamplingStrategyType) MarshalText() ([]byte, error) {
-	return []byte(p.String()), nil
-}
-
-func (p *SamplingStrategyType) UnmarshalText(text []byte) error {
-	q, err := SamplingStrategyTypeFromString(string(text))
-	if err != nil {
-		return err
-	}
-	*p = q
-	return nil
-}
-
-// Attributes:
-//  - SamplingRate
-type ProbabilisticSamplingStrategy struct {
-	SamplingRate float64 `thrift:"samplingRate,1,required" json:"samplingRate"`
-}
-
-func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy {
-	return &ProbabilisticSamplingStrategy{}
-}
-
-func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 {
-	return p.SamplingRate
-}
-func (p *ProbabilisticSamplingStrategy) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetSamplingRate bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetSamplingRate = true
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetSamplingRate {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set"))
-	}
-	return nil
-}
-
-func (p *ProbabilisticSamplingStrategy) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadDouble(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.SamplingRate = v
-	}
-	return nil
-}
-
-func (p *ProbabilisticSamplingStrategy) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("ProbabilisticSamplingStrategy"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *ProbabilisticSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("samplingRate", thrift.DOUBLE, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err)
-	}
-	if err := oprot.WriteDouble(float64(p.SamplingRate)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err)
-	}
-	return err
-}
-
-func (p *ProbabilisticSamplingStrategy) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p)
-}
-
-// Attributes:
-//  - MaxTracesPerSecond
-type RateLimitingSamplingStrategy struct {
-	MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" json:"maxTracesPerSecond"`
-}
-
-func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy {
-	return &RateLimitingSamplingStrategy{}
-}
-
-func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 {
-	return p.MaxTracesPerSecond
-}
-func (p *RateLimitingSamplingStrategy) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetMaxTracesPerSecond bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetMaxTracesPerSecond = true
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetMaxTracesPerSecond {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set"))
-	}
-	return nil
-}
-
-func (p *RateLimitingSamplingStrategy) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI16(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.MaxTracesPerSecond = v
-	}
-	return nil
-}
-
-func (p *RateLimitingSamplingStrategy) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("RateLimitingSamplingStrategy"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *RateLimitingSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("maxTracesPerSecond", thrift.I16, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err)
-	}
-	if err := oprot.WriteI16(int16(p.MaxTracesPerSecond)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err)
-	}
-	return err
-}
-
-func (p *RateLimitingSamplingStrategy) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p)
-}
-
-// Attributes:
-//  - Operation
-//  - ProbabilisticSampling
-type OperationSamplingStrategy struct {
-	Operation             string                         `thrift:"operation,1,required" json:"operation"`
-	ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" json:"probabilisticSampling"`
-}
-
-func NewOperationSamplingStrategy() *OperationSamplingStrategy {
-	return &OperationSamplingStrategy{}
-}
-
-func (p *OperationSamplingStrategy) GetOperation() string {
-	return p.Operation
-}
-
-var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
-
-func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
-	if !p.IsSetProbabilisticSampling() {
-		return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT
-	}
-	return p.ProbabilisticSampling
-}
-func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool {
-	return p.ProbabilisticSampling != nil
-}
-
-func (p *OperationSamplingStrategy) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetOperation bool = false
-	var issetProbabilisticSampling bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetOperation = true
-		case 2:
-			if err := p.readField2(iprot); err != nil {
-				return err
-			}
-			issetProbabilisticSampling = true
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetOperation {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set"))
-	}
-	if !issetProbabilisticSampling {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set"))
-	}
-	return nil
-}
-
-func (p *OperationSamplingStrategy) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Operation = v
-	}
-	return nil
-}
-
-func (p *OperationSamplingStrategy) readField2(iprot thrift.TProtocol) error {
-	p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
-	if err := p.ProbabilisticSampling.Read(iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
-	}
-	return nil
-}
-
-func (p *OperationSamplingStrategy) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("OperationSamplingStrategy"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField2(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *OperationSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("operation", thrift.STRING, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err)
-	}
-	if err := oprot.WriteString(string(p.Operation)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err)
-	}
-	return err
-}
-
-func (p *OperationSamplingStrategy) writeField2(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err)
-	}
-	if err := p.ProbabilisticSampling.Write(oprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err)
-	}
-	return err
-}
-
-func (p *OperationSamplingStrategy) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p)
-}
-
-// Attributes:
-//  - DefaultSamplingProbability
-//  - DefaultLowerBoundTracesPerSecond
-//  - PerOperationStrategies
-//  - DefaultUpperBoundTracesPerSecond
-type PerOperationSamplingStrategies struct {
-	DefaultSamplingProbability       float64                      `thrift:"defaultSamplingProbability,1,required" json:"defaultSamplingProbability"`
-	DefaultLowerBoundTracesPerSecond float64                      `thrift:"defaultLowerBoundTracesPerSecond,2,required" json:"defaultLowerBoundTracesPerSecond"`
-	PerOperationStrategies           []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" json:"perOperationStrategies"`
-	DefaultUpperBoundTracesPerSecond *float64                     `thrift:"defaultUpperBoundTracesPerSecond,4" json:"defaultUpperBoundTracesPerSecond,omitempty"`
-}
-
-func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies {
-	return &PerOperationSamplingStrategies{}
-}
-
-func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 {
-	return p.DefaultSamplingProbability
-}
-
-func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 {
-	return p.DefaultLowerBoundTracesPerSecond
-}
-
-func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy {
-	return p.PerOperationStrategies
-}
-
-var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64
-
-func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 {
-	if !p.IsSetDefaultUpperBoundTracesPerSecond() {
-		return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT
-	}
-	return *p.DefaultUpperBoundTracesPerSecond
-}
-func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool {
-	return p.DefaultUpperBoundTracesPerSecond != nil
-}
-
-func (p *PerOperationSamplingStrategies) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetDefaultSamplingProbability bool = false
-	var issetDefaultLowerBoundTracesPerSecond bool = false
-	var issetPerOperationStrategies bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetDefaultSamplingProbability = true
-		case 2:
-			if err := p.readField2(iprot); err != nil {
-				return err
-			}
-			issetDefaultLowerBoundTracesPerSecond = true
-		case 3:
-			if err := p.readField3(iprot); err != nil {
-				return err
-			}
-			issetPerOperationStrategies = true
-		case 4:
-			if err := p.readField4(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetDefaultSamplingProbability {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set"))
-	}
-	if !issetDefaultLowerBoundTracesPerSecond {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set"))
-	}
-	if !issetPerOperationStrategies {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set"))
-	}
-	return nil
-}
-
-func (p *PerOperationSamplingStrategies) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadDouble(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.DefaultSamplingProbability = v
-	}
-	return nil
-}
-
-func (p *PerOperationSamplingStrategies) readField2(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadDouble(); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		p.DefaultLowerBoundTracesPerSecond = v
-	}
-	return nil
-}
-
-func (p *PerOperationSamplingStrategies) readField3(iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin()
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*OperationSamplingStrategy, 0, size)
-	p.PerOperationStrategies = tSlice
-	for i := 0; i < size; i++ {
-		_elem0 := &OperationSamplingStrategy{}
-		if err := _elem0.Read(iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
-		}
-		p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0)
-	}
-	if err := iprot.ReadListEnd(); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *PerOperationSamplingStrategies) readField4(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadDouble(); err != nil {
-		return thrift.PrependError("error reading field 4: ", err)
-	} else {
-		p.DefaultUpperBoundTracesPerSecond = &v
-	}
-	return nil
-}
-
-func (p *PerOperationSamplingStrategies) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("PerOperationSamplingStrategies"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField2(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField3(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField4(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *PerOperationSamplingStrategies) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("defaultSamplingProbability", thrift.DOUBLE, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err)
-	}
-	if err := oprot.WriteDouble(float64(p.DefaultSamplingProbability)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err)
-	}
-	return err
-}
-
-func (p *PerOperationSamplingStrategies) writeField2(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err)
-	}
-	if err := oprot.WriteDouble(float64(p.DefaultLowerBoundTracesPerSecond)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err)
-	}
-	return err
-}
-
-func (p *PerOperationSamplingStrategies) writeField3(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("perOperationStrategies", thrift.LIST, 3); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err)
-	}
-	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PerOperationStrategies)); err != nil {
-		return thrift.PrependError("error writing list begin: ", err)
-	}
-	for _, v := range p.PerOperationStrategies {
-		if err := v.Write(oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-		}
-	}
-	if err := oprot.WriteListEnd(); err != nil {
-		return thrift.PrependError("error writing list end: ", err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err)
-	}
-	return err
-}
-
-func (p *PerOperationSamplingStrategies) writeField4(oprot thrift.TProtocol) (err error) {
-	if p.IsSetDefaultUpperBoundTracesPerSecond() {
-		if err := oprot.WriteFieldBegin("defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err)
-		}
-		if err := oprot.WriteDouble(float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *PerOperationSamplingStrategies) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p)
-}
-
-// Attributes:
-//  - StrategyType
-//  - ProbabilisticSampling
-//  - RateLimitingSampling
-//  - OperationSampling
-type SamplingStrategyResponse struct {
-	StrategyType          SamplingStrategyType            `thrift:"strategyType,1,required" json:"strategyType"`
-	ProbabilisticSampling *ProbabilisticSamplingStrategy  `thrift:"probabilisticSampling,2" json:"probabilisticSampling,omitempty"`
-	RateLimitingSampling  *RateLimitingSamplingStrategy   `thrift:"rateLimitingSampling,3" json:"rateLimitingSampling,omitempty"`
-	OperationSampling     *PerOperationSamplingStrategies `thrift:"operationSampling,4" json:"operationSampling,omitempty"`
-}
-
-func NewSamplingStrategyResponse() *SamplingStrategyResponse {
-	return &SamplingStrategyResponse{}
-}
-
-func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType {
-	return p.StrategyType
-}
-
-var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
-
-func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
-	if !p.IsSetProbabilisticSampling() {
-		return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT
-	}
-	return p.ProbabilisticSampling
-}
-
-var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy
-
-func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy {
-	if !p.IsSetRateLimitingSampling() {
-		return SamplingStrategyResponse_RateLimitingSampling_DEFAULT
-	}
-	return p.RateLimitingSampling
-}
-
-var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies
-
-func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies {
-	if !p.IsSetOperationSampling() {
-		return SamplingStrategyResponse_OperationSampling_DEFAULT
-	}
-	return p.OperationSampling
-}
-func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool {
-	return p.ProbabilisticSampling != nil
-}
-
-func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool {
-	return p.RateLimitingSampling != nil
-}
-
-func (p *SamplingStrategyResponse) IsSetOperationSampling() bool {
-	return p.OperationSampling != nil
-}
-
-func (p *SamplingStrategyResponse) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetStrategyType bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetStrategyType = true
-		case 2:
-			if err := p.readField2(iprot); err != nil {
-				return err
-			}
-		case 3:
-			if err := p.readField3(iprot); err != nil {
-				return err
-			}
-		case 4:
-			if err := p.readField4(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetStrategyType {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set"))
-	}
-	return nil
-}
-
-func (p *SamplingStrategyResponse) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI32(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		temp := SamplingStrategyType(v)
-		p.StrategyType = temp
-	}
-	return nil
-}
-
-func (p *SamplingStrategyResponse) readField2(iprot thrift.TProtocol) error {
-	p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
-	if err := p.ProbabilisticSampling.Read(iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
-	}
-	return nil
-}
-
-func (p *SamplingStrategyResponse) readField3(iprot thrift.TProtocol) error {
-	p.RateLimitingSampling = &RateLimitingSamplingStrategy{}
-	if err := p.RateLimitingSampling.Read(iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err)
-	}
-	return nil
-}
-
-func (p *SamplingStrategyResponse) readField4(iprot thrift.TProtocol) error {
-	p.OperationSampling = &PerOperationSamplingStrategies{}
-	if err := p.OperationSampling.Read(iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err)
-	}
-	return nil
-}
-
-func (p *SamplingStrategyResponse) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("SamplingStrategyResponse"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField2(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField3(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField4(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *SamplingStrategyResponse) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("strategyType", thrift.I32, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err)
-	}
-	if err := oprot.WriteI32(int32(p.StrategyType)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err)
-	}
-	return err
-}
-
-func (p *SamplingStrategyResponse) writeField2(oprot thrift.TProtocol) (err error) {
-	if p.IsSetProbabilisticSampling() {
-		if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err)
-		}
-		if err := p.ProbabilisticSampling.Write(oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *SamplingStrategyResponse) writeField3(oprot thrift.TProtocol) (err error) {
-	if p.IsSetRateLimitingSampling() {
-		if err := oprot.WriteFieldBegin("rateLimitingSampling", thrift.STRUCT, 3); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err)
-		}
-		if err := p.RateLimitingSampling.Write(oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *SamplingStrategyResponse) writeField4(oprot thrift.TProtocol) (err error) {
-	if p.IsSetOperationSampling() {
-		if err := oprot.WriteFieldBegin("operationSampling", thrift.STRUCT, 4); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err)
-		}
-		if err := p.OperationSampling.Write(oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *SamplingStrategyResponse) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p)
-}
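
The structs removed above are the Thrift-generated sampling types that jaeger-client-go uses when it polls a sampling manager for a strategy. As a minimal sketch only, assuming the regenerated package keeps the same import path and exported names (the describeStrategy helper and the hard-coded 1% rate are illustrative and not part of this change), a caller can branch on whichever strategy a SamplingStrategyResponse carries:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

// describeStrategy reports which strategy the response carries, using only the
// IsSet*/Get* helpers and field names from the generated code deleted above.
func describeStrategy(resp *sampling.SamplingStrategyResponse) string {
	switch {
	case resp.IsSetProbabilisticSampling():
		return fmt.Sprintf("probabilistic, samplingRate=%g", resp.GetProbabilisticSampling().SamplingRate)
	case resp.IsSetRateLimitingSampling():
		return fmt.Sprintf("rate limiting, %d traces/s", resp.GetRateLimitingSampling().GetMaxTracesPerSecond())
	case resp.IsSetOperationSampling():
		per := resp.GetOperationSampling()
		return fmt.Sprintf("per-operation, default probability %g over %d operations",
			per.GetDefaultSamplingProbability(), len(per.GetPerOperationStrategies()))
	default:
		return "no concrete strategy set"
	}
}

func main() {
	// Illustrative response, as if returned by the sampling manager.
	resp := sampling.NewSamplingStrategyResponse()
	resp.ProbabilisticSampling = &sampling.ProbabilisticSamplingStrategy{SamplingRate: 0.01}
	fmt.Println(describeStrategy(resp)) // probabilistic, samplingRate=0.01
}
```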
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go
new file mode 100644
index 0000000..ebf4301
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package zipkincore
+
+var GoUnusedProtection__ int;
+
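
The file deleted in the next hunk defines the zipkincore structs (Endpoint, Annotation, BinaryAnnotation, Span) that jaeger-client-go's Zipkin-compatible transport serialises over Thrift. Below is a minimal sketch of how they are typically populated, assuming the regenerated package keeps the same import path, constructors, and field names; the service name, span name, ids, and the "sr" (server-receive) annotation value are illustrative only:

```go
package main

import (
	"fmt"
	"time"

	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)

func main() {
	// Host endpoint recorded on every annotation (illustrative values).
	host := zipkincore.NewEndpoint()
	host.ServiceName = "openolt-adapter"
	host.Ipv4 = (10 << 24) | (0 << 16) | (0 << 8) | 1 // 10.0.0.1 packed into 4 bytes
	host.Port = 9191

	// Zipkin timestamps are microseconds since epoch.
	now := time.Now().UnixNano() / int64(time.Microsecond)

	span := zipkincore.NewSpan()
	span.TraceID = 1
	span.ID = 1
	span.Name = "activate-onu"
	span.Timestamp = &now
	span.Annotations = []*zipkincore.Annotation{
		{Timestamp: now, Value: "sr", Host: host}, // conventional server-receive marker
	}
	span.BinaryAnnotations = []*zipkincore.BinaryAnnotation{
		{Key: "component", Value: []byte("openolt"), AnnotationType: zipkincore.AnnotationType_STRING, Host: host},
	}

	fmt.Printf("%v\n", span)
}
```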
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
deleted file mode 100644
index 15583e5..0000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
+++ /dev/null
@@ -1,1337 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package zipkincore
-
-import (
-	"bytes"
-	"fmt"
-	"github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-var GoUnusedProtection__ int
-
-type AnnotationType int64
-
-const (
-	AnnotationType_BOOL   AnnotationType = 0
-	AnnotationType_BYTES  AnnotationType = 1
-	AnnotationType_I16    AnnotationType = 2
-	AnnotationType_I32    AnnotationType = 3
-	AnnotationType_I64    AnnotationType = 4
-	AnnotationType_DOUBLE AnnotationType = 5
-	AnnotationType_STRING AnnotationType = 6
-)
-
-func (p AnnotationType) String() string {
-	switch p {
-	case AnnotationType_BOOL:
-		return "BOOL"
-	case AnnotationType_BYTES:
-		return "BYTES"
-	case AnnotationType_I16:
-		return "I16"
-	case AnnotationType_I32:
-		return "I32"
-	case AnnotationType_I64:
-		return "I64"
-	case AnnotationType_DOUBLE:
-		return "DOUBLE"
-	case AnnotationType_STRING:
-		return "STRING"
-	}
-	return "<UNSET>"
-}
-
-func AnnotationTypeFromString(s string) (AnnotationType, error) {
-	switch s {
-	case "BOOL":
-		return AnnotationType_BOOL, nil
-	case "BYTES":
-		return AnnotationType_BYTES, nil
-	case "I16":
-		return AnnotationType_I16, nil
-	case "I32":
-		return AnnotationType_I32, nil
-	case "I64":
-		return AnnotationType_I64, nil
-	case "DOUBLE":
-		return AnnotationType_DOUBLE, nil
-	case "STRING":
-		return AnnotationType_STRING, nil
-	}
-	return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
-}
-
-func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
-
-func (p AnnotationType) MarshalText() ([]byte, error) {
-	return []byte(p.String()), nil
-}
-
-func (p *AnnotationType) UnmarshalText(text []byte) error {
-	q, err := AnnotationTypeFromString(string(text))
-	if err != nil {
-		return err
-	}
-	*p = q
-	return nil
-}
-
-// Indicates the network context of a service recording an annotation with two
-// exceptions.
-//
-// When this is a BinaryAnnotation whose key is CLIENT_ADDR or SERVER_ADDR,
-// the endpoint indicates the source or destination of an RPC. This exception
-// allows zipkin to display network context of uninstrumented services, or
-// clients such as web browsers.
-//
-// Attributes:
-//  - Ipv4: IPv4 host address packed into 4 bytes.
-//
-// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4
-//  - Port: IPv4 port
-//
-// Note: this is to be treated as an unsigned integer, so watch for negatives.
-//
-// Conventionally, when the port isn't known, port = 0.
-//  - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web"
-//
-// Conventionally, when the service name isn't known, service_name = "unknown".
-//  - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes()
-type Endpoint struct {
-	Ipv4        int32  `thrift:"ipv4,1" json:"ipv4"`
-	Port        int16  `thrift:"port,2" json:"port"`
-	ServiceName string `thrift:"service_name,3" json:"service_name"`
-	Ipv6        []byte `thrift:"ipv6,4" json:"ipv6,omitempty"`
-}
-
-func NewEndpoint() *Endpoint {
-	return &Endpoint{}
-}
-
-func (p *Endpoint) GetIpv4() int32 {
-	return p.Ipv4
-}
-
-func (p *Endpoint) GetPort() int16 {
-	return p.Port
-}
-
-func (p *Endpoint) GetServiceName() string {
-	return p.ServiceName
-}
-
-var Endpoint_Ipv6_DEFAULT []byte
-
-func (p *Endpoint) GetIpv6() []byte {
-	return p.Ipv6
-}
-func (p *Endpoint) IsSetIpv6() bool {
-	return p.Ipv6 != nil
-}
-
-func (p *Endpoint) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-		case 2:
-			if err := p.readField2(iprot); err != nil {
-				return err
-			}
-		case 3:
-			if err := p.readField3(iprot); err != nil {
-				return err
-			}
-		case 4:
-			if err := p.readField4(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *Endpoint) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI32(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Ipv4 = v
-	}
-	return nil
-}
-
-func (p *Endpoint) readField2(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI16(); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		p.Port = v
-	}
-	return nil
-}
-
-func (p *Endpoint) readField3(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		p.ServiceName = v
-	}
-	return nil
-}
-
-func (p *Endpoint) readField4(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadBinary(); err != nil {
-		return thrift.PrependError("error reading field 4: ", err)
-	} else {
-		p.Ipv6 = v
-	}
-	return nil
-}
-
-func (p *Endpoint) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("Endpoint"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField2(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField3(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField4(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Endpoint) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("ipv4", thrift.I32, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err)
-	}
-	if err := oprot.WriteI32(int32(p.Ipv4)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err)
-	}
-	return err
-}
-
-func (p *Endpoint) writeField2(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("port", thrift.I16, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err)
-	}
-	if err := oprot.WriteI16(int16(p.Port)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err)
-	}
-	return err
-}
-
-func (p *Endpoint) writeField3(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("service_name", thrift.STRING, 3); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err)
-	}
-	if err := oprot.WriteString(string(p.ServiceName)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err)
-	}
-	return err
-}
-
-func (p *Endpoint) writeField4(oprot thrift.TProtocol) (err error) {
-	if p.IsSetIpv6() {
-		if err := oprot.WriteFieldBegin("ipv6", thrift.STRING, 4); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err)
-		}
-		if err := oprot.WriteBinary(p.Ipv6); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Endpoint) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Endpoint(%+v)", *p)
-}
-
-// An annotation is similar to a log statement. It includes a host field which
-// allows these events to be attributed properly, and also aggregatable.
-//
-// Attributes:
-//  - Timestamp: Microseconds from epoch.
-//
-// This value should use the most precise value possible. For example,
-// gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
-//  - Value
-//  - Host: Always the host that recorded the event. By specifying the host you allow
-// rollup of all events (such as client requests to a service) by IP address.
-type Annotation struct {
-	Timestamp int64     `thrift:"timestamp,1" json:"timestamp"`
-	Value     string    `thrift:"value,2" json:"value"`
-	Host      *Endpoint `thrift:"host,3" json:"host,omitempty"`
-}
-
-func NewAnnotation() *Annotation {
-	return &Annotation{}
-}
-
-func (p *Annotation) GetTimestamp() int64 {
-	return p.Timestamp
-}
-
-func (p *Annotation) GetValue() string {
-	return p.Value
-}
-
-var Annotation_Host_DEFAULT *Endpoint
-
-func (p *Annotation) GetHost() *Endpoint {
-	if !p.IsSetHost() {
-		return Annotation_Host_DEFAULT
-	}
-	return p.Host
-}
-func (p *Annotation) IsSetHost() bool {
-	return p.Host != nil
-}
-
-func (p *Annotation) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-		case 2:
-			if err := p.readField2(iprot); err != nil {
-				return err
-			}
-		case 3:
-			if err := p.readField3(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *Annotation) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Timestamp = v
-	}
-	return nil
-}
-
-func (p *Annotation) readField2(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		p.Value = v
-	}
-	return nil
-}
-
-func (p *Annotation) readField3(iprot thrift.TProtocol) error {
-	p.Host = &Endpoint{}
-	if err := p.Host.Read(iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
-	}
-	return nil
-}
-
-func (p *Annotation) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("Annotation"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField2(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField3(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Annotation) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.Timestamp)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
-	}
-	return err
-}
-
-func (p *Annotation) writeField2(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
-	}
-	if err := oprot.WriteString(string(p.Value)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
-	}
-	return err
-}
-
-func (p *Annotation) writeField3(oprot thrift.TProtocol) (err error) {
-	if p.IsSetHost() {
-		if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 3); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err)
-		}
-		if err := p.Host.Write(oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Annotation) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Annotation(%+v)", *p)
-}
-
-// Binary annotations are tags applied to a Span to give it context. For
-// example, a binary annotation of "http.uri" could be the path to a resource in
-// an RPC call.
-//
-// Binary annotations of type STRING are always queryable, though more a
-// historical implementation detail than a structural concern.
-//
-// Binary annotations can repeat, and vary on the host. Similar to Annotation,
-// the host indicates who logged the event. This allows you to tell the
-// difference between the client and server side of the same key. For example,
-// the key "http.uri" might be different on the client and server side due to
-// rewriting, like "/api/v1/myresource" vs "/myresource". Via the host field,
-// you can see the different points of view, which often help in debugging.
-//
-// Attributes:
-//  - Key
-//  - Value
-//  - AnnotationType
-//  - Host: The host that recorded tag, which allows you to differentiate between
-// multiple tags with the same key. There are two exceptions to this.
-//
-// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or
-// destination of an RPC. This exception allows zipkin to display network
-// context of uninstrumented services, or clients such as web browsers.
-type BinaryAnnotation struct {
-	Key            string         `thrift:"key,1" json:"key"`
-	Value          []byte         `thrift:"value,2" json:"value"`
-	AnnotationType AnnotationType `thrift:"annotation_type,3" json:"annotation_type"`
-	Host           *Endpoint      `thrift:"host,4" json:"host,omitempty"`
-}
-
-func NewBinaryAnnotation() *BinaryAnnotation {
-	return &BinaryAnnotation{}
-}
-
-func (p *BinaryAnnotation) GetKey() string {
-	return p.Key
-}
-
-func (p *BinaryAnnotation) GetValue() []byte {
-	return p.Value
-}
-
-func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
-	return p.AnnotationType
-}
-
-var BinaryAnnotation_Host_DEFAULT *Endpoint
-
-func (p *BinaryAnnotation) GetHost() *Endpoint {
-	if !p.IsSetHost() {
-		return BinaryAnnotation_Host_DEFAULT
-	}
-	return p.Host
-}
-func (p *BinaryAnnotation) IsSetHost() bool {
-	return p.Host != nil
-}
-
-func (p *BinaryAnnotation) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-		case 2:
-			if err := p.readField2(iprot); err != nil {
-				return err
-			}
-		case 3:
-			if err := p.readField3(iprot); err != nil {
-				return err
-			}
-		case 4:
-			if err := p.readField4(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *BinaryAnnotation) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Key = v
-	}
-	return nil
-}
-
-func (p *BinaryAnnotation) readField2(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadBinary(); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		p.Value = v
-	}
-	return nil
-}
-
-func (p *BinaryAnnotation) readField3(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI32(); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		temp := AnnotationType(v)
-		p.AnnotationType = temp
-	}
-	return nil
-}
-
-func (p *BinaryAnnotation) readField4(iprot thrift.TProtocol) error {
-	p.Host = &Endpoint{}
-	if err := p.Host.Read(iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
-	}
-	return nil
-}
-
-func (p *BinaryAnnotation) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("BinaryAnnotation"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField2(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField3(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField4(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *BinaryAnnotation) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
-	}
-	if err := oprot.WriteString(string(p.Key)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
-	}
-	return err
-}
-
-func (p *BinaryAnnotation) writeField2(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
-	}
-	if err := oprot.WriteBinary(p.Value); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
-	}
-	return err
-}
-
-func (p *BinaryAnnotation) writeField3(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("annotation_type", thrift.I32, 3); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err)
-	}
-	if err := oprot.WriteI32(int32(p.AnnotationType)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err)
-	}
-	return err
-}
-
-func (p *BinaryAnnotation) writeField4(oprot thrift.TProtocol) (err error) {
-	if p.IsSetHost() {
-		if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 4); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err)
-		}
-		if err := p.Host.Write(oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *BinaryAnnotation) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
-}
-
-// A trace is a series of spans (often RPC calls) which form a latency tree.
-//
-// The root span is where trace_id = id and parent_id = Nil. The root span is
-// usually the longest interval in the trace, starting with a SERVER_RECV
-// annotation and ending with a SERVER_SEND.
-//
-// Attributes:
-//  - TraceID
-//  - Name: Span name in lowercase, rpc method for example
-//
-// Conventionally, when the span name isn't known, name = "unknown".
-//  - ID
-//  - ParentID
-//  - Annotations
-//  - BinaryAnnotations
-//  - Debug
-//  - Timestamp: Microseconds from epoch of the creation of this span.
-//
-// This value should be set directly by instrumentation, using the most
-// precise value possible. For example, gettimeofday or syncing nanoTime
-// against a tick of currentTimeMillis.
-//
-// For compatibility with instrumentation that precede this field, collectors
-// or span stores can derive this via Annotation.timestamp.
-// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
-//
-// This field is optional for compatibility with old data: first-party span
-// stores are expected to support this at time of introduction.
-//  - Duration: Measurement of duration in microseconds, used to support queries.
-//
-// This value should be set directly, where possible. Doing so encourages
-// precise measurement decoupled from problems of clocks, such as skew or NTP
-// updates causing time to move backwards.
-//
-// For compatibility with instrumentation that precede this field, collectors
-// or span stores can derive this by subtracting Annotation.timestamp.
-// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
-//
-// If this field is persisted as unset, zipkin will continue to work, except
-// duration query support will be implementation-specific. Similarly, setting
-// this field non-atomically is implementation-specific.
-//
-// This field is i64 vs i32 to support spans longer than 35 minutes.
-//  - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this
-// means the trace uses 128 bit traceIds instead of 64 bit.
-type Span struct {
-	TraceID int64 `thrift:"trace_id,1" json:"trace_id"`
-	// unused field # 2
-	Name        string        `thrift:"name,3" json:"name"`
-	ID          int64         `thrift:"id,4" json:"id"`
-	ParentID    *int64        `thrift:"parent_id,5" json:"parent_id,omitempty"`
-	Annotations []*Annotation `thrift:"annotations,6" json:"annotations"`
-	// unused field # 7
-	BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" json:"binary_annotations"`
-	Debug             bool                `thrift:"debug,9" json:"debug,omitempty"`
-	Timestamp         *int64              `thrift:"timestamp,10" json:"timestamp,omitempty"`
-	Duration          *int64              `thrift:"duration,11" json:"duration,omitempty"`
-	TraceIDHigh       *int64              `thrift:"trace_id_high,12" json:"trace_id_high,omitempty"`
-}
-
-func NewSpan() *Span {
-	return &Span{}
-}
-
-func (p *Span) GetTraceID() int64 {
-	return p.TraceID
-}
-
-func (p *Span) GetName() string {
-	return p.Name
-}
-
-func (p *Span) GetID() int64 {
-	return p.ID
-}
-
-var Span_ParentID_DEFAULT int64
-
-func (p *Span) GetParentID() int64 {
-	if !p.IsSetParentID() {
-		return Span_ParentID_DEFAULT
-	}
-	return *p.ParentID
-}
-
-func (p *Span) GetAnnotations() []*Annotation {
-	return p.Annotations
-}
-
-func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
-	return p.BinaryAnnotations
-}
-
-var Span_Debug_DEFAULT bool = false
-
-func (p *Span) GetDebug() bool {
-	return p.Debug
-}
-
-var Span_Timestamp_DEFAULT int64
-
-func (p *Span) GetTimestamp() int64 {
-	if !p.IsSetTimestamp() {
-		return Span_Timestamp_DEFAULT
-	}
-	return *p.Timestamp
-}
-
-var Span_Duration_DEFAULT int64
-
-func (p *Span) GetDuration() int64 {
-	if !p.IsSetDuration() {
-		return Span_Duration_DEFAULT
-	}
-	return *p.Duration
-}
-
-var Span_TraceIDHigh_DEFAULT int64
-
-func (p *Span) GetTraceIDHigh() int64 {
-	if !p.IsSetTraceIDHigh() {
-		return Span_TraceIDHigh_DEFAULT
-	}
-	return *p.TraceIDHigh
-}
-func (p *Span) IsSetParentID() bool {
-	return p.ParentID != nil
-}
-
-func (p *Span) IsSetDebug() bool {
-	return p.Debug != Span_Debug_DEFAULT
-}
-
-func (p *Span) IsSetTimestamp() bool {
-	return p.Timestamp != nil
-}
-
-func (p *Span) IsSetDuration() bool {
-	return p.Duration != nil
-}
-
-func (p *Span) IsSetTraceIDHigh() bool {
-	return p.TraceIDHigh != nil
-}
-
-func (p *Span) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-		case 3:
-			if err := p.readField3(iprot); err != nil {
-				return err
-			}
-		case 4:
-			if err := p.readField4(iprot); err != nil {
-				return err
-			}
-		case 5:
-			if err := p.readField5(iprot); err != nil {
-				return err
-			}
-		case 6:
-			if err := p.readField6(iprot); err != nil {
-				return err
-			}
-		case 8:
-			if err := p.readField8(iprot); err != nil {
-				return err
-			}
-		case 9:
-			if err := p.readField9(iprot); err != nil {
-				return err
-			}
-		case 10:
-			if err := p.readField10(iprot); err != nil {
-				return err
-			}
-		case 11:
-			if err := p.readField11(iprot); err != nil {
-				return err
-			}
-		case 12:
-			if err := p.readField12(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *Span) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.TraceID = v
-	}
-	return nil
-}
-
-func (p *Span) readField3(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		p.Name = v
-	}
-	return nil
-}
-
-func (p *Span) readField4(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 4: ", err)
-	} else {
-		p.ID = v
-	}
-	return nil
-}
-
-func (p *Span) readField5(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 5: ", err)
-	} else {
-		p.ParentID = &v
-	}
-	return nil
-}
-
-func (p *Span) readField6(iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin()
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Annotation, 0, size)
-	p.Annotations = tSlice
-	for i := 0; i < size; i++ {
-		_elem0 := &Annotation{}
-		if err := _elem0.Read(iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
-		}
-		p.Annotations = append(p.Annotations, _elem0)
-	}
-	if err := iprot.ReadListEnd(); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Span) readField8(iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin()
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*BinaryAnnotation, 0, size)
-	p.BinaryAnnotations = tSlice
-	for i := 0; i < size; i++ {
-		_elem1 := &BinaryAnnotation{}
-		if err := _elem1.Read(iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
-		}
-		p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
-	}
-	if err := iprot.ReadListEnd(); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Span) readField9(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadBool(); err != nil {
-		return thrift.PrependError("error reading field 9: ", err)
-	} else {
-		p.Debug = v
-	}
-	return nil
-}
-
-func (p *Span) readField10(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 10: ", err)
-	} else {
-		p.Timestamp = &v
-	}
-	return nil
-}
-
-func (p *Span) readField11(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 11: ", err)
-	} else {
-		p.Duration = &v
-	}
-	return nil
-}
-
-func (p *Span) readField12(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(); err != nil {
-		return thrift.PrependError("error reading field 12: ", err)
-	} else {
-		p.TraceIDHigh = &v
-	}
-	return nil
-}
-
-func (p *Span) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("Span"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField3(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField4(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField5(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField6(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField8(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField9(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField10(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField11(oprot); err != nil {
-		return err
-	}
-	if err := p.writeField12(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Span) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("trace_id", thrift.I64, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.TraceID)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField3(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("name", thrift.STRING, 3); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err)
-	}
-	if err := oprot.WriteString(string(p.Name)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField4(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("id", thrift.I64, 4); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err)
-	}
-	if err := oprot.WriteI64(int64(p.ID)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField5(oprot thrift.TProtocol) (err error) {
-	if p.IsSetParentID() {
-		if err := oprot.WriteFieldBegin("parent_id", thrift.I64, 5); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err)
-		}
-		if err := oprot.WriteI64(int64(*p.ParentID)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) writeField6(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("annotations", thrift.LIST, 6); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err)
-	}
-	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Annotations)); err != nil {
-		return thrift.PrependError("error writing list begin: ", err)
-	}
-	for _, v := range p.Annotations {
-		if err := v.Write(oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-		}
-	}
-	if err := oprot.WriteListEnd(); err != nil {
-		return thrift.PrependError("error writing list end: ", err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField8(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("binary_annotations", thrift.LIST, 8); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err)
-	}
-	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
-		return thrift.PrependError("error writing list begin: ", err)
-	}
-	for _, v := range p.BinaryAnnotations {
-		if err := v.Write(oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-		}
-	}
-	if err := oprot.WriteListEnd(); err != nil {
-		return thrift.PrependError("error writing list end: ", err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField9(oprot thrift.TProtocol) (err error) {
-	if p.IsSetDebug() {
-		if err := oprot.WriteFieldBegin("debug", thrift.BOOL, 9); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err)
-		}
-		if err := oprot.WriteBool(bool(p.Debug)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) writeField10(oprot thrift.TProtocol) (err error) {
-	if p.IsSetTimestamp() {
-		if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 10); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err)
-		}
-		if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) writeField11(oprot thrift.TProtocol) (err error) {
-	if p.IsSetDuration() {
-		if err := oprot.WriteFieldBegin("duration", thrift.I64, 11); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err)
-		}
-		if err := oprot.WriteI64(int64(*p.Duration)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) writeField12(oprot thrift.TProtocol) (err error) {
-	if p.IsSetTraceIDHigh() {
-		if err := oprot.WriteFieldBegin("trace_id_high", thrift.I64, 12); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err)
-		}
-		if err := oprot.WriteI64(int64(*p.TraceIDHigh)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Span(%+v)", *p)
-}
-
-// Attributes:
-//  - Ok
-type Response struct {
-	Ok bool `thrift:"ok,1,required" json:"ok"`
-}
-
-func NewResponse() *Response {
-	return &Response{}
-}
-
-func (p *Response) GetOk() bool {
-	return p.Ok
-}
-func (p *Response) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetOk bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-			issetOk = true
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetOk {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
-	}
-	return nil
-}
-
-func (p *Response) readField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadBool(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Ok = v
-	}
-	return nil
-}
-
-func (p *Response) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("Response"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Response) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
-	}
-	if err := oprot.WriteBool(bool(p.Ok)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
-	}
-	return err
-}
-
-func (p *Response) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Response(%+v)", *p)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go
deleted file mode 100644
index 417e883..0000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go
+++ /dev/null
@@ -1,446 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package zipkincore
-
-import (
-	"bytes"
-	"fmt"
-	"github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-type ZipkinCollector interface {
-	// Parameters:
-	//  - Spans
-	SubmitZipkinBatch(spans []*Span) (r []*Response, err error)
-}
-
-type ZipkinCollectorClient struct {
-	Transport       thrift.TTransport
-	ProtocolFactory thrift.TProtocolFactory
-	InputProtocol   thrift.TProtocol
-	OutputProtocol  thrift.TProtocol
-	SeqId           int32
-}
-
-func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient {
-	return &ZipkinCollectorClient{Transport: t,
-		ProtocolFactory: f,
-		InputProtocol:   f.GetProtocol(t),
-		OutputProtocol:  f.GetProtocol(t),
-		SeqId:           0,
-	}
-}
-
-func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient {
-	return &ZipkinCollectorClient{Transport: t,
-		ProtocolFactory: nil,
-		InputProtocol:   iprot,
-		OutputProtocol:  oprot,
-		SeqId:           0,
-	}
-}
-
-// Parameters:
-//  - Spans
-func (p *ZipkinCollectorClient) SubmitZipkinBatch(spans []*Span) (r []*Response, err error) {
-	if err = p.sendSubmitZipkinBatch(spans); err != nil {
-		return
-	}
-	return p.recvSubmitZipkinBatch()
-}
-
-func (p *ZipkinCollectorClient) sendSubmitZipkinBatch(spans []*Span) (err error) {
-	oprot := p.OutputProtocol
-	if oprot == nil {
-		oprot = p.ProtocolFactory.GetProtocol(p.Transport)
-		p.OutputProtocol = oprot
-	}
-	p.SeqId++
-	if err = oprot.WriteMessageBegin("submitZipkinBatch", thrift.CALL, p.SeqId); err != nil {
-		return
-	}
-	args := ZipkinCollectorSubmitZipkinBatchArgs{
-		Spans: spans,
-	}
-	if err = args.Write(oprot); err != nil {
-		return
-	}
-	if err = oprot.WriteMessageEnd(); err != nil {
-		return
-	}
-	return oprot.Flush()
-}
-
-func (p *ZipkinCollectorClient) recvSubmitZipkinBatch() (value []*Response, err error) {
-	iprot := p.InputProtocol
-	if iprot == nil {
-		iprot = p.ProtocolFactory.GetProtocol(p.Transport)
-		p.InputProtocol = iprot
-	}
-	method, mTypeId, seqId, err := iprot.ReadMessageBegin()
-	if err != nil {
-		return
-	}
-	if method != "submitZipkinBatch" {
-		err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "submitZipkinBatch failed: wrong method name")
-		return
-	}
-	if p.SeqId != seqId {
-		err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "submitZipkinBatch failed: out of sequence response")
-		return
-	}
-	if mTypeId == thrift.EXCEPTION {
-		error2 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
-		var error3 error
-		error3, err = error2.Read(iprot)
-		if err != nil {
-			return
-		}
-		if err = iprot.ReadMessageEnd(); err != nil {
-			return
-		}
-		err = error3
-		return
-	}
-	if mTypeId != thrift.REPLY {
-		err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "submitZipkinBatch failed: invalid message type")
-		return
-	}
-	result := ZipkinCollectorSubmitZipkinBatchResult{}
-	if err = result.Read(iprot); err != nil {
-		return
-	}
-	if err = iprot.ReadMessageEnd(); err != nil {
-		return
-	}
-	value = result.GetSuccess()
-	return
-}
-
-type ZipkinCollectorProcessor struct {
-	processorMap map[string]thrift.TProcessorFunction
-	handler      ZipkinCollector
-}
-
-func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
-	p.processorMap[key] = processor
-}
-
-func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
-	processor, ok = p.processorMap[key]
-	return processor, ok
-}
-
-func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
-	return p.processorMap
-}
-
-func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor {
-
-	self4 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
-	self4.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler}
-	return self4
-}
-
-func (p *ZipkinCollectorProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	name, _, seqId, err := iprot.ReadMessageBegin()
-	if err != nil {
-		return false, err
-	}
-	if processor, ok := p.GetProcessorFunction(name); ok {
-		return processor.Process(seqId, iprot, oprot)
-	}
-	iprot.Skip(thrift.STRUCT)
-	iprot.ReadMessageEnd()
-	x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
-	oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
-	x5.Write(oprot)
-	oprot.WriteMessageEnd()
-	oprot.Flush()
-	return false, x5
-
-}
-
-type zipkinCollectorProcessorSubmitZipkinBatch struct {
-	handler ZipkinCollector
-}
-
-func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	args := ZipkinCollectorSubmitZipkinBatchArgs{}
-	if err = args.Read(iprot); err != nil {
-		iprot.ReadMessageEnd()
-		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
-		oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId)
-		x.Write(oprot)
-		oprot.WriteMessageEnd()
-		oprot.Flush()
-		return false, err
-	}
-
-	iprot.ReadMessageEnd()
-	result := ZipkinCollectorSubmitZipkinBatchResult{}
-	var retval []*Response
-	var err2 error
-	if retval, err2 = p.handler.SubmitZipkinBatch(args.Spans); err2 != nil {
-		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error())
-		oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId)
-		x.Write(oprot)
-		oprot.WriteMessageEnd()
-		oprot.Flush()
-		return true, err2
-	} else {
-		result.Success = retval
-	}
-	if err2 = oprot.WriteMessageBegin("submitZipkinBatch", thrift.REPLY, seqId); err2 != nil {
-		err = err2
-	}
-	if err2 = result.Write(oprot); err == nil && err2 != nil {
-		err = err2
-	}
-	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
-		err = err2
-	}
-	if err2 = oprot.Flush(); err == nil && err2 != nil {
-		err = err2
-	}
-	if err != nil {
-		return
-	}
-	return true, err
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-//  - Spans
-type ZipkinCollectorSubmitZipkinBatchArgs struct {
-	Spans []*Span `thrift:"spans,1" json:"spans"`
-}
-
-func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs {
-	return &ZipkinCollectorSubmitZipkinBatchArgs{}
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span {
-	return p.Spans
-}
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if err := p.readField1(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin()
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Span, 0, size)
-	p.Spans = tSlice
-	for i := 0; i < size; i++ {
-		_elem6 := &Span{}
-		if err := _elem6.Read(iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err)
-		}
-		p.Spans = append(p.Spans, _elem6)
-	}
-	if err := iprot.ReadListEnd(); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("submitZipkinBatch_args"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField1(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
-	}
-	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
-		return thrift.PrependError("error writing list begin: ", err)
-	}
-	for _, v := range p.Spans {
-		if err := v.Write(oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-		}
-	}
-	if err := oprot.WriteListEnd(); err != nil {
-		return thrift.PrependError("error writing list end: ", err)
-	}
-	if err := oprot.WriteFieldEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
-	}
-	return err
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p)
-}
-
-// Attributes:
-//  - Success
-type ZipkinCollectorSubmitZipkinBatchResult struct {
-	Success []*Response `thrift:"success,0" json:"success,omitempty"`
-}
-
-func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult {
-	return &ZipkinCollectorSubmitZipkinBatchResult{}
-}
-
-var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response {
-	return p.Success
-}
-func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool {
-	return p.Success != nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 0:
-			if err := p.readField0(iprot); err != nil {
-				return err
-			}
-		default:
-			if err := iprot.Skip(fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) readField0(iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin()
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Response, 0, size)
-	p.Success = tSlice
-	for i := 0; i < size; i++ {
-		_elem7 := &Response{}
-		if err := _elem7.Read(iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err)
-		}
-		p.Success = append(p.Success, _elem7)
-	}
-	if err := iprot.ReadListEnd(); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin("submitZipkinBatch_result"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if err := p.writeField0(oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteFieldStop(); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(oprot thrift.TProtocol) (err error) {
-	if p.IsSetSuccess() {
-		if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
-		}
-		if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil {
-			return thrift.PrependError("error writing list begin: ", err)
-		}
-		for _, v := range p.Success {
-			if err := v.Write(oprot); err != nil {
-				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-			}
-		}
-		if err := oprot.WriteListEnd(); err != nil {
-			return thrift.PrependError("error writing list end: ", err)
-		}
-		if err := oprot.WriteFieldEnd(); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go
similarity index 83%
rename from vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go
rename to vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go
index a53d46f..7a924b9 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go
@@ -1,17 +1,20 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
 
 package zipkincore
 
-import (
+import(
 	"bytes"
+	"context"
 	"fmt"
+	"time"
 	"github.com/uber/jaeger-client-go/thrift"
 )
 
 // (needed to ensure safety because of naive import list construction.)
 var _ = thrift.ZERO
 var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
 var _ = bytes.Equal
 
 const CLIENT_SEND = "cs"
@@ -33,3 +36,4 @@
 
 func init() {
 }
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go
new file mode 100644
index 0000000..b00ecd2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go
@@ -0,0 +1,1853 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package zipkincore
+
+import(
+	"bytes"
+	"context"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"time"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+type AnnotationType int64
+const (
+  AnnotationType_BOOL AnnotationType = 0
+  AnnotationType_BYTES AnnotationType = 1
+  AnnotationType_I16 AnnotationType = 2
+  AnnotationType_I32 AnnotationType = 3
+  AnnotationType_I64 AnnotationType = 4
+  AnnotationType_DOUBLE AnnotationType = 5
+  AnnotationType_STRING AnnotationType = 6
+)
+
+func (p AnnotationType) String() string {
+  switch p {
+  case AnnotationType_BOOL: return "BOOL"
+  case AnnotationType_BYTES: return "BYTES"
+  case AnnotationType_I16: return "I16"
+  case AnnotationType_I32: return "I32"
+  case AnnotationType_I64: return "I64"
+  case AnnotationType_DOUBLE: return "DOUBLE"
+  case AnnotationType_STRING: return "STRING"
+  }
+  return "<UNSET>"
+}
+
+func AnnotationTypeFromString(s string) (AnnotationType, error) {
+  switch s {
+  case "BOOL": return AnnotationType_BOOL, nil 
+  case "BYTES": return AnnotationType_BYTES, nil 
+  case "I16": return AnnotationType_I16, nil 
+  case "I32": return AnnotationType_I32, nil 
+  case "I64": return AnnotationType_I64, nil 
+  case "DOUBLE": return AnnotationType_DOUBLE, nil 
+  case "STRING": return AnnotationType_STRING, nil 
+  }
+  return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
+}
+
+
+func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
+
+func (p AnnotationType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *AnnotationType) UnmarshalText(text []byte) error {
+q, err := AnnotationTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *AnnotationType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = AnnotationType(v)
+return nil
+}
+
+func (p * AnnotationType) Value() (driver.Value, error) {
+  if p == nil {
+    return nil, nil
+  }
+return int64(*p), nil
+}
+// Indicates the network context of a service recording an annotation with two
+// exceptions.
+// 
+// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR,
+// the endpoint indicates the source or destination of an RPC. This exception
+// allows zipkin to display network context of uninstrumented services, or
+// clients such as web browsers.
+// 
+// Attributes:
+//  - Ipv4: IPv4 host address packed into 4 bytes.
+// 
+// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4
+//  - Port: IPv4 port
+// 
+// Note: this is to be treated as an unsigned integer, so watch for negatives.
+// 
+// Conventionally, when the port isn't known, port = 0.
+//  - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web"
+// 
+// Conventionally, when the service name isn't known, service_name = "unknown".
+//  - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes()
+type Endpoint struct {
+  Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"`
+  Port int16 `thrift:"port,2" db:"port" json:"port"`
+  ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"`
+  Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"`
+}
+
+func NewEndpoint() *Endpoint {
+  return &Endpoint{}
+}
+
+
+func (p *Endpoint) GetIpv4() int32 {
+  return p.Ipv4
+}
+
+func (p *Endpoint) GetPort() int16 {
+  return p.Port
+}
+
+func (p *Endpoint) GetServiceName() string {
+  return p.ServiceName
+}
+var Endpoint_Ipv6_DEFAULT []byte
+
+func (p *Endpoint) GetIpv6() []byte {
+  return p.Ipv6
+}
+func (p *Endpoint) IsSetIpv6() bool {
+  return p.Ipv6 != nil
+}
+
+func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I16 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *Endpoint)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Ipv4 = v
+}
+  return nil
+}
+
+func (p *Endpoint)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI16(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.Port = v
+}
+  return nil
+}
+
+func (p *Endpoint)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.ServiceName = v
+}
+  return nil
+}
+
+func (p *Endpoint)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBinary(ctx); err != nil {
+  return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.Ipv6 = v
+}
+  return nil
+}
+
+func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) }
+  return err
+}
+
+func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) }
+  if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) }
+  return err
+}
+
+func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) }
+  return err
+}
+
+func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetIpv6() {
+    if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) }
+    if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) }
+  }
+  return err
+}
+
+func (p *Endpoint) Equals(other *Endpoint) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Ipv4 != other.Ipv4 { return false }
+  if p.Port != other.Port { return false }
+  if p.ServiceName != other.ServiceName { return false }
+  if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { return false }
+  return true
+}
+
+func (p *Endpoint) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Endpoint(%+v)", *p)
+}
+
+// An annotation is similar to a log statement. It includes a host field which
+// allows these events to be attributed properly, and also aggregatable.
+// 
+// Attributes:
+//  - Timestamp: Microseconds from epoch.
+// 
+// This value should use the most precise value possible. For example,
+// gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
+//  - Value
+//  - Host: Always the host that recorded the event. By specifying the host you allow
+// rollup of all events (such as client requests to a service) by IP address.
+type Annotation struct {
+  Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"`
+  Value string `thrift:"value,2" db:"value" json:"value"`
+  Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"`
+}
+
+func NewAnnotation() *Annotation {
+  return &Annotation{}
+}
+
+
+func (p *Annotation) GetTimestamp() int64 {
+  return p.Timestamp
+}
+
+func (p *Annotation) GetValue() string {
+  return p.Value
+}
+var Annotation_Host_DEFAULT *Endpoint
+func (p *Annotation) GetHost() *Endpoint {
+  if !p.IsSetHost() {
+    return Annotation_Host_DEFAULT
+  }
+return p.Host
+}
+func (p *Annotation) IsSetHost() bool {
+  return p.Host != nil
+}
+
+func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *Annotation)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Timestamp = v
+}
+  return nil
+}
+
+func (p *Annotation)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.Value = v
+}
+  return nil
+}
+
+func (p *Annotation)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Host = &Endpoint{}
+  if err := p.Host.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+  }
+  return nil
+}
+
+func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) }
+  return err
+}
+
+func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.Value)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
+  return err
+}
+
+func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetHost() {
+    if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) }
+    if err := p.Host.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) }
+  }
+  return err
+}
+
+func (p *Annotation) Equals(other *Annotation) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Timestamp != other.Timestamp { return false }
+  if p.Value != other.Value { return false }
+  if !p.Host.Equals(other.Host) { return false }
+  return true
+}
+
+func (p *Annotation) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Annotation(%+v)", *p)
+}
+
+// Binary annotations are tags applied to a Span to give it context. For
+// example, a binary annotation of "http.uri" could the path to a resource in a
+// RPC call.
+// 
+// Binary annotations of type STRING are always queryable, though more a
+// historical implementation detail than a structural concern.
+// 
+// Binary annotations can repeat, and vary on the host. Similar to Annotation,
+// the host indicates who logged the event. This allows you to tell the
+// difference between the client and server side of the same key. For example,
+// the key "http.uri" might be different on the client and server side due to
+// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field,
+// you can see the different points of view, which often help in debugging.
+// 
+// Attributes:
+//  - Key
+//  - Value
+//  - AnnotationType
+//  - Host: The host that recorded tag, which allows you to differentiate between
+// multiple tags with the same key. There are two exceptions to this.
+// 
+// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or
+// destination of an RPC. This exception allows zipkin to display network
+// context of uninstrumented services, or clients such as web browsers.
+type BinaryAnnotation struct {
+  Key string `thrift:"key,1" db:"key" json:"key"`
+  Value []byte `thrift:"value,2" db:"value" json:"value"`
+  AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"`
+  Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"`
+}
+
+func NewBinaryAnnotation() *BinaryAnnotation {
+  return &BinaryAnnotation{}
+}
+
+
+func (p *BinaryAnnotation) GetKey() string {
+  return p.Key
+}
+
+func (p *BinaryAnnotation) GetValue() []byte {
+  return p.Value
+}
+
+func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
+  return p.AnnotationType
+}
+var BinaryAnnotation_Host_DEFAULT *Endpoint
+func (p *BinaryAnnotation) GetHost() *Endpoint {
+  if !p.IsSetHost() {
+    return BinaryAnnotation_Host_DEFAULT
+  }
+return p.Host
+}
+func (p *BinaryAnnotation) IsSetHost() bool {
+  return p.Host != nil
+}
+
+func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *BinaryAnnotation)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Key = v
+}
+  return nil
+}
+
+func (p *BinaryAnnotation)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBinary(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.Value = v
+}
+  return nil
+}
+
+func (p *BinaryAnnotation)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  temp := AnnotationType(v)
+  p.AnnotationType = temp
+}
+  return nil
+}
+
+func (p *BinaryAnnotation)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Host = &Endpoint{}
+  if err := p.Host.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+  }
+  return nil
+}
+
+func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) }
+  return err
+}
+
+func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
+  if err := oprot.WriteBinary(ctx, p.Value); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
+  return err
+}
+
+func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) }
+  return err
+}
+
+func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetHost() {
+    if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) }
+    if err := p.Host.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) }
+  }
+  return err
+}
+
+func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Key != other.Key { return false }
+  if bytes.Compare(p.Value, other.Value) != 0 { return false }
+  if p.AnnotationType != other.AnnotationType { return false }
+  if !p.Host.Equals(other.Host) { return false }
+  return true
+}
+
+func (p *BinaryAnnotation) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
+}
+
+// A trace is a series of spans (often RPC calls) which form a latency tree.
+// 
+// The root span is where trace_id = id and parent_id = Nil. The root span is
+// usually the longest interval in the trace, starting with a SERVER_RECV
+// annotation and ending with a SERVER_SEND.
+// 
+// Attributes:
+//  - TraceID
+//  - Name: Span name in lowercase, rpc method for example
+// 
+// Conventionally, when the span name isn't known, name = "unknown".
+//  - ID
+//  - ParentID
+//  - Annotations
+//  - BinaryAnnotations
+//  - Debug
+//  - Timestamp: Microseconds from epoch of the creation of this span.
+// 
+// This value should be set directly by instrumentation, using the most
+// precise value possible. For example, gettimeofday or syncing nanoTime
+// against a tick of currentTimeMillis.
+// 
+// For compatibility with instrumentation that precede this field, collectors
+// or span stores can derive this via Annotation.timestamp.
+// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
+// 
+// This field is optional for compatibility with old data: first-party span
+// stores are expected to support this at time of introduction.
+//  - Duration: Measurement of duration in microseconds, used to support queries.
+// 
+// This value should be set directly, where possible. Doing so encourages
+// precise measurement decoupled from problems of clocks, such as skew or NTP
+// updates causing time to move backwards.
+// 
+// For compatibility with instrumentation that precede this field, collectors
+// or span stores can derive this by subtracting Annotation.timestamp.
+// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
+// 
+// If this field is persisted as unset, zipkin will continue to work, except
+// duration query support will be implementation-specific. Similarly, setting
+// this field non-atomically is implementation-specific.
+// 
+// This field is i64 vs i32 to support spans longer than 35 minutes.
+//  - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this
+// means the trace uses 128 bit traceIds instead of 64 bit.
+type Span struct {
+  TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"`
+  // unused field # 2
+  Name string `thrift:"name,3" db:"name" json:"name"`
+  ID int64 `thrift:"id,4" db:"id" json:"id"`
+  ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"`
+  Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"`
+  // unused field # 7
+  BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"`
+  Debug bool `thrift:"debug,9" db:"debug" json:"debug"`
+  Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"`
+  Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"`
+  TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"`
+}
+
+func NewSpan() *Span {
+  return &Span{}
+}
+
+
+func (p *Span) GetTraceID() int64 {
+  return p.TraceID
+}
+
+func (p *Span) GetName() string {
+  return p.Name
+}
+
+func (p *Span) GetID() int64 {
+  return p.ID
+}
+var Span_ParentID_DEFAULT int64
+func (p *Span) GetParentID() int64 {
+  if !p.IsSetParentID() {
+    return Span_ParentID_DEFAULT
+  }
+return *p.ParentID
+}
+
+func (p *Span) GetAnnotations() []*Annotation {
+  return p.Annotations
+}
+
+func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
+  return p.BinaryAnnotations
+}
+var Span_Debug_DEFAULT bool = false
+
+func (p *Span) GetDebug() bool {
+  return p.Debug
+}
+var Span_Timestamp_DEFAULT int64
+func (p *Span) GetTimestamp() int64 {
+  if !p.IsSetTimestamp() {
+    return Span_Timestamp_DEFAULT
+  }
+return *p.Timestamp
+}
+var Span_Duration_DEFAULT int64
+func (p *Span) GetDuration() int64 {
+  if !p.IsSetDuration() {
+    return Span_Duration_DEFAULT
+  }
+return *p.Duration
+}
+var Span_TraceIDHigh_DEFAULT int64
+func (p *Span) GetTraceIDHigh() int64 {
+  if !p.IsSetTraceIDHigh() {
+    return Span_TraceIDHigh_DEFAULT
+  }
+return *p.TraceIDHigh
+}
+func (p *Span) IsSetParentID() bool {
+  return p.ParentID != nil
+}
+
+func (p *Span) IsSetDebug() bool {
+  return p.Debug != Span_Debug_DEFAULT
+}
+
+func (p *Span) IsSetTimestamp() bool {
+  return p.Timestamp != nil
+}
+
+func (p *Span) IsSetDuration() bool {
+  return p.Duration != nil
+}
+
+func (p *Span) IsSetTraceIDHigh() bool {
+  return p.TraceIDHigh != nil
+}
+
+func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 5:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField5(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 6:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField6(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 8:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField8(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 9:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField9(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 10:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField10(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 11:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField11(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 12:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField12(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *Span)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.TraceID = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.Name = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.ID = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 5: ", err)
+} else {
+  p.ParentID = &v
+}
+  return nil
+}
+
+func (p *Span)  ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Annotation, 0, size)
+  p.Annotations =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem0 := &Annotation{}
+    if err := _elem0.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+    }
+    p.Annotations = append(p.Annotations, _elem0)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Span)  ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*BinaryAnnotation, 0, size)
+  p.BinaryAnnotations =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem1 := &BinaryAnnotation{}
+    if err := _elem1.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
+    }
+    p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Span)  ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 9: ", err)
+} else {
+  p.Debug = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 10: ", err)
+} else {
+  p.Timestamp = &v
+}
+  return nil
+}
+
+func (p *Span)  ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 11: ", err)
+} else {
+  p.Duration = &v
+}
+  return nil
+}
+
+func (p *Span)  ReadField12(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 12: ", err)
+} else {
+  p.TraceIDHigh = &v
+}
+  return nil
+}
+
+func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+    if err := p.writeField5(ctx, oprot); err != nil { return err }
+    if err := p.writeField6(ctx, oprot); err != nil { return err }
+    if err := p.writeField8(ctx, oprot); err != nil { return err }
+    if err := p.writeField9(ctx, oprot); err != nil { return err }
+    if err := p.writeField10(ctx, oprot); err != nil { return err }
+    if err := p.writeField11(ctx, oprot); err != nil { return err }
+    if err := p.writeField12(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.Name)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetParentID() {
+    if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Annotations {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.BinaryAnnotations {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetDebug() {
+    if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetTimestamp() {
+    if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetDuration() {
+    if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetTraceIDHigh() {
+    if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) Equals(other *Span) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.TraceID != other.TraceID { return false }
+  if p.Name != other.Name { return false }
+  if p.ID != other.ID { return false }
+  if p.ParentID != other.ParentID {
+    if p.ParentID == nil || other.ParentID == nil {
+      return false
+    }
+    if (*p.ParentID) != (*other.ParentID) { return false }
+  }
+  if len(p.Annotations) != len(other.Annotations) { return false }
+  for i, _tgt := range p.Annotations {
+    _src2 := other.Annotations[i]
+    if !_tgt.Equals(_src2) { return false }
+  }
+  if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { return false }
+  for i, _tgt := range p.BinaryAnnotations {
+    _src3 := other.BinaryAnnotations[i]
+    if !_tgt.Equals(_src3) { return false }
+  }
+  if p.Debug != other.Debug { return false }
+  if p.Timestamp != other.Timestamp {
+    if p.Timestamp == nil || other.Timestamp == nil {
+      return false
+    }
+    if (*p.Timestamp) != (*other.Timestamp) { return false }
+  }
+  if p.Duration != other.Duration {
+    if p.Duration == nil || other.Duration == nil {
+      return false
+    }
+    if (*p.Duration) != (*other.Duration) { return false }
+  }
+  if p.TraceIDHigh != other.TraceIDHigh {
+    if p.TraceIDHigh == nil || other.TraceIDHigh == nil {
+      return false
+    }
+    if (*p.TraceIDHigh) != (*other.TraceIDHigh) { return false }
+  }
+  return true
+}
+
+func (p *Span) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Span(%+v)", *p)
+}
+
+// Attributes:
+//  - Ok
+type Response struct {
+  Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
+}
+
+func NewResponse() *Response {
+  return &Response{}
+}
+
+
+func (p *Response) GetOk() bool {
+  return p.Ok
+}
+func (p *Response) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetOk bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetOk = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetOk{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"));
+  }
+  return nil
+}
+
+func (p *Response)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Ok = v
+}
+  return nil
+}
+
+func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Response"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Response) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) }
+  if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) }
+  return err
+}
+
+func (p *Response) Equals(other *Response) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Ok != other.Ok { return false }
+  return true
+}
+
+func (p *Response) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Response(%+v)", *p)
+}
+
+type ZipkinCollector interface {
+  // Parameters:
+  //  - Spans
+  SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error)
+}
+
+type ZipkinCollectorClient struct {
+  c thrift.TClient
+  meta thrift.ResponseMeta
+}
+
+func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient {
+  return &ZipkinCollectorClient{
+    c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+  }
+}
+
+func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient {
+  return &ZipkinCollectorClient{
+    c: thrift.NewTStandardClient(iprot, oprot),
+  }
+}
+
+func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient {
+  return &ZipkinCollectorClient{
+    c: c,
+  }
+}
+
+func (p *ZipkinCollectorClient) Client_() thrift.TClient {
+  return p.c
+}
+
+func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta {
+  return p.meta
+}
+
+func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+  p.meta = meta
+}
+
+// Parameters:
+//  - Spans
+func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) {
+  var _args4 ZipkinCollectorSubmitZipkinBatchArgs
+  _args4.Spans = spans
+  var _result6 ZipkinCollectorSubmitZipkinBatchResult
+  var _meta5 thrift.ResponseMeta
+  _meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6)
+  p.SetLastResponseMeta_(_meta5)
+  if _err != nil {
+    return
+  }
+  return _result6.GetSuccess(), nil
+}
+
+type ZipkinCollectorProcessor struct {
+  processorMap map[string]thrift.TProcessorFunction
+  handler ZipkinCollector
+}
+
+func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+  p.processorMap[key] = processor
+}
+
+func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+  processor, ok = p.processorMap[key]
+  return processor, ok
+}
+
+func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+  return p.processorMap
+}
+
+func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor {
+
+  self7 := &ZipkinCollectorProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
+  self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler:handler}
+return self7
+}
+
+func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+  if err2 != nil { return false, thrift.WrapTException(err2) }
+  if processor, ok := p.GetProcessorFunction(name); ok {
+    return processor.Process(ctx, seqId, iprot, oprot)
+  }
+  iprot.Skip(ctx, thrift.STRUCT)
+  iprot.ReadMessageEnd(ctx)
+  x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+  oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+  x8.Write(ctx, oprot)
+  oprot.WriteMessageEnd(ctx)
+  oprot.Flush(ctx)
+  return false, x8
+
+}
+
+type zipkinCollectorProcessorSubmitZipkinBatch struct {
+  handler ZipkinCollector
+}
+
+func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  args := ZipkinCollectorSubmitZipkinBatchArgs{}
+  var err2 error
+  if err2 = args.Read(ctx, iprot); err2 != nil {
+    iprot.ReadMessageEnd(ctx)
+    x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+    oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return false, thrift.WrapTException(err2)
+  }
+  iprot.ReadMessageEnd(ctx)
+
+  tickerCancel := func() {}
+  // Start a goroutine to do server side connectivity check.
+  if thrift.ServerConnectivityCheckInterval > 0 {
+    var cancel context.CancelFunc
+    ctx, cancel = context.WithCancel(ctx)
+    defer cancel()
+    var tickerCtx context.Context
+    tickerCtx, tickerCancel = context.WithCancel(context.Background())
+    defer tickerCancel()
+    go func(ctx context.Context, cancel context.CancelFunc) {
+      ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+      defer ticker.Stop()
+      for {
+        select {
+        case <-ctx.Done():
+          return
+        case <-ticker.C:
+          if !iprot.Transport().IsOpen() {
+            cancel()
+            return
+          }
+        }
+      }
+    }(tickerCtx, cancel)
+  }
+
+  result := ZipkinCollectorSubmitZipkinBatchResult{}
+  var retval []*Response
+  if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil {
+    tickerCancel()
+    if err2 == thrift.ErrAbandonRequest {
+      return false, thrift.WrapTException(err2)
+    }
+    x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: " + err2.Error())
+    oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return true, thrift.WrapTException(err2)
+  } else {
+    result.Success = retval
+  }
+  tickerCancel()
+  if err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err != nil {
+    return
+  }
+  return true, err
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - Spans
+type ZipkinCollectorSubmitZipkinBatchArgs struct {
+  Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs {
+  return &ZipkinCollectorSubmitZipkinBatchArgs{}
+}
+
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span {
+  return p.Spans
+}
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Span, 0, size)
+  p.Spans =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem9 := &Span{}
+    if err := _elem9.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err)
+    }
+    p.Spans = append(p.Spans, _elem9)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Spans {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) }
+  return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Success
+type ZipkinCollectorSubmitZipkinBatchResult struct {
+  Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult {
+  return &ZipkinCollectorSubmitZipkinBatchResult{}
+}
+
+var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response {
+  return p.Success
+}
+func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool {
+  return p.Success != nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 0:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField0(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult)  ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Response, 0, size)
+  p.Success =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem10 := &Response{}
+    if err := _elem10.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
+    }
+    p.Success = append(p.Success, _elem10)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField0(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSuccess() {
+    if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.Success {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+  }
+  return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p)
+}
+
+
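For context, a minimal sketch (not part of the patch) of exercising the generated Span serializer above against the new context-aware protocol API. It assumes the usual jaeger-client-go layout — that this generated file lives in the thrift-gen/zipkincore package — and that the vendored thrift copy includes TMemoryBuffer, as upstream's does.

```go
package main

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore" // assumed package path
)

func main() {
	ctx := context.Background()

	// In-memory transport: no collector or network needed for the round trip.
	buf := thrift.NewTMemoryBuffer()
	proto := thrift.NewTBinaryProtocolConf(buf, &thrift.TConfiguration{})

	// Write a Span through the generated Write method...
	in := &zipkincore.Span{TraceID: 1, Name: "get /ping", ID: 2}
	if err := in.Write(ctx, proto); err != nil {
		panic(err)
	}

	// ...and read it back through the generated Read method.
	out := &zipkincore.Span{}
	if err := out.Read(ctx, proto); err != nil {
		panic(err)
	}
	fmt.Println(out) // Span({TraceID:1 Name:get /ping ID:2 ...})
}
```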
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/README.md b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
index 1d8e642..c4c38ae 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/README.md
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
@@ -1,7 +1,11 @@
 # Apache Thrift
 
-This is a partial copy of Apache Thrift v0.10 (https://github.com/apache/thrift/commit/b2a4d4ae21c789b689dd162deb819665567f481c).
+This is a partial copy of Apache Thrift v0.14.1 (https://github.com/apache/thrift/commit/f6fa1794539e68ac294038ac388d6bde40a6c237).
 
-It is vendored code to avoid compatibility issues introduced in Thrift  v0.11.
+It is vendored code to avoid compatibility issues with Thrift versions.
 
-See https://github.com/jaegertracing/jaeger-client-go/pull/303.
+The file logger.go is modified to remove dependency on "testing" (see Issue #585).
+
+See:
+  * https://github.com/jaegertracing/jaeger-client-go/pull/584
+  * https://github.com/jaegertracing/jaeger-client-go/pull/303
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
index 6655cc5..32d5b01 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
@@ -19,6 +19,10 @@
 
 package thrift
 
+import (
+	"context"
+)
+
 const (
 	UNKNOWN_APPLICATION_EXCEPTION  = 0
 	UNKNOWN_METHOD                 = 1
@@ -28,14 +32,31 @@
 	MISSING_RESULT                 = 5
 	INTERNAL_ERROR                 = 6
 	PROTOCOL_ERROR                 = 7
+	INVALID_TRANSFORM              = 8
+	INVALID_PROTOCOL               = 9
+	UNSUPPORTED_CLIENT_TYPE        = 10
 )
 
+var defaultApplicationExceptionMessage = map[int32]string{
+	UNKNOWN_APPLICATION_EXCEPTION:  "unknown application exception",
+	UNKNOWN_METHOD:                 "unknown method",
+	INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type",
+	WRONG_METHOD_NAME:              "wrong method name",
+	BAD_SEQUENCE_ID:                "bad sequence ID",
+	MISSING_RESULT:                 "missing result",
+	INTERNAL_ERROR:                 "unknown internal error",
+	PROTOCOL_ERROR:                 "unknown protocol error",
+	INVALID_TRANSFORM:              "Invalid transform",
+	INVALID_PROTOCOL:               "Invalid protocol",
+	UNSUPPORTED_CLIENT_TYPE:        "Unsupported client type",
+}
+
 // Application level Thrift exception
 type TApplicationException interface {
 	TException
 	TypeId() int32
-	Read(iprot TProtocol) (TApplicationException, error)
-	Write(oprot TProtocol) error
+	Read(ctx context.Context, iprot TProtocol) error
+	Write(ctx context.Context, oprot TProtocol) error
 }
 
 type tApplicationException struct {
@@ -43,8 +64,17 @@
 	type_   int32
 }
 
+var _ TApplicationException = (*tApplicationException)(nil)
+
+func (tApplicationException) TExceptionType() TExceptionType {
+	return TExceptionTypeApplication
+}
+
 func (e tApplicationException) Error() string {
-	return e.message
+	if e.message != "" {
+		return e.message
+	}
+	return defaultApplicationExceptionMessage[e.type_]
 }
 
 func NewTApplicationException(type_ int32, message string) TApplicationException {
@@ -55,19 +85,20 @@
 	return p.type_
 }
 
-func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, error) {
-	_, err := iprot.ReadStructBegin()
+func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error {
+	// TODO: this should really be generated by the compiler
+	_, err := iprot.ReadStructBegin(ctx)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	message := ""
 	type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
 
 	for {
-		_, ttype, id, err := iprot.ReadFieldBegin()
+		_, ttype, id, err := iprot.ReadFieldBegin(ctx)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		if ttype == STOP {
 			break
@@ -75,68 +106,75 @@
 		switch id {
 		case 1:
 			if ttype == STRING {
-				if message, err = iprot.ReadString(); err != nil {
-					return nil, err
+				if message, err = iprot.ReadString(ctx); err != nil {
+					return err
 				}
 			} else {
-				if err = SkipDefaultDepth(iprot, ttype); err != nil {
-					return nil, err
+				if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+					return err
 				}
 			}
 		case 2:
 			if ttype == I32 {
-				if type_, err = iprot.ReadI32(); err != nil {
-					return nil, err
+				if type_, err = iprot.ReadI32(ctx); err != nil {
+					return err
 				}
 			} else {
-				if err = SkipDefaultDepth(iprot, ttype); err != nil {
-					return nil, err
+				if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+					return err
 				}
 			}
 		default:
-			if err = SkipDefaultDepth(iprot, ttype); err != nil {
-				return nil, err
+			if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+				return err
 			}
 		}
-		if err = iprot.ReadFieldEnd(); err != nil {
-			return nil, err
+		if err = iprot.ReadFieldEnd(ctx); err != nil {
+			return err
 		}
 	}
-	return NewTApplicationException(type_, message), iprot.ReadStructEnd()
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return err
+	}
+
+	p.message = message
+	p.type_ = type_
+
+	return nil
 }
 
-func (p *tApplicationException) Write(oprot TProtocol) (err error) {
-	err = oprot.WriteStructBegin("TApplicationException")
+func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) {
+	err = oprot.WriteStructBegin(ctx, "TApplicationException")
 	if len(p.Error()) > 0 {
-		err = oprot.WriteFieldBegin("message", STRING, 1)
+		err = oprot.WriteFieldBegin(ctx, "message", STRING, 1)
 		if err != nil {
 			return
 		}
-		err = oprot.WriteString(p.Error())
+		err = oprot.WriteString(ctx, p.Error())
 		if err != nil {
 			return
 		}
-		err = oprot.WriteFieldEnd()
+		err = oprot.WriteFieldEnd(ctx)
 		if err != nil {
 			return
 		}
 	}
-	err = oprot.WriteFieldBegin("type", I32, 2)
+	err = oprot.WriteFieldBegin(ctx, "type", I32, 2)
 	if err != nil {
 		return
 	}
-	err = oprot.WriteI32(p.type_)
+	err = oprot.WriteI32(ctx, p.type_)
 	if err != nil {
 		return
 	}
-	err = oprot.WriteFieldEnd()
+	err = oprot.WriteFieldEnd(ctx)
 	if err != nil {
 		return
 	}
-	err = oprot.WriteFieldStop()
+	err = oprot.WriteFieldStop(ctx)
 	if err != nil {
 		return
 	}
-	err = oprot.WriteStructEnd()
+	err = oprot.WriteStructEnd(ctx)
 	return
 }
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
index 690d341..45c880d 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
@@ -21,6 +21,7 @@
 
 import (
 	"bytes"
+	"context"
 	"encoding/binary"
 	"errors"
 	"fmt"
@@ -31,190 +32,220 @@
 type TBinaryProtocol struct {
 	trans         TRichTransport
 	origTransport TTransport
-	reader        io.Reader
-	writer        io.Writer
-	strictRead    bool
-	strictWrite   bool
+	cfg           *TConfiguration
 	buffer        [64]byte
 }
 
 type TBinaryProtocolFactory struct {
-	strictRead  bool
-	strictWrite bool
+	cfg *TConfiguration
 }
 
+// Deprecated: Use NewTBinaryProtocolConf instead.
 func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
-	return NewTBinaryProtocol(t, false, true)
+	return NewTBinaryProtocolConf(t, &TConfiguration{
+		noPropagation: true,
+	})
 }
 
+// Deprecated: Use NewTBinaryProtocolConf instead.
 func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
-	p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite}
+	return NewTBinaryProtocolConf(t, &TConfiguration{
+		TBinaryStrictRead:  &strictRead,
+		TBinaryStrictWrite: &strictWrite,
+
+		noPropagation: true,
+	})
+}
+
+func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol {
+	PropagateTConfiguration(t, conf)
+	p := &TBinaryProtocol{
+		origTransport: t,
+		cfg:           conf,
+	}
 	if et, ok := t.(TRichTransport); ok {
 		p.trans = et
 	} else {
 		p.trans = NewTRichTransport(t)
 	}
-	p.reader = p.trans
-	p.writer = p.trans
 	return p
 }
 
+// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
 func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
-	return NewTBinaryProtocolFactory(false, true)
+	return NewTBinaryProtocolFactoryConf(&TConfiguration{
+		noPropagation: true,
+	})
 }
 
+// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
 func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
-	return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite}
+	return NewTBinaryProtocolFactoryConf(&TConfiguration{
+		TBinaryStrictRead:  &strictRead,
+		TBinaryStrictWrite: &strictWrite,
+
+		noPropagation: true,
+	})
+}
+
+func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory {
+	return &TBinaryProtocolFactory{
+		cfg: conf,
+	}
 }
 
 func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
-	return NewTBinaryProtocol(t, p.strictRead, p.strictWrite)
+	return NewTBinaryProtocolConf(t, p.cfg)
+}
+
+func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) {
+	p.cfg = conf
 }
 
 /**
  * Writing Methods
  */
 
-func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
-	if p.strictWrite {
+func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
+	if p.cfg.GetTBinaryStrictWrite() {
 		version := uint32(VERSION_1) | uint32(typeId)
-		e := p.WriteI32(int32(version))
+		e := p.WriteI32(ctx, int32(version))
 		if e != nil {
 			return e
 		}
-		e = p.WriteString(name)
+		e = p.WriteString(ctx, name)
 		if e != nil {
 			return e
 		}
-		e = p.WriteI32(seqId)
+		e = p.WriteI32(ctx, seqId)
 		return e
 	} else {
-		e := p.WriteString(name)
+		e := p.WriteString(ctx, name)
 		if e != nil {
 			return e
 		}
-		e = p.WriteByte(int8(typeId))
+		e = p.WriteByte(ctx, int8(typeId))
 		if e != nil {
 			return e
 		}
-		e = p.WriteI32(seqId)
+		e = p.WriteI32(ctx, seqId)
 		return e
 	}
 	return nil
 }
 
-func (p *TBinaryProtocol) WriteMessageEnd() error {
+func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error {
 	return nil
 }
 
-func (p *TBinaryProtocol) WriteStructBegin(name string) error {
+func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error {
 	return nil
 }
 
-func (p *TBinaryProtocol) WriteStructEnd() error {
+func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error {
 	return nil
 }
 
-func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
-	e := p.WriteByte(int8(typeId))
+func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+	e := p.WriteByte(ctx, int8(typeId))
 	if e != nil {
 		return e
 	}
-	e = p.WriteI16(id)
+	e = p.WriteI16(ctx, id)
 	return e
 }
 
-func (p *TBinaryProtocol) WriteFieldEnd() error {
+func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error {
 	return nil
 }
 
-func (p *TBinaryProtocol) WriteFieldStop() error {
-	e := p.WriteByte(STOP)
+func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error {
+	e := p.WriteByte(ctx, STOP)
 	return e
 }
 
-func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
-	e := p.WriteByte(int8(keyType))
+func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+	e := p.WriteByte(ctx, int8(keyType))
 	if e != nil {
 		return e
 	}
-	e = p.WriteByte(int8(valueType))
+	e = p.WriteByte(ctx, int8(valueType))
 	if e != nil {
 		return e
 	}
-	e = p.WriteI32(int32(size))
+	e = p.WriteI32(ctx, int32(size))
 	return e
 }
 
-func (p *TBinaryProtocol) WriteMapEnd() error {
+func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error {
 	return nil
 }
 
-func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error {
-	e := p.WriteByte(int8(elemType))
+func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+	e := p.WriteByte(ctx, int8(elemType))
 	if e != nil {
 		return e
 	}
-	e = p.WriteI32(int32(size))
+	e = p.WriteI32(ctx, int32(size))
 	return e
 }
 
-func (p *TBinaryProtocol) WriteListEnd() error {
+func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error {
 	return nil
 }
 
-func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error {
-	e := p.WriteByte(int8(elemType))
+func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+	e := p.WriteByte(ctx, int8(elemType))
 	if e != nil {
 		return e
 	}
-	e = p.WriteI32(int32(size))
+	e = p.WriteI32(ctx, int32(size))
 	return e
 }
 
-func (p *TBinaryProtocol) WriteSetEnd() error {
+func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error {
 	return nil
 }
 
-func (p *TBinaryProtocol) WriteBool(value bool) error {
+func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error {
 	if value {
-		return p.WriteByte(1)
+		return p.WriteByte(ctx, 1)
 	}
-	return p.WriteByte(0)
+	return p.WriteByte(ctx, 0)
 }
 
-func (p *TBinaryProtocol) WriteByte(value int8) error {
+func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error {
 	e := p.trans.WriteByte(byte(value))
 	return NewTProtocolException(e)
 }
 
-func (p *TBinaryProtocol) WriteI16(value int16) error {
+func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error {
 	v := p.buffer[0:2]
 	binary.BigEndian.PutUint16(v, uint16(value))
-	_, e := p.writer.Write(v)
+	_, e := p.trans.Write(v)
 	return NewTProtocolException(e)
 }
 
-func (p *TBinaryProtocol) WriteI32(value int32) error {
+func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error {
 	v := p.buffer[0:4]
 	binary.BigEndian.PutUint32(v, uint32(value))
-	_, e := p.writer.Write(v)
+	_, e := p.trans.Write(v)
 	return NewTProtocolException(e)
 }
 
-func (p *TBinaryProtocol) WriteI64(value int64) error {
+func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error {
 	v := p.buffer[0:8]
 	binary.BigEndian.PutUint64(v, uint64(value))
-	_, err := p.writer.Write(v)
+	_, err := p.trans.Write(v)
 	return NewTProtocolException(err)
 }
 
-func (p *TBinaryProtocol) WriteDouble(value float64) error {
-	return p.WriteI64(int64(math.Float64bits(value)))
+func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error {
+	return p.WriteI64(ctx, int64(math.Float64bits(value)))
 }
 
-func (p *TBinaryProtocol) WriteString(value string) error {
-	e := p.WriteI32(int32(len(value)))
+func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error {
+	e := p.WriteI32(ctx, int32(len(value)))
 	if e != nil {
 		return e
 	}
@@ -222,12 +253,12 @@
 	return NewTProtocolException(err)
 }
 
-func (p *TBinaryProtocol) WriteBinary(value []byte) error {
-	e := p.WriteI32(int32(len(value)))
+func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error {
+	e := p.WriteI32(ctx, int32(len(value)))
 	if e != nil {
 		return e
 	}
-	_, err := p.writer.Write(value)
+	_, err := p.trans.Write(value)
 	return NewTProtocolException(err)
 }
 
@@ -235,8 +266,8 @@
  * Reading methods
  */
 
-func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
-	size, e := p.ReadI32()
+func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
+	size, e := p.ReadI32(ctx)
 	if e != nil {
 		return "", typeId, 0, NewTProtocolException(e)
 	}
@@ -246,79 +277,79 @@
 		if version != VERSION_1 {
 			return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
 		}
-		name, e = p.ReadString()
+		name, e = p.ReadString(ctx)
 		if e != nil {
 			return name, typeId, seqId, NewTProtocolException(e)
 		}
-		seqId, e = p.ReadI32()
+		seqId, e = p.ReadI32(ctx)
 		if e != nil {
 			return name, typeId, seqId, NewTProtocolException(e)
 		}
 		return name, typeId, seqId, nil
 	}
-	if p.strictRead {
+	if p.cfg.GetTBinaryStrictRead() {
 		return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
 	}
 	name, e2 := p.readStringBody(size)
 	if e2 != nil {
 		return name, typeId, seqId, e2
 	}
-	b, e3 := p.ReadByte()
+	b, e3 := p.ReadByte(ctx)
 	if e3 != nil {
 		return name, typeId, seqId, e3
 	}
 	typeId = TMessageType(b)
-	seqId, e4 := p.ReadI32()
+	seqId, e4 := p.ReadI32(ctx)
 	if e4 != nil {
 		return name, typeId, seqId, e4
 	}
 	return name, typeId, seqId, nil
 }
 
-func (p *TBinaryProtocol) ReadMessageEnd() error {
+func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error {
 	return nil
 }
 
-func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) {
+func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
 	return
 }
 
-func (p *TBinaryProtocol) ReadStructEnd() error {
+func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error {
 	return nil
 }
 
-func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) {
-	t, err := p.ReadByte()
+func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) {
+	t, err := p.ReadByte(ctx)
 	typeId = TType(t)
 	if err != nil {
 		return name, typeId, seqId, err
 	}
 	if t != STOP {
-		seqId, err = p.ReadI16()
+		seqId, err = p.ReadI16(ctx)
 	}
 	return name, typeId, seqId, err
 }
 
-func (p *TBinaryProtocol) ReadFieldEnd() error {
+func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error {
 	return nil
 }
 
 var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
 
-func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) {
-	k, e := p.ReadByte()
+func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) {
+	k, e := p.ReadByte(ctx)
 	if e != nil {
 		err = NewTProtocolException(e)
 		return
 	}
 	kType = TType(k)
-	v, e := p.ReadByte()
+	v, e := p.ReadByte(ctx)
 	if e != nil {
 		err = NewTProtocolException(e)
 		return
 	}
 	vType = TType(v)
-	size32, e := p.ReadI32()
+	size32, e := p.ReadI32(ctx)
 	if e != nil {
 		err = NewTProtocolException(e)
 		return
@@ -331,18 +362,18 @@
 	return kType, vType, size, nil
 }
 
-func (p *TBinaryProtocol) ReadMapEnd() error {
+func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error {
 	return nil
 }
 
-func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) {
-	b, e := p.ReadByte()
+func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
+	b, e := p.ReadByte(ctx)
 	if e != nil {
 		err = NewTProtocolException(e)
 		return
 	}
 	elemType = TType(b)
-	size32, e := p.ReadI32()
+	size32, e := p.ReadI32(ctx)
 	if e != nil {
 		err = NewTProtocolException(e)
 		return
@@ -356,18 +387,18 @@
 	return
 }
 
-func (p *TBinaryProtocol) ReadListEnd() error {
+func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error {
 	return nil
 }
 
-func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) {
-	b, e := p.ReadByte()
+func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+	b, e := p.ReadByte(ctx)
 	if e != nil {
 		err = NewTProtocolException(e)
 		return
 	}
 	elemType = TType(b)
-	size32, e := p.ReadI32()
+	size32, e := p.ReadI32(ctx)
 	if e != nil {
 		err = NewTProtocolException(e)
 		return
@@ -380,12 +411,12 @@
 	return elemType, size, nil
 }
 
-func (p *TBinaryProtocol) ReadSetEnd() error {
+func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error {
 	return nil
 }
 
-func (p *TBinaryProtocol) ReadBool() (bool, error) {
-	b, e := p.ReadByte()
+func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) {
+	b, e := p.ReadByte(ctx)
 	v := true
 	if b != 1 {
 		v = false
@@ -393,122 +424,132 @@
 	return v, e
 }
 
-func (p *TBinaryProtocol) ReadByte() (int8, error) {
+func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) {
 	v, err := p.trans.ReadByte()
 	return int8(v), err
 }
 
-func (p *TBinaryProtocol) ReadI16() (value int16, err error) {
+func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) {
 	buf := p.buffer[0:2]
-	err = p.readAll(buf)
+	err = p.readAll(ctx, buf)
 	value = int16(binary.BigEndian.Uint16(buf))
 	return value, err
 }
 
-func (p *TBinaryProtocol) ReadI32() (value int32, err error) {
+func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) {
 	buf := p.buffer[0:4]
-	err = p.readAll(buf)
+	err = p.readAll(ctx, buf)
 	value = int32(binary.BigEndian.Uint32(buf))
 	return value, err
 }
 
-func (p *TBinaryProtocol) ReadI64() (value int64, err error) {
+func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) {
 	buf := p.buffer[0:8]
-	err = p.readAll(buf)
+	err = p.readAll(ctx, buf)
 	value = int64(binary.BigEndian.Uint64(buf))
 	return value, err
 }
 
-func (p *TBinaryProtocol) ReadDouble() (value float64, err error) {
+func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
 	buf := p.buffer[0:8]
-	err = p.readAll(buf)
+	err = p.readAll(ctx, buf)
 	value = math.Float64frombits(binary.BigEndian.Uint64(buf))
 	return value, err
 }
 
-func (p *TBinaryProtocol) ReadString() (value string, err error) {
-	size, e := p.ReadI32()
+func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) {
+	size, e := p.ReadI32(ctx)
 	if e != nil {
 		return "", e
 	}
+	err = checkSizeForProtocol(size, p.cfg)
+	if err != nil {
+		return
+	}
 	if size < 0 {
 		err = invalidDataLength
 		return
 	}
+	if size == 0 {
+		return "", nil
+	}
+	if size < int32(len(p.buffer)) {
+		// Avoid allocation on small reads
+		buf := p.buffer[:size]
+		read, e := io.ReadFull(p.trans, buf)
+		return string(buf[:read]), NewTProtocolException(e)
+	}
 
 	return p.readStringBody(size)
 }
 
-func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
-	size, e := p.ReadI32()
+func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
+	size, e := p.ReadI32(ctx)
 	if e != nil {
 		return nil, e
 	}
-	if size < 0 {
-		return nil, invalidDataLength
-	}
-	if uint64(size) > p.trans.RemainingBytes() {
-		return nil, invalidDataLength
+	if err := checkSizeForProtocol(size, p.cfg); err != nil {
+		return nil, err
 	}
 
-	isize := int(size)
-	buf := make([]byte, isize)
-	_, err := io.ReadFull(p.trans, buf)
+	buf, err := safeReadBytes(size, p.trans)
 	return buf, NewTProtocolException(err)
 }
 
-func (p *TBinaryProtocol) Flush() (err error) {
-	return NewTProtocolException(p.trans.Flush())
+func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) {
+	return NewTProtocolException(p.trans.Flush(ctx))
 }
 
-func (p *TBinaryProtocol) Skip(fieldType TType) (err error) {
-	return SkipDefaultDepth(p, fieldType)
+func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+	return SkipDefaultDepth(ctx, p, fieldType)
 }
 
 func (p *TBinaryProtocol) Transport() TTransport {
 	return p.origTransport
 }
 
-func (p *TBinaryProtocol) readAll(buf []byte) error {
-	_, err := io.ReadFull(p.reader, buf)
+func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) {
+	var read int
+	_, deadlineSet := ctx.Deadline()
+	for {
+		read, err = io.ReadFull(p.trans, buf)
+		if deadlineSet && read == 0 && isTimeoutError(err) && ctx.Err() == nil {
+			// This is I/O timeout without anything read,
+			// and we still have time left, keep retrying.
+			continue
+		}
+		// For anything else, don't retry
+		break
+	}
 	return NewTProtocolException(err)
 }
 
-const readLimit = 32768
-
 func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
+	buf, err := safeReadBytes(size, p.trans)
+	return string(buf), NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) {
+	PropagateTConfiguration(p.trans, conf)
+	PropagateTConfiguration(p.origTransport, conf)
+	p.cfg = conf
+}
+
+var (
+	_ TConfigurationSetter = (*TBinaryProtocolFactory)(nil)
+	_ TConfigurationSetter = (*TBinaryProtocol)(nil)
+)
+
+// This function is shared between TBinaryProtocol and TCompactProtocol.
+//
+// It tries to read size bytes from trans, in a way that prevents large
+// allocations when size is insanely large (mostly caused by malformed message).
+func safeReadBytes(size int32, trans io.Reader) ([]byte, error) {
 	if size < 0 {
-		return "", nil
-	}
-	if uint64(size) > p.trans.RemainingBytes() {
-		return "", invalidDataLength
+		return nil, nil
 	}
 
-	var (
-		buf bytes.Buffer
-		e   error
-		b   []byte
-	)
-
-	switch {
-	case int(size) <= len(p.buffer):
-		b = p.buffer[:size] // avoids allocation for small reads
-	case int(size) < readLimit:
-		b = make([]byte, size)
-	default:
-		b = make([]byte, readLimit)
-	}
-
-	for size > 0 {
-		_, e = io.ReadFull(p.trans, b)
-		buf.Write(b)
-		if e != nil {
-			break
-		}
-		size -= readLimit
-		if size < readLimit && size > 0 {
-			b = b[:size]
-		}
-	}
-	return buf.String(), NewTProtocolException(e)
+	buf := new(bytes.Buffer)
+	_, err := io.CopyN(buf, trans, int64(size))
+	return buf.Bytes(), err
 }
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/client.go b/vendor/github.com/uber/jaeger-client-go/thrift/client.go
new file mode 100644
index 0000000..ea2c01f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/client.go
@@ -0,0 +1,109 @@
+package thrift
+
+import (
+	"context"
+	"fmt"
+)
+
+// ResponseMeta represents the metadata attached to the response.
+type ResponseMeta struct {
+	// The headers in the response, if any.
+	// If the underlying transport/protocol is not THeader, this will always be nil.
+	Headers THeaderMap
+}
+
+type TClient interface {
+	Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error)
+}
+
+type TStandardClient struct {
+	seqId        int32
+	iprot, oprot TProtocol
+}
+
+// TStandardClient implements TClient, and uses the standard message format for Thrift.
+// It is not safe for concurrent use.
+func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient {
+	return &TStandardClient{
+		iprot: inputProtocol,
+		oprot: outputProtocol,
+	}
+}
+
+func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error {
+	// Set headers from context object on THeaderProtocol
+	if headerProt, ok := oprot.(*THeaderProtocol); ok {
+		headerProt.ClearWriteHeaders()
+		for _, key := range GetWriteHeaderList(ctx) {
+			if value, ok := GetHeader(ctx, key); ok {
+				headerProt.SetWriteHeader(key, value)
+			}
+		}
+	}
+
+	if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil {
+		return err
+	}
+	if err := args.Write(ctx, oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteMessageEnd(ctx); err != nil {
+		return err
+	}
+	return oprot.Flush(ctx)
+}
+
+func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error {
+	rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx)
+	if err != nil {
+		return err
+	}
+
+	if method != rMethod {
+		return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method))
+	} else if seqId != rSeqId {
+		return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method))
+	} else if rTypeId == EXCEPTION {
+		var exception tApplicationException
+		if err := exception.Read(ctx, iprot); err != nil {
+			return err
+		}
+
+		if err := iprot.ReadMessageEnd(ctx); err != nil {
+			return err
+		}
+
+		return &exception
+	} else if rTypeId != REPLY {
+		return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method))
+	}
+
+	if err := result.Read(ctx, iprot); err != nil {
+		return err
+	}
+
+	return iprot.ReadMessageEnd(ctx)
+}
+
+func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
+	p.seqId++
+	seqId := p.seqId
+
+	if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil {
+		return ResponseMeta{}, err
+	}
+
+	// method is oneway
+	if result == nil {
+		return ResponseMeta{}, nil
+	}
+
+	err := p.Recv(ctx, p.iprot, seqId, method, result)
+	var headers THeaderMap
+	if hp, ok := p.iprot.(*THeaderProtocol); ok {
+		headers = hp.transport.readHeaders
+	}
+	return ResponseMeta{
+		Headers: headers,
+	}, err
+}
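// Usage sketch for TStandardClient (hedged): a oneway-style call over an
// in-memory transport. The pingArgs type and the "ping" method name are
// invented stand-ins for thrift-compiler-generated code; only APIs that
// appear in this diff (NewTMemoryBuffer, NewTBinaryProtocolTransport,
// NewTStandardClient, Call) are relied on.
package main

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

// pingArgs is a minimal, field-less TStruct (normally generated code).
type pingArgs struct{}

func (pingArgs) Write(ctx context.Context, p thrift.TProtocol) error {
	if err := p.WriteStructBegin(ctx, "ping_args"); err != nil {
		return err
	}
	if err := p.WriteFieldStop(ctx); err != nil {
		return err
	}
	return p.WriteStructEnd(ctx)
}

func (pingArgs) Read(ctx context.Context, p thrift.TProtocol) error {
	// The standard generated pattern: read fields until STOP, skipping
	// anything unknown (this struct has no fields of its own).
	if _, err := p.ReadStructBegin(ctx); err != nil {
		return err
	}
	for {
		_, fieldType, _, err := p.ReadFieldBegin(ctx)
		if err != nil {
			return err
		}
		if fieldType == thrift.STOP {
			break
		}
		if err := p.Skip(ctx, fieldType); err != nil {
			return err
		}
		if err := p.ReadFieldEnd(ctx); err != nil {
			return err
		}
	}
	return p.ReadStructEnd(ctx)
}

func main() {
	trans := thrift.NewTMemoryBuffer()
	proto := thrift.NewTBinaryProtocolTransport(trans)
	client := thrift.NewTStandardClient(proto, proto)

	// A nil result marks the call as oneway: Call returns right after Send,
	// so nothing is read back from the buffer.
	_, err := client.Call(context.Background(), "ping", pingArgs{}, nil)
	fmt.Println("oneway call err:", err)
}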
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
index b9299f2..a49225d 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
@@ -20,7 +20,9 @@
 package thrift
 
 import (
+	"context"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -73,20 +75,37 @@
 	}
 }
 
-type TCompactProtocolFactory struct{}
+type TCompactProtocolFactory struct {
+	cfg *TConfiguration
+}
 
+// Deprecated: Use NewTCompactProtocolFactoryConf instead.
 func NewTCompactProtocolFactory() *TCompactProtocolFactory {
-	return &TCompactProtocolFactory{}
+	return NewTCompactProtocolFactoryConf(&TConfiguration{
+		noPropagation: true,
+	})
+}
+
+func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory {
+	return &TCompactProtocolFactory{
+		cfg: conf,
+	}
 }
 
 func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
-	return NewTCompactProtocol(trans)
+	return NewTCompactProtocolConf(trans, p.cfg)
+}
+
+func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) {
+	p.cfg = conf
 }
 
 type TCompactProtocol struct {
 	trans         TRichTransport
 	origTransport TTransport
 
+	cfg *TConfiguration
+
 	// Used to keep track of the last field for the current and previous structs,
 	// so we can do the delta stuff.
 	lastField   []int
@@ -105,9 +124,19 @@
 	buffer             [64]byte
 }
 
-// Create a TCompactProtocol given a TTransport
+// Deprecated: Use NewTCompactProtocolConf instead.
 func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
-	p := &TCompactProtocol{origTransport: trans, lastField: []int{}}
+	return NewTCompactProtocolConf(trans, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol {
+	PropagateTConfiguration(trans, conf)
+	p := &TCompactProtocol{
+		origTransport: trans,
+		cfg:           conf,
+	}
 	if et, ok := trans.(TRichTransport); ok {
 		p.trans = et
 	} else {
@@ -115,7 +144,6 @@
 	}
 
 	return p
-
 }
 
 //
@@ -124,7 +152,7 @@
 
 // Write a message header to the wire. Compact Protocol messages contain the
 // protocol version so we can migrate forwards in the future if need be.
-func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
+func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
 	err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
 	if err != nil {
 		return NewTProtocolException(err)
@@ -137,17 +165,17 @@
 	if err != nil {
 		return NewTProtocolException(err)
 	}
-	e := p.WriteString(name)
+	e := p.WriteString(ctx, name)
 	return e
 
 }
 
-func (p *TCompactProtocol) WriteMessageEnd() error { return nil }
+func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil }
 
 // Write a struct begin. This doesn't actually put anything on the wire. We
 // use it as an opportunity to put special placeholder markers on the field
 // stack so we can get the field id deltas correct.
-func (p *TCompactProtocol) WriteStructBegin(name string) error {
+func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error {
 	p.lastField = append(p.lastField, p.lastFieldId)
 	p.lastFieldId = 0
 	return nil
@@ -156,26 +184,29 @@
 // Write a struct end. This doesn't actually put anything on the wire. We use
 // this as an opportunity to pop the last field from the current struct off
 // of the field stack.
-func (p *TCompactProtocol) WriteStructEnd() error {
+func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error {
+	if len(p.lastField) <= 0 {
+		return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before"))
+	}
 	p.lastFieldId = p.lastField[len(p.lastField)-1]
 	p.lastField = p.lastField[:len(p.lastField)-1]
 	return nil
 }
 
-func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
 	if typeId == BOOL {
 		// we want to possibly include the value, so we'll wait.
 		p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
 		return nil
 	}
-	_, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF)
+	_, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF)
 	return NewTProtocolException(err)
 }
 
 // The workhorse of writeFieldBegin. It has the option of doing a
 // 'type override' of the type header. This is used specifically in the
 // boolean field case.
-func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) {
+func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) {
 	// short lastField = lastField_.pop();
 
 	// if there's a type override, use that.
@@ -200,7 +231,7 @@
 		if err != nil {
 			return 0, err
 		}
-		err = p.WriteI16(id)
+		err = p.WriteI16(ctx, id)
 		written = 1 + 2
 		if err != nil {
 			return 0, err
@@ -208,18 +239,17 @@
 	}
 
 	p.lastFieldId = fieldId
-	// p.lastField.Push(field.id);
 	return written, nil
 }
 
-func (p *TCompactProtocol) WriteFieldEnd() error { return nil }
+func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil }
 
-func (p *TCompactProtocol) WriteFieldStop() error {
+func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error {
 	err := p.writeByteDirect(STOP)
 	return NewTProtocolException(err)
 }
 
-func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
 	if size == 0 {
 		err := p.writeByteDirect(0)
 		return NewTProtocolException(err)
@@ -232,32 +262,32 @@
 	return NewTProtocolException(err)
 }
 
-func (p *TCompactProtocol) WriteMapEnd() error { return nil }
+func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil }
 
 // Write a list header.
-func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error {
+func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
 	_, err := p.writeCollectionBegin(elemType, size)
 	return NewTProtocolException(err)
 }
 
-func (p *TCompactProtocol) WriteListEnd() error { return nil }
+func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil }
 
 // Write a set header.
-func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error {
+func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
 	_, err := p.writeCollectionBegin(elemType, size)
 	return NewTProtocolException(err)
 }
 
-func (p *TCompactProtocol) WriteSetEnd() error { return nil }
+func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil }
 
-func (p *TCompactProtocol) WriteBool(value bool) error {
+func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error {
 	v := byte(COMPACT_BOOLEAN_FALSE)
 	if value {
 		v = byte(COMPACT_BOOLEAN_TRUE)
 	}
 	if p.booleanFieldPending {
 		// we haven't written the field header yet
-		_, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v)
+		_, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v)
 		p.booleanFieldPending = false
 		return NewTProtocolException(err)
 	}
@@ -267,31 +297,31 @@
 }
 
 // Write a byte. Nothing to see here!
-func (p *TCompactProtocol) WriteByte(value int8) error {
+func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error {
 	err := p.writeByteDirect(byte(value))
 	return NewTProtocolException(err)
 }
 
 // Write an I16 as a zigzag varint.
-func (p *TCompactProtocol) WriteI16(value int16) error {
+func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error {
 	_, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
 	return NewTProtocolException(err)
 }
 
 // Write an i32 as a zigzag varint.
-func (p *TCompactProtocol) WriteI32(value int32) error {
+func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error {
 	_, err := p.writeVarint32(p.int32ToZigzag(value))
 	return NewTProtocolException(err)
 }
 
 // Write an i64 as a zigzag varint.
-func (p *TCompactProtocol) WriteI64(value int64) error {
+func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error {
 	_, err := p.writeVarint64(p.int64ToZigzag(value))
 	return NewTProtocolException(err)
 }
 
 // Write a double to the wire as 8 bytes.
-func (p *TCompactProtocol) WriteDouble(value float64) error {
+func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error {
 	buf := p.buffer[0:8]
 	binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
 	_, err := p.trans.Write(buf)
@@ -299,7 +329,7 @@
 }
 
 // Write a string to the wire with a varint size preceding.
-func (p *TCompactProtocol) WriteString(value string) error {
+func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error {
 	_, e := p.writeVarint32(int32(len(value)))
 	if e != nil {
 		return NewTProtocolException(e)
@@ -311,7 +341,7 @@
 }
 
 // Write a byte array, using a varint for the size.
-func (p *TCompactProtocol) WriteBinary(bin []byte) error {
+func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error {
 	_, e := p.writeVarint32(int32(len(bin)))
 	if e != nil {
 		return NewTProtocolException(e)
@@ -328,9 +358,20 @@
 //
 
 // Read a message header.
-func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
+	var protocolId byte
 
-	protocolId, err := p.readByteDirect()
+	_, deadlineSet := ctx.Deadline()
+	for {
+		protocolId, err = p.readByteDirect()
+		if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
+			// keep retrying I/O timeout errors since we still have
+			// time left
+			continue
+		}
+		// For anything else, don't retry
+		break
+	}
 	if err != nil {
 		return
 	}
@@ -357,15 +398,15 @@
 		err = NewTProtocolException(e)
 		return
 	}
-	name, err = p.ReadString()
+	name, err = p.ReadString(ctx)
 	return
 }
 
-func (p *TCompactProtocol) ReadMessageEnd() error { return nil }
+func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil }
 
 // Read a struct begin. There's nothing on the wire for this, but it is our
 // opportunity to push a new struct begin marker onto the field stack.
-func (p *TCompactProtocol) ReadStructBegin() (name string, err error) {
+func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
 	p.lastField = append(p.lastField, p.lastFieldId)
 	p.lastFieldId = 0
 	return
@@ -373,15 +414,18 @@
 
 // Doesn't actually consume any wire data, just removes the last field for
 // this struct from the field stack.
-func (p *TCompactProtocol) ReadStructEnd() error {
+func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error {
 	// consume the last field we read off the wire.
+	if len(p.lastField) <= 0 {
+		return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before"))
+	}
 	p.lastFieldId = p.lastField[len(p.lastField)-1]
 	p.lastField = p.lastField[:len(p.lastField)-1]
 	return nil
 }
 
 // Read a field header off the wire.
-func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
+func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) {
 	t, err := p.readByteDirect()
 	if err != nil {
 		return
@@ -396,7 +440,7 @@
 	modifier := int16((t & 0xf0) >> 4)
 	if modifier == 0 {
 		// not a delta. look ahead for the zigzag varint field id.
-		id, err = p.ReadI16()
+		id, err = p.ReadI16(ctx)
 		if err != nil {
 			return
 		}
@@ -422,12 +466,12 @@
 	return
 }
 
-func (p *TCompactProtocol) ReadFieldEnd() error { return nil }
+func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil }
 
 // Read a map header off the wire. If the size is zero, skip reading the key
 // and value type. This means that 0-length maps will yield TMaps without the
 // "correct" types.
-func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
+func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
 	size32, e := p.readVarint32()
 	if e != nil {
 		err = NewTProtocolException(e)
@@ -451,13 +495,13 @@
 	return
 }
 
-func (p *TCompactProtocol) ReadMapEnd() error { return nil }
+func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil }
 
 // Read a list header off the wire. If the list size is 0-14, the size will
 // be packed into the element type header. If it's a longer list, the 4 MSB
 // of the element type header will be 0xF, and a varint will follow with the
 // true size.
-func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) {
+func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
 	size_and_type, err := p.readByteDirect()
 	if err != nil {
 		return
@@ -483,22 +527,22 @@
 	return
 }
 
-func (p *TCompactProtocol) ReadListEnd() error { return nil }
+func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil }
 
 // Read a set header off the wire. If the set size is 0-14, the size will
 // be packed into the element type header. If it's a longer set, the 4 MSB
 // of the element type header will be 0xF, and a varint will follow with the
 // true size.
-func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) {
-	return p.ReadListBegin()
+func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+	return p.ReadListBegin(ctx)
 }
 
-func (p *TCompactProtocol) ReadSetEnd() error { return nil }
+func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil }
 
 // Read a boolean off the wire. If this is a boolean field, the value should
 // already have been read during readFieldBegin, so we'll just consume the
 // pre-stored value. Otherwise, read a byte.
-func (p *TCompactProtocol) ReadBool() (value bool, err error) {
+func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) {
 	if p.boolValueIsNotNull {
 		p.boolValueIsNotNull = false
 		return p.boolValue, nil
@@ -508,7 +552,7 @@
 }
 
 // Read a single byte off the wire. Nothing interesting here.
-func (p *TCompactProtocol) ReadByte() (int8, error) {
+func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) {
 	v, err := p.readByteDirect()
 	if err != nil {
 		return 0, NewTProtocolException(err)
@@ -517,13 +561,13 @@
 }
 
 // Read an i16 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI16() (value int16, err error) {
-	v, err := p.ReadI32()
+func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) {
+	v, err := p.ReadI32(ctx)
 	return int16(v), err
 }
 
 // Read an i32 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI32() (value int32, err error) {
+func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) {
 	v, e := p.readVarint32()
 	if e != nil {
 		return 0, NewTProtocolException(e)
@@ -533,7 +577,7 @@
 }
 
 // Read an i64 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI64() (value int64, err error) {
+func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) {
 	v, e := p.readVarint64()
 	if e != nil {
 		return 0, NewTProtocolException(e)
@@ -543,7 +587,7 @@
 }
 
 // No magic here - just read a double off the wire.
-func (p *TCompactProtocol) ReadDouble() (value float64, err error) {
+func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
 	longBits := p.buffer[0:8]
 	_, e := io.ReadFull(p.trans, longBits)
 	if e != nil {
@@ -553,58 +597,53 @@
 }
 
 // Reads a []byte (via readBinary), and then UTF-8 decodes it.
-func (p *TCompactProtocol) ReadString() (value string, err error) {
+func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) {
 	length, e := p.readVarint32()
 	if e != nil {
 		return "", NewTProtocolException(e)
 	}
-	if length < 0 {
-		return "", invalidDataLength
+	err = checkSizeForProtocol(length, p.cfg)
+	if err != nil {
+		return
 	}
-	if uint64(length) > p.trans.RemainingBytes() {
-		return "", invalidDataLength
-	}
-
 	if length == 0 {
 		return "", nil
 	}
-	var buf []byte
-	if length <= int32(len(p.buffer)) {
-		buf = p.buffer[0:length]
-	} else {
-		buf = make([]byte, length)
+	if length < int32(len(p.buffer)) {
+		// Avoid allocation on small reads
+		buf := p.buffer[:length]
+		read, e := io.ReadFull(p.trans, buf)
+		return string(buf[:read]), NewTProtocolException(e)
 	}
-	_, e = io.ReadFull(p.trans, buf)
+
+	buf, e := safeReadBytes(length, p.trans)
 	return string(buf), NewTProtocolException(e)
 }
 
 // Read a []byte from the wire.
-func (p *TCompactProtocol) ReadBinary() (value []byte, err error) {
+func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
 	length, e := p.readVarint32()
 	if e != nil {
 		return nil, NewTProtocolException(e)
 	}
+	err = checkSizeForProtocol(length, p.cfg)
+	if err != nil {
+		return
+	}
 	if length == 0 {
 		return []byte{}, nil
 	}
-	if length < 0 {
-		return nil, invalidDataLength
-	}
-	if uint64(length) > p.trans.RemainingBytes() {
-		return nil, invalidDataLength
-	}
 
-	buf := make([]byte, length)
-	_, e = io.ReadFull(p.trans, buf)
+	buf, e := safeReadBytes(length, p.trans)
 	return buf, NewTProtocolException(e)
 }
 
-func (p *TCompactProtocol) Flush() (err error) {
-	return NewTProtocolException(p.trans.Flush())
+func (p *TCompactProtocol) Flush(ctx context.Context) (err error) {
+	return NewTProtocolException(p.trans.Flush(ctx))
 }
 
-func (p *TCompactProtocol) Skip(fieldType TType) (err error) {
-	return SkipDefaultDepth(p, fieldType)
+func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+	return SkipDefaultDepth(ctx, p, fieldType)
 }
 
 func (p *TCompactProtocol) Transport() TTransport {
@@ -806,10 +845,21 @@
 	case COMPACT_STRUCT:
 		return STRUCT, nil
 	}
-	return STOP, TException(fmt.Errorf("don't know what type: %d", t&0x0f))
+	return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f))
 }
 
 // Given a TType value, find the appropriate TCompactProtocol.Types constant.
 func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
 	return ttypeToCompactType[t]
 }
+
+func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) {
+	PropagateTConfiguration(p.trans, conf)
+	PropagateTConfiguration(p.origTransport, conf)
+	p.cfg = conf
+}
+
+var (
+	_ TConfigurationSetter = (*TCompactProtocolFactory)(nil)
+	_ TConfigurationSetter = (*TCompactProtocol)(nil)
+)
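// Sketch (hedged): TConfiguration's MaxMessageSize is enforced by the compact
// protocol's ReadString/ReadBinary through checkSizeForProtocol. The writer
// below uses an unrestricted protocol over an in-memory buffer; the reader is
// configured with an artificially small limit so the read fails with a
// SIZE_LIMIT protocol exception. All values are made up for illustration.
package main

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

func main() {
	ctx := context.Background()
	buf := thrift.NewTMemoryBuffer()

	writer := thrift.NewTCompactProtocolConf(buf, nil)
	if err := writer.WriteString(ctx, "hello thrift"); err != nil {
		panic(err)
	}

	reader := thrift.NewTCompactProtocolConf(buf, &thrift.TConfiguration{
		MaxMessageSize: 4, // far smaller than the 12-byte string above
	})
	_, err := reader.ReadString(ctx)
	fmt.Println("read err:", err) // expected: a SIZE_LIMIT TProtocolException
}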
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go b/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go
new file mode 100644
index 0000000..454d9f3
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go
@@ -0,0 +1,378 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"crypto/tls"
+	"fmt"
+	"time"
+)
+
+// Default TConfiguration values.
+const (
+	DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024
+	DEFAULT_MAX_FRAME_SIZE   = 16384000
+
+	DEFAULT_TBINARY_STRICT_READ  = false
+	DEFAULT_TBINARY_STRICT_WRITE = true
+
+	DEFAULT_CONNECT_TIMEOUT = 0
+	DEFAULT_SOCKET_TIMEOUT  = 0
+)
+
+// TConfiguration defines some configurations shared between TTransport,
+// TProtocol, TTransportFactory, TProtocolFactory, and other implementations.
+//
+// When constructing TConfiguration, you only need to specify the non-default
+// fields. All zero values have sane default values.
+//
+// Not all configurations defined are applicable to all implementations.
+// Implementations are free to ignore the configurations not applicable to them.
+//
+// All functions attached to this type are nil-safe.
+//
+// See [1] for spec.
+//
+// NOTE: When using TConfiguration, fill in all the configurations you want to
+// set across the stack, not only the ones you want to set in the immediate
+// TTransport/TProtocol.
+//
+// For example, say you want to migrate this old code into using TConfiguration:
+//
+//     socket := thrift.NewTSocketTimeout("host:port", time.Second)
+//     transFactory := thrift.NewTFramedTransportFactoryMaxLength(
+//         thrift.NewTTransportFactory(),
+//         1024 * 1024 * 256,
+//     )
+//     protoFactory := thrift.NewTBinaryProtocolFactory(true, true)
+//
+// This is the wrong way to do it because in the end the TConfiguration used by
+// socket and transFactory will be overwritten by the one used by protoFactory
+// because of TConfiguration propagation:
+//
+//     // bad example, DO NOT USE
+//     socket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{
+//         ConnectTimeout: time.Second,
+//         SocketTimeout:  time.Second,
+//     })
+//     transFactory := thrift.NewTFramedTransportFactoryConf(
+//         thrift.NewTTransportFactory(),
+//         &thrift.TConfiguration{
+//             MaxFrameSize: 1024 * 1024 * 256,
+//         },
+//     )
+//     protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{
+//         TBinaryStrictRead:  thrift.BoolPtr(true),
+//         TBinaryStrictWrite: thrift.BoolPtr(true),
+//     })
+//
+// This is the correct way to do it:
+//
+//     conf := &thrift.TConfiguration{
+//         ConnectTimeout: time.Second,
+//         SocketTimeout:  time.Second,
+//
+//         MaxFrameSize: 1024 * 1024 * 256,
+//
+//         TBinaryStrictRead:  thrift.BoolPtr(true),
+//         TBinaryStrictWrite: thrift.BoolPtr(true),
+//     }
+//     socket := thrift.NewTSocketConf("host:port", conf)
+//     transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf)
+//     protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf)
+//
+// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md
+type TConfiguration struct {
+	// If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead.
+	MaxMessageSize int32
+
+	// If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead.
+	//
+	// Also if MaxMessageSize < MaxFrameSize,
+	// MaxMessageSize will be used instead.
+	MaxFrameSize int32
+
+	// Connect and socket timeouts to be used by TSocket and TSSLSocket.
+	//
+	// 0 means no timeout.
+	//
+	// If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be
+	// used.
+	ConnectTimeout time.Duration
+	SocketTimeout  time.Duration
+
+	// TLS config to be used by TSSLSocket.
+	TLSConfig *tls.Config
+
+	// Strict read/write configurations for TBinaryProtocol.
+	//
+	// BoolPtr helper function is available to use literal values.
+	TBinaryStrictRead  *bool
+	TBinaryStrictWrite *bool
+
+	// The wrapped protocol id to be used in THeader transport/protocol.
+	//
+	// THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions
+	// are provided to help filling this value.
+	THeaderProtocolID *THeaderProtocolID
+
+	// Used internally by deprecated constructors, to avoid overriding
+	// underlying TTransport/TProtocol's cfg by accidental propagations.
+	//
+	// For external users this is always false.
+	noPropagation bool
+}
+
+// GetMaxMessageSize returns the max message size an implementation should
+// follow.
+//
+// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil.
+func (tc *TConfiguration) GetMaxMessageSize() int32 {
+	if tc == nil || tc.MaxMessageSize <= 0 {
+		return DEFAULT_MAX_MESSAGE_SIZE
+	}
+	return tc.MaxMessageSize
+}
+
+// GetMaxFrameSize returns the max frame size an implementation should follow.
+//
+// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil.
+//
+// If the configured max message size is smaller than the configured max frame
+// size, the smaller one will be returned instead.
+func (tc *TConfiguration) GetMaxFrameSize() int32 {
+	if tc == nil {
+		return DEFAULT_MAX_FRAME_SIZE
+	}
+	maxFrameSize := tc.MaxFrameSize
+	if maxFrameSize <= 0 {
+		maxFrameSize = DEFAULT_MAX_FRAME_SIZE
+	}
+	if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize {
+		return maxMessageSize
+	}
+	return maxFrameSize
+}
+
+// GetConnectTimeout returns the connect timeout that should be used by TSocket and
+// TSSLSocket.
+//
+// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead.
+func (tc *TConfiguration) GetConnectTimeout() time.Duration {
+	if tc == nil || tc.ConnectTimeout < 0 {
+		return DEFAULT_CONNECT_TIMEOUT
+	}
+	return tc.ConnectTimeout
+}
+
+// GetSocketTimeout returns the socket timeout that should be used by TSocket and
+// TSSLSocket.
+//
+// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead.
+func (tc *TConfiguration) GetSocketTimeout() time.Duration {
+	if tc == nil || tc.SocketTimeout < 0 {
+		return DEFAULT_SOCKET_TIMEOUT
+	}
+	return tc.SocketTimeout
+}
+
+// GetTLSConfig returns the TLS config that should be used by TSSLSocket.
+//
+// It's nil-safe. If tc is nil, nil will be returned instead.
+func (tc *TConfiguration) GetTLSConfig() *tls.Config {
+	if tc == nil {
+		return nil
+	}
+	return tc.TLSConfig
+}
+
+// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol
+// should follow.
+//
+// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or
+// tc.TBinaryStrictRead is nil.
+func (tc *TConfiguration) GetTBinaryStrictRead() bool {
+	if tc == nil || tc.TBinaryStrictRead == nil {
+		return DEFAULT_TBINARY_STRICT_READ
+	}
+	return *tc.TBinaryStrictRead
+}
+
+// GetTBinaryStrictWrite returns the strict write configuration TBinaryProtocol
+// should follow.
+//
+// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or
+// tc.TBinaryStrictWrite is nil.
+func (tc *TConfiguration) GetTBinaryStrictWrite() bool {
+	if tc == nil || tc.TBinaryStrictWrite == nil {
+		return DEFAULT_TBINARY_STRICT_WRITE
+	}
+	return *tc.TBinaryStrictWrite
+}
+
+// GetTHeaderProtocolID returns the THeaderProtocolID that should be used by
+// THeaderProtocol clients (for servers, they always use the same one as the
+// client instead).
+//
+// It's nil-safe. If either tc or tc.THeaderProtocolID is nil,
+// THeaderProtocolDefault will be returned instead.
+// THeaderProtocolDefault will also be returned if the configured value is invalid.
+func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID {
+	if tc == nil || tc.THeaderProtocolID == nil {
+		return THeaderProtocolDefault
+	}
+	protoID := *tc.THeaderProtocolID
+	if err := protoID.Validate(); err != nil {
+		return THeaderProtocolDefault
+	}
+	return protoID
+}
+
+// THeaderProtocolIDPtr validates and returns the pointer to id.
+//
+// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault
+// and the validation error will be returned.
+func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) {
+	err := id.Validate()
+	if err != nil {
+		id = THeaderProtocolDefault
+	}
+	return &id, err
+}
+
+// THeaderProtocolIDPtrMust validates and returns the pointer to id.
+//
+// It's similar to THeaderProtocolIDPtr, but it panics on validation errors
+// instead of returning them.
+func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID {
+	ptr, err := THeaderProtocolIDPtr(id)
+	if err != nil {
+		panic(err)
+	}
+	return ptr
+}
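// Sketch: picking the THeader wrapped protocol through TConfiguration. The
// *Must variant panics on unsupported ids, so it suits compile-time constants;
// the invalid id 42 below is made up to show the fallback behavior.
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

func main() {
	conf := &thrift.TConfiguration{
		THeaderProtocolID: thrift.THeaderProtocolIDPtrMust(thrift.THeaderProtocolCompact),
	}
	fmt.Println(conf.GetTHeaderProtocolID() == thrift.THeaderProtocolCompact) // true

	// An unsupported id validates back to the default (binary) protocol.
	id, err := thrift.THeaderProtocolIDPtr(thrift.THeaderProtocolID(42))
	fmt.Println(*id == thrift.THeaderProtocolDefault, err != nil) // true true
}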
+
+// TConfigurationSetter is an optional interface TProtocol, TTransport,
+// TProtocolFactory, TTransportFactory, and other implementations can implement.
+//
+// It's intended to be called during initialization.
+// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the
+// middle of a message is undefined:
+// It may or may not change the behavior of the current processing message,
+// and it may even cause the current message to fail.
+//
+// Note for implementations: SetTConfiguration might be called multiple times
+// with the same value in quick successions due to the implementation of the
+// propagation. Implementations should make SetTConfiguration as simple as
+// possible (usually just overwrite the stored configuration and propagate it to
+// the wrapped TTransports/TProtocols).
+type TConfigurationSetter interface {
+	SetTConfiguration(*TConfiguration)
+}
+
+// PropagateTConfiguration propagates cfg to impl if impl implements
+// TConfigurationSetter and cfg is non-nil, otherwise it does nothing.
+//
+// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration
+// with all default values, use &TConfiguration{} explicitly instead.
+func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) {
+	if cfg == nil || cfg.noPropagation {
+		return
+	}
+
+	if setter, ok := impl.(TConfigurationSetter); ok {
+		setter.SetTConfiguration(cfg)
+	}
+}
+
+func checkSizeForProtocol(size int32, cfg *TConfiguration) error {
+	if size < 0 {
+		return NewTProtocolExceptionWithType(
+			NEGATIVE_SIZE,
+			fmt.Errorf("negative size: %d", size),
+		)
+	}
+	if size > cfg.GetMaxMessageSize() {
+		return NewTProtocolExceptionWithType(
+			SIZE_LIMIT,
+			fmt.Errorf("size exceeded max allowed: %d", size),
+		)
+	}
+	return nil
+}
+
+type tTransportFactoryConf struct {
+	delegate TTransportFactory
+	cfg      *TConfiguration
+}
+
+func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) {
+	trans, err := f.delegate.GetTransport(orig)
+	if err == nil {
+		PropagateTConfiguration(orig, f.cfg)
+		PropagateTConfiguration(trans, f.cfg)
+	}
+	return trans, err
+}
+
+func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(f.delegate, f.cfg)
+	f.cfg = cfg
+}
+
+// TTransportFactoryConf wraps a TTransportFactory to propagate
+// TConfiguration on the factory's GetTransport calls.
+func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory {
+	return &tTransportFactoryConf{
+		delegate: delegate,
+		cfg:      conf,
+	}
+}
+
+type tProtocolFactoryConf struct {
+	delegate TProtocolFactory
+	cfg      *TConfiguration
+}
+
+func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol {
+	proto := f.delegate.GetProtocol(trans)
+	PropagateTConfiguration(trans, f.cfg)
+	PropagateTConfiguration(proto, f.cfg)
+	return proto
+}
+
+func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(f.delegate, f.cfg)
+	f.cfg = cfg
+}
+
+// TProtocolFactoryConf wraps a TProtocolFactory to propagate
+// TConfiguration on the factory's GetProtocol calls.
+func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory {
+	return &tProtocolFactoryConf{
+		delegate: delegate,
+		cfg:      conf,
+	}
+}
+
+var (
+	_ TConfigurationSetter = (*tTransportFactoryConf)(nil)
+	_ TConfigurationSetter = (*tProtocolFactoryConf)(nil)
+)
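// Sketch: the TConfiguration getters are nil-safe, and GetMaxFrameSize is
// clamped by GetMaxMessageSize as documented above. The values are arbitrary.
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

func main() {
	var nilConf *thrift.TConfiguration
	fmt.Println(nilConf.GetMaxMessageSize()) // 104857600 (DEFAULT_MAX_MESSAGE_SIZE)

	conf := &thrift.TConfiguration{
		MaxMessageSize: 1024, // smaller than MaxFrameSize
		MaxFrameSize:   16384000,
	}
	fmt.Println(conf.GetMaxFrameSize()) // 1024: clamped by MaxMessageSize

	// Unset strict flags fall back to the TBinaryProtocol defaults.
	fmt.Println(conf.GetTBinaryStrictRead(), conf.GetTBinaryStrictWrite()) // false true
}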
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/processor.go b/vendor/github.com/uber/jaeger-client-go/thrift/context.go
similarity index 74%
rename from vendor/github.com/uber/jaeger-client-go/thrift/processor.go
rename to vendor/github.com/uber/jaeger-client-go/thrift/context.go
index ca0d3fa..d15c1bc 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/processor.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/context.go
@@ -19,12 +19,6 @@
 
 package thrift
 
-// A processor is a generic object which operates upon an input stream and
-// writes to some output stream.
-type TProcessor interface {
-	Process(in, out TProtocol) (bool, TException)
-}
+import "context"
 
-type TProcessorFunction interface {
-	Process(seqId int32, in, out TProtocol) (bool, TException)
-}
+var defaultCtx = context.Background()
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
index ea8d6f6..53bf862 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
@@ -26,19 +26,91 @@
 // Generic Thrift exception
 type TException interface {
 	error
+
+	TExceptionType() TExceptionType
 }
 
 // Prepends additional information to an error without losing the Thrift exception interface
 func PrependError(prepend string, err error) error {
-	if t, ok := err.(TTransportException); ok {
-		return NewTTransportException(t.TypeId(), prepend+t.Error())
-	}
-	if t, ok := err.(TProtocolException); ok {
-		return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error()))
-	}
-	if t, ok := err.(TApplicationException); ok {
-		return NewTApplicationException(t.TypeId(), prepend+t.Error())
+	msg := prepend + err.Error()
+
+	var te TException
+	if errors.As(err, &te) {
+		switch te.TExceptionType() {
+		case TExceptionTypeTransport:
+			if t, ok := err.(TTransportException); ok {
+				return prependTTransportException(prepend, t)
+			}
+		case TExceptionTypeProtocol:
+			if t, ok := err.(TProtocolException); ok {
+				return prependTProtocolException(prepend, t)
+			}
+		case TExceptionTypeApplication:
+			var t TApplicationException
+			if errors.As(err, &t) {
+				return NewTApplicationException(t.TypeId(), msg)
+			}
+		}
+
+		return wrappedTException{
+			err:            err,
+			msg:            msg,
+			tExceptionType: te.TExceptionType(),
+		}
 	}
 
-	return errors.New(prepend + err.Error())
+	return errors.New(msg)
 }
+
+// TExceptionType is an enum type to categorize different "subclasses" of TExceptions.
+type TExceptionType byte
+
+// TExceptionType values
+const (
+	TExceptionTypeUnknown     TExceptionType = iota
+	TExceptionTypeCompiled                   // TExceptions defined in thrift files and generated by thrift compiler
+	TExceptionTypeApplication                // TApplicationExceptions
+	TExceptionTypeProtocol                   // TProtocolExceptions
+	TExceptionTypeTransport                  // TTransportExceptions
+)
+
+// WrapTException wraps an error into TException.
+//
+// If err is nil or already a TException, it's returned as-is.
+// Otherwise it will be wrapped into TException with TExceptionType() returning
+// TExceptionTypeUnknown, and Unwrap() returning the original error.
+func WrapTException(err error) TException {
+	if err == nil {
+		return nil
+	}
+
+	if te, ok := err.(TException); ok {
+		return te
+	}
+
+	return wrappedTException{
+		err:            err,
+		msg:            err.Error(),
+		tExceptionType: TExceptionTypeUnknown,
+	}
+}
+
+type wrappedTException struct {
+	err            error
+	msg            string
+	tExceptionType TExceptionType
+}
+
+func (w wrappedTException) Error() string {
+	return w.msg
+}
+
+func (w wrappedTException) TExceptionType() TExceptionType {
+	return w.tExceptionType
+}
+
+func (w wrappedTException) Unwrap() error {
+	return w.err
+}
+
+var _ TException = wrappedTException{}
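// Sketch: WrapTException keeps the original error reachable for errors.Is/As
// via Unwrap, while PrependError preserves the TException "subclass". The
// sentinel error and messages below are invented for illustration.
package main

import (
	"errors"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

var errBackend = errors.New("backend unavailable")

func main() {
	te := thrift.WrapTException(errBackend)
	fmt.Println(te.TExceptionType() == thrift.TExceptionTypeUnknown) // true
	fmt.Println(errors.Is(te, errBackend))                           // true, via Unwrap

	wrapped := thrift.PrependError("while flushing spans: ", te)
	fmt.Println(wrapped) // while flushing spans: backend unavailable

	var asTE thrift.TException
	fmt.Println(errors.As(wrapped, &asTE)) // true: the TException type survives
}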
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
new file mode 100644
index 0000000..ac9bd48
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+)
+
+// See https://godoc.org/context#WithValue on why we need the unexported typedefs.
+type (
+	headerKey     string
+	headerKeyList int
+)
+
+// Values for headerKeyList.
+const (
+	headerKeyListRead headerKeyList = iota
+	headerKeyListWrite
+)
+
+// SetHeader sets a header in the context.
+func SetHeader(ctx context.Context, key, value string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKey(key),
+		value,
+	)
+}
+
+// UnsetHeader unsets a previously set header in the context.
+func UnsetHeader(ctx context.Context, key string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKey(key),
+		nil,
+	)
+}
+
+// GetHeader returns a value of the given header from the context.
+func GetHeader(ctx context.Context, key string) (value string, ok bool) {
+	if v := ctx.Value(headerKey(key)); v != nil {
+		value, ok = v.(string)
+	}
+	return
+}
+
+// SetReadHeaderList sets the key list of read THeaders in the context.
+func SetReadHeaderList(ctx context.Context, keys []string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKeyListRead,
+		keys,
+	)
+}
+
+// GetReadHeaderList returns the key list of read THeaders from the context.
+func GetReadHeaderList(ctx context.Context) []string {
+	if v := ctx.Value(headerKeyListRead); v != nil {
+		if value, ok := v.([]string); ok {
+			return value
+		}
+	}
+	return nil
+}
+
+// SetWriteHeaderList sets the key list of THeaders to write in the context.
+func SetWriteHeaderList(ctx context.Context, keys []string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKeyListWrite,
+		keys,
+	)
+}
+
+// GetWriteHeaderList returns the key list of THeaders to write from the context.
+func GetWriteHeaderList(ctx context.Context) []string {
+	if v := ctx.Value(headerKeyListWrite); v != nil {
+		if value, ok := v.([]string); ok {
+			return value
+		}
+	}
+	return nil
+}
+
+// AddReadTHeaderToContext adds the whole THeader map into the context.
+func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context {
+	keys := make([]string, 0, len(headers))
+	for key, value := range headers {
+		ctx = SetHeader(ctx, key, value)
+		keys = append(keys, key)
+	}
+	return SetReadHeaderList(ctx, keys)
+}
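// Sketch: the header-context helpers carry THeader key/value pairs through a
// context.Context. SetWriteHeaderList controls which of them TStandardClient
// copies onto an outgoing THeaderProtocol (see client.go above). The header
// names and values are arbitrary examples.
package main

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

func main() {
	ctx := context.Background()

	// Outgoing side: stage a header and mark it for writing.
	ctx = thrift.SetHeader(ctx, "tenant", "acme")
	ctx = thrift.SetWriteHeaderList(ctx, []string{"tenant"})
	fmt.Println(thrift.GetWriteHeaderList(ctx)) // [tenant]

	// Incoming side: fold a received THeaderMap back into the context.
	ctx = thrift.AddReadTHeaderToContext(ctx, thrift.THeaderMap{"trace-id": "abc123"})
	v, ok := thrift.GetHeader(ctx, "trace-id")
	fmt.Println(v, ok)                         // abc123 true
	fmt.Println(thrift.GetReadHeaderList(ctx)) // [trace-id]
}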
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go
new file mode 100644
index 0000000..878041f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go
@@ -0,0 +1,351 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"errors"
+)
+
+// THeaderProtocol is a thrift protocol that implements THeader:
+// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
+//
+// It supports either binary or compact protocol as the wrapped protocol.
+//
+// Most of the THeader handling happens inside THeaderTransport.
+type THeaderProtocol struct {
+	transport *THeaderTransport
+
+	// Will be initialized on first read/write.
+	protocol TProtocol
+
+	cfg *TConfiguration
+}
+
+// Deprecated: Use NewTHeaderProtocolConf instead.
+func NewTHeaderProtocol(trans TTransport) *THeaderProtocol {
+	return newTHeaderProtocolConf(trans, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying
+// transport with given TConfiguration.
+//
+// The passed in transport will be wrapped with THeaderTransport.
+//
+// Note that THeaderTransport handles framing and zlib by itself,
+// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
+// instead of a rich transport like TZlibTransport or TFramedTransport.
+func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol {
+	return newTHeaderProtocolConf(trans, conf)
+}
+
+func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol {
+	t := NewTHeaderTransportConf(trans, cfg)
+	p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t)
+	PropagateTConfiguration(p, cfg)
+	return &THeaderProtocol{
+		transport: t,
+		protocol:  p,
+		cfg:       cfg,
+	}
+}
+
+type tHeaderProtocolFactory struct {
+	cfg *TConfiguration
+}
+
+func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return newTHeaderProtocolConf(trans, f.cfg)
+}
+
+func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) {
+	f.cfg = cfg
+}
+
+// Deprecated: Use NewTHeaderProtocolFactoryConf instead.
+func NewTHeaderProtocolFactory() TProtocolFactory {
+	return NewTHeaderProtocolFactoryConf(&TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTHeaderProtocolFactoryConf creates a factory for THeader with given
+// TConfiguration.
+func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory {
+	return tHeaderProtocolFactory{
+		cfg: conf,
+	}
+}
+
+// Transport returns the underlying transport.
+//
+// It's guaranteed to be of type *THeaderTransport.
+func (p *THeaderProtocol) Transport() TTransport {
+	return p.transport
+}
+
+// GetReadHeaders returns the THeaderMap read from transport.
+func (p *THeaderProtocol) GetReadHeaders() THeaderMap {
+	return p.transport.GetReadHeaders()
+}
+
+// SetWriteHeader sets a header for write.
+func (p *THeaderProtocol) SetWriteHeader(key, value string) {
+	p.transport.SetWriteHeader(key, value)
+}
+
+// ClearWriteHeaders clears all write headers previously set.
+func (p *THeaderProtocol) ClearWriteHeaders() {
+	p.transport.ClearWriteHeaders()
+}
+
+// AddTransform adds a transform for writing.
+func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error {
+	return p.transport.AddTransform(transform)
+}
+
+func (p *THeaderProtocol) Flush(ctx context.Context) error {
+	return p.transport.Flush(ctx)
+}
+
+func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error {
+	newProto, err := p.transport.Protocol().GetProtocol(p.transport)
+	if err != nil {
+		return err
+	}
+	PropagateTConfiguration(newProto, p.cfg)
+	p.protocol = newProto
+	p.transport.SequenceID = seqID
+	return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID)
+}
+
+func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error {
+	if err := p.protocol.WriteMessageEnd(ctx); err != nil {
+		return err
+	}
+	return p.transport.Flush(ctx)
+}
+
+func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error {
+	return p.protocol.WriteStructBegin(ctx, name)
+}
+
+func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error {
+	return p.protocol.WriteStructEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error {
+	return p.protocol.WriteFieldBegin(ctx, name, typeID, id)
+}
+
+func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error {
+	return p.protocol.WriteFieldEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error {
+	return p.protocol.WriteFieldStop(ctx)
+}
+
+func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+	return p.protocol.WriteMapBegin(ctx, keyType, valueType, size)
+}
+
+func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error {
+	return p.protocol.WriteMapEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+	return p.protocol.WriteListBegin(ctx, elemType, size)
+}
+
+func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error {
+	return p.protocol.WriteListEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+	return p.protocol.WriteSetBegin(ctx, elemType, size)
+}
+
+func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error {
+	return p.protocol.WriteSetEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error {
+	return p.protocol.WriteBool(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error {
+	return p.protocol.WriteByte(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error {
+	return p.protocol.WriteI16(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error {
+	return p.protocol.WriteI32(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error {
+	return p.protocol.WriteI64(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error {
+	return p.protocol.WriteDouble(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error {
+	return p.protocol.WriteString(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error {
+	return p.protocol.WriteBinary(ctx, value)
+}
+
+// ReadFrame calls underlying THeaderTransport's ReadFrame function.
+func (p *THeaderProtocol) ReadFrame(ctx context.Context) error {
+	return p.transport.ReadFrame(ctx)
+}
+
+func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) {
+	if err = p.transport.ReadFrame(ctx); err != nil {
+		return
+	}
+
+	var newProto TProtocol
+	newProto, err = p.transport.Protocol().GetProtocol(p.transport)
+	if err != nil {
+		var tAppExc TApplicationException
+		if !errors.As(err, &tAppExc) {
+			return
+		}
+		if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil {
+			return
+		}
+		if e := tAppExc.Write(ctx, p.protocol); e != nil {
+			return
+		}
+		if e := p.protocol.WriteMessageEnd(ctx); e != nil {
+			return
+		}
+		if e := p.transport.Flush(ctx); e != nil {
+			return
+		}
+		return
+	}
+	PropagateTConfiguration(newProto, p.cfg)
+	p.protocol = newProto
+
+	return p.protocol.ReadMessageBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error {
+	return p.protocol.ReadMessageEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+	return p.protocol.ReadStructBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error {
+	return p.protocol.ReadStructEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) {
+	return p.protocol.ReadFieldBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error {
+	return p.protocol.ReadFieldEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
+	return p.protocol.ReadMapBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error {
+	return p.protocol.ReadMapEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
+	return p.protocol.ReadListBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error {
+	return p.protocol.ReadListEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+	return p.protocol.ReadSetBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error {
+	return p.protocol.ReadSetEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) {
+	return p.protocol.ReadBool(ctx)
+}
+
+func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) {
+	return p.protocol.ReadByte(ctx)
+}
+
+func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) {
+	return p.protocol.ReadI16(ctx)
+}
+
+func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) {
+	return p.protocol.ReadI32(ctx)
+}
+
+func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) {
+	return p.protocol.ReadI64(ctx)
+}
+
+func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
+	return p.protocol.ReadDouble(ctx)
+}
+
+func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) {
+	return p.protocol.ReadString(ctx)
+}
+
+func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
+	return p.protocol.ReadBinary(ctx)
+}
+
+func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error {
+	return p.protocol.Skip(ctx, fieldType)
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(p.transport, cfg)
+	PropagateTConfiguration(p.protocol, cfg)
+	p.cfg = cfg
+}
+
+var (
+	_ TConfigurationSetter = (*tHeaderProtocolFactory)(nil)
+	_ TConfigurationSetter = (*THeaderProtocol)(nil)
+)
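// Sketch (hedged): wiring THeaderProtocol with a shared TConfiguration and a
// per-call write header, mirroring what TStandardClient.Send does with the
// context helpers. The transport here is an in-memory buffer and the header
// and method names are invented; this only shows the write side.
package main

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
)

func main() {
	ctx := context.Background()
	conf := &thrift.TConfiguration{MaxFrameSize: 1024 * 1024}

	buf := thrift.NewTMemoryBuffer()
	proto := thrift.NewTHeaderProtocolConf(buf, conf)

	// Headers staged on the protocol are written with the next frame.
	proto.SetWriteHeader("tenant", "acme")

	if err := proto.WriteMessageBegin(ctx, "ping", thrift.CALL, 1); err != nil {
		panic(err)
	}
	if err := proto.WriteMessageEnd(ctx); err != nil { // also flushes the frame
		panic(err)
	}
	fmt.Println("frame bytes buffered:", buf.RemainingBytes())
}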
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go
new file mode 100644
index 0000000..f5736df
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go
@@ -0,0 +1,810 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bufio"
+	"bytes"
+	"compress/zlib"
+	"context"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+)
+
+// Size in bytes for 32-bit ints.
+const size32 = 4
+
+type headerMeta struct {
+	MagicFlags   uint32
+	SequenceID   int32
+	HeaderLength uint16
+}
+
+const headerMetaSize = 10
+
+type clientType int
+
+const (
+	clientUnknown clientType = iota
+	clientHeaders
+	clientFramedBinary
+	clientUnframedBinary
+	clientFramedCompact
+	clientUnframedCompact
+)
+
+// Constants defined in THeader format:
+// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
+const (
+	THeaderHeaderMagic  uint32 = 0x0fff0000
+	THeaderHeaderMask   uint32 = 0xffff0000
+	THeaderFlagsMask    uint32 = 0x0000ffff
+	THeaderMaxFrameSize uint32 = 0x3fffffff
+)
+
+// THeaderMap is the type of the header map in THeader transport.
+type THeaderMap map[string]string
+
+// THeaderProtocolID is the wrapped protocol id used in THeader.
+type THeaderProtocolID int32
+
+// Supported THeaderProtocolID values.
+const (
+	THeaderProtocolBinary  THeaderProtocolID = 0x00
+	THeaderProtocolCompact THeaderProtocolID = 0x02
+	THeaderProtocolDefault                   = THeaderProtocolBinary
+)
+
+// Declared globally to avoid repetitive allocations; only used for protocol id validation, never for actual I/O.
+var globalMemoryBuffer = NewTMemoryBuffer()
+
+// Validate checks whether the THeaderProtocolID is a valid/supported one.
+func (id THeaderProtocolID) Validate() error {
+	_, err := id.GetProtocol(globalMemoryBuffer)
+	return err
+}
+
+// GetProtocol gets the corresponding TProtocol from the wrapped protocol id.
+func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) {
+	switch id {
+	default:
+		return nil, NewTApplicationException(
+			INVALID_PROTOCOL,
+			fmt.Sprintf("THeader protocol id %d not supported", id),
+		)
+	case THeaderProtocolBinary:
+		return NewTBinaryProtocolTransport(trans), nil
+	case THeaderProtocolCompact:
+		return NewTCompactProtocol(trans), nil
+	}
+}
+
+// THeaderTransformID defines the numeric id of the transform used.
+type THeaderTransformID int32
+
+// THeaderTransformID values.
+//
+// Values not defined here are not currently supported, namely HMAC and Snappy.
+const (
+	TransformNone THeaderTransformID = iota // 0, no special handling
+	TransformZlib                           // 1, zlib
+)
+
+var supportedTransformIDs = map[THeaderTransformID]bool{
+	TransformNone: true,
+	TransformZlib: true,
+}
+
+// TransformReader is an io.ReadCloser that handles transforms reading.
+type TransformReader struct {
+	io.Reader
+
+	closers []io.Closer
+}
+
+var _ io.ReadCloser = (*TransformReader)(nil)
+
+// NewTransformReaderWithCapacity initializes a TransformReader with expected
+// closers capacity.
+//
+// If you don't know the closers capacity beforehand, using
+//
+//     &TransformReader{Reader: baseReader}
+//
+// directly is sufficient.
+func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader {
+	return &TransformReader{
+		Reader:  baseReader,
+		closers: make([]io.Closer, 0, capacity),
+	}
+}
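+
+// Illustrative sketch (editor-added, not part of the upstream file): reading a
+// zlib-compressed payload through TransformReader. The transform is added
+// before reading, and Close releases the zlib reader afterwards.
+func exampleTransformReaderZlib(base io.Reader) ([]byte, error) {
+	tr := NewTransformReaderWithCapacity(base, 1)
+	if err := tr.AddTransform(TransformZlib); err != nil {
+		return nil, err
+	}
+	defer tr.Close()
+	return ioutil.ReadAll(tr)
+}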
+
+// Close calls the underlying closers in reverse order,
+// stopping at and returning the first error encountered.
+func (tr *TransformReader) Close() error {
+	// Call closers in reversed order
+	for i := len(tr.closers) - 1; i >= 0; i-- {
+		if err := tr.closers[i].Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// AddTransform adds a transform.
+func (tr *TransformReader) AddTransform(id THeaderTransformID) error {
+	switch id {
+	default:
+		return NewTApplicationException(
+			INVALID_TRANSFORM,
+			fmt.Sprintf("THeaderTransformID %d not supported", id),
+		)
+	case TransformNone:
+		// no-op
+	case TransformZlib:
+		readCloser, err := zlib.NewReader(tr.Reader)
+		if err != nil {
+			return err
+		}
+		tr.Reader = readCloser
+		tr.closers = append(tr.closers, readCloser)
+	}
+	return nil
+}
+
+// TransformWriter is an io.WriteCloser that handles transforms writing.
+type TransformWriter struct {
+	io.Writer
+
+	closers []io.Closer
+}
+
+var _ io.WriteCloser = (*TransformWriter)(nil)
+
+// NewTransformWriter creates a new TransformWriter with base writer and transforms.
+func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) {
+	writer := &TransformWriter{
+		Writer:  baseWriter,
+		closers: make([]io.Closer, 0, len(transforms)),
+	}
+	for _, id := range transforms {
+		if err := writer.AddTransform(id); err != nil {
+			return nil, err
+		}
+	}
+	return writer, nil
+}
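+
+// Illustrative sketch (editor-added): compressing a payload with
+// TransformWriter. Close must be called so the zlib stream is flushed before
+// the underlying buffer is read.
+func exampleTransformWriterZlib(payload []byte) ([]byte, error) {
+	var buf bytes.Buffer
+	writer, err := NewTransformWriter(&buf, []THeaderTransformID{TransformZlib})
+	if err != nil {
+		return nil, err
+	}
+	if _, err := writer.Write(payload); err != nil {
+		return nil, err
+	}
+	if err := writer.Close(); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}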
+
+// Close calls the underlying closers in reverse order,
+// stopping at and returning the first error encountered.
+func (tw *TransformWriter) Close() error {
+	// Call closers in reversed order
+	for i := len(tw.closers) - 1; i >= 0; i-- {
+		if err := tw.closers[i].Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// AddTransform adds a transform.
+func (tw *TransformWriter) AddTransform(id THeaderTransformID) error {
+	switch id {
+	default:
+		return NewTApplicationException(
+			INVALID_TRANSFORM,
+			fmt.Sprintf("THeaderTransformID %d not supported", id),
+		)
+	case TransformNone:
+		// no-op
+	case TransformZlib:
+		writeCloser := zlib.NewWriter(tw.Writer)
+		tw.Writer = writeCloser
+		tw.closers = append(tw.closers, writeCloser)
+	}
+	return nil
+}
+
+// THeaderInfoType is the type id of the info headers.
+type THeaderInfoType int32
+
+// Supported THeaderInfoType values.
+const (
+	_            THeaderInfoType = iota // Skip 0
+	InfoKeyValue                        // 1
+	// Rest of the info types are not supported.
+)
+
+// THeaderTransport is a Transport mode that implements THeader.
+//
+// Note that THeaderTransport handles framing and zlib compression by itself,
+// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
+// not a rich transport like TZlibTransport or TFramedTransport.
+type THeaderTransport struct {
+	SequenceID int32
+	Flags      uint32
+
+	transport TTransport
+
+	// THeaderMap for read and write
+	readHeaders  THeaderMap
+	writeHeaders THeaderMap
+
+	// Reading related variables.
+	reader *bufio.Reader
+	// When frame is detected, we read the frame fully into frameBuffer.
+	frameBuffer bytes.Buffer
+	// When it's non-nil, Read should read from frameReader instead of
+	// reader, and EOF error indicates end of frame instead of end of all
+	// transport.
+	frameReader io.ReadCloser
+
+	// Writing related variables
+	writeBuffer     bytes.Buffer
+	writeTransforms []THeaderTransformID
+
+	clientType clientType
+	protocolID THeaderProtocolID
+	cfg        *TConfiguration
+
+	// buffer is used in the following scenarios to avoid repetitive
+	// allocations, while 4 is big enough for all those scenarios:
+	//
+	// * header padding (max size 4)
+	// * write the frame size (size 4)
+	buffer [4]byte
+}
+
+var _ TTransport = (*THeaderTransport)(nil)
+
+// Deprecated: Use NewTHeaderTransportConf instead.
+func NewTHeaderTransport(trans TTransport) *THeaderTransport {
+	return NewTHeaderTransportConf(trans, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTHeaderTransportConf creates THeaderTransport from the
+// underlying transport, with the given TConfiguration attached.
+//
+// If trans is already a *THeaderTransport, it will be returned as is,
+// but with TConfiguration overridden by the value passed in.
+//
+// The protocol ID in TConfiguration is only useful for client transports.
+// For servers,
+// the protocol ID will be overridden again to the one set by the client,
+// to ensure that servers always speak the same dialect as the client.
+func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport {
+	if ht, ok := trans.(*THeaderTransport); ok {
+		ht.SetTConfiguration(conf)
+		return ht
+	}
+	PropagateTConfiguration(trans, conf)
+	return &THeaderTransport{
+		transport:    trans,
+		reader:       bufio.NewReader(trans),
+		writeHeaders: make(THeaderMap),
+		protocolID:   conf.GetTHeaderProtocolID(),
+		cfg:          conf,
+	}
+}
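+
+// Illustrative sketch (editor-added): wrapping an in-memory transport with
+// THeaderTransport and flushing one framed message. A real client would wrap
+// a raw socket transport instead of TMemoryBuffer; the zero-value
+// TConfiguration is assumed to be acceptable here.
+func exampleHeaderTransportFlush(ctx context.Context) error {
+	underlying := NewTMemoryBuffer()
+	ht := NewTHeaderTransportConf(underlying, &TConfiguration{})
+	ht.SetWriteHeader("client", "example")
+	if _, err := ht.Write([]byte("payload")); err != nil {
+		return err
+	}
+	// Flush writes the THeader frame (header block + payload) to the
+	// underlying transport.
+	return ht.Flush(ctx)
+}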
+
+// Open calls the underlying transport's Open function.
+func (t *THeaderTransport) Open() error {
+	return t.transport.Open()
+}
+
+// IsOpen calls the underlying transport's IsOpen function.
+func (t *THeaderTransport) IsOpen() bool {
+	return t.transport.IsOpen()
+}
+
+// ReadFrame tries to read the frame header, guess the client type, and handle
+// unframed clients.
+func (t *THeaderTransport) ReadFrame(ctx context.Context) error {
+	if !t.needReadFrame() {
+		// No need to read frame, skipping.
+		return nil
+	}
+
+	// Peek and handle the first 32 bits.
+	// They could either be the length field of a framed message,
+	// or the first bytes of an unframed message.
+	var buf []byte
+	var err error
+	// This is also usually the first read from a connection,
+	// so handle retries around socket timeouts.
+	_, deadlineSet := ctx.Deadline()
+	for {
+		buf, err = t.reader.Peek(size32)
+		if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
+			// This is an I/O timeout and we still have time,
+			// so keep trying.
+			continue
+		}
+		// For anything else, do not retry
+		break
+	}
+	if err != nil {
+		return err
+	}
+
+	frameSize := binary.BigEndian.Uint32(buf)
+	if frameSize&VERSION_MASK == VERSION_1 {
+		t.clientType = clientUnframedBinary
+		return nil
+	}
+	if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
+		t.clientType = clientUnframedCompact
+		return nil
+	}
+
+	// At this point it should be a framed message,
+	// sanity check on frameSize then discard the peeked part.
+	if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) {
+		return NewTProtocolExceptionWithType(
+			SIZE_LIMIT,
+			errors.New("frame too large"),
+		)
+	}
+	t.reader.Discard(size32)
+
+	// Read the frame fully into frameBuffer.
+	_, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize))
+	if err != nil {
+		return err
+	}
+	t.frameReader = ioutil.NopCloser(&t.frameBuffer)
+
+	// Peek and handle the next 32 bits.
+	buf = t.frameBuffer.Bytes()[:size32]
+	version := binary.BigEndian.Uint32(buf)
+	if version&THeaderHeaderMask == THeaderHeaderMagic {
+		t.clientType = clientHeaders
+		return t.parseHeaders(ctx, frameSize)
+	}
+	if version&VERSION_MASK == VERSION_1 {
+		t.clientType = clientFramedBinary
+		return nil
+	}
+	if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
+		t.clientType = clientFramedCompact
+		return nil
+	}
+	if err := t.endOfFrame(); err != nil {
+		return err
+	}
+	return NewTProtocolExceptionWithType(
+		NOT_IMPLEMENTED,
+		errors.New("unsupported client transport type"),
+	)
+}
+
+// endOfFrame does end of frame handling.
+//
+// It closes frameReader, and also resets frame related states.
+func (t *THeaderTransport) endOfFrame() error {
+	defer func() {
+		t.frameBuffer.Reset()
+		t.frameReader = nil
+	}()
+	return t.frameReader.Close()
+}
+
+func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error {
+	if t.clientType != clientHeaders {
+		return nil
+	}
+
+	var err error
+	var meta headerMeta
+	if err = binary.Read(&t.frameBuffer, binary.BigEndian, &meta); err != nil {
+		return err
+	}
+	frameSize -= headerMetaSize
+	t.Flags = meta.MagicFlags & THeaderFlagsMask
+	t.SequenceID = meta.SequenceID
+	headerLength := int64(meta.HeaderLength) * 4
+	if int64(frameSize) < headerLength {
+		return NewTProtocolExceptionWithType(
+			SIZE_LIMIT,
+			errors.New("header size is larger than the whole frame"),
+		)
+	}
+	headerBuf := NewTMemoryBuffer()
+	_, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength)
+	if err != nil {
+		return err
+	}
+	hp := NewTCompactProtocol(headerBuf)
+	hp.SetTConfiguration(t.cfg)
+
+	// At this point the header is already read into headerBuf,
+	// and t.frameBuffer starts from the actual payload.
+	protoID, err := hp.readVarint32()
+	if err != nil {
+		return err
+	}
+	t.protocolID = THeaderProtocolID(protoID)
+
+	var transformCount int32
+	transformCount, err = hp.readVarint32()
+	if err != nil {
+		return err
+	}
+	if transformCount > 0 {
+		reader := NewTransformReaderWithCapacity(
+			&t.frameBuffer,
+			int(transformCount),
+		)
+		t.frameReader = reader
+		transformIDs := make([]THeaderTransformID, transformCount)
+		for i := 0; i < int(transformCount); i++ {
+			id, err := hp.readVarint32()
+			if err != nil {
+				return err
+			}
+			transformIDs[i] = THeaderTransformID(id)
+		}
+		// The transform IDs on the wire were added in the order of
+		// writing, so on the reading side we need to reverse the order.
+		for i := transformCount - 1; i >= 0; i-- {
+			id := transformIDs[i]
+			if err := reader.AddTransform(id); err != nil {
+				return err
+			}
+		}
+	}
+
+	// The info part does not use the transforms yet, so it's
+	// important to continue using headerBuf.
+	headers := make(THeaderMap)
+	for {
+		infoType, err := hp.readVarint32()
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		if THeaderInfoType(infoType) == InfoKeyValue {
+			count, err := hp.readVarint32()
+			if err != nil {
+				return err
+			}
+			for i := 0; i < int(count); i++ {
+				key, err := hp.ReadString(ctx)
+				if err != nil {
+					return err
+				}
+				value, err := hp.ReadString(ctx)
+				if err != nil {
+					return err
+				}
+				headers[key] = value
+			}
+		} else {
+			// Skip reading info section on the first
+			// unsupported info type.
+			break
+		}
+	}
+	t.readHeaders = headers
+
+	return nil
+}
+
+func (t *THeaderTransport) needReadFrame() bool {
+	if t.clientType == clientUnknown {
+		// This is a new connection that's never read before.
+		return true
+	}
+	if t.isFramed() && t.frameReader == nil {
+		// We just finished the last frame.
+		return true
+	}
+	return false
+}
+
+func (t *THeaderTransport) Read(p []byte) (read int, err error) {
+	// Using context.Background here instead of a caller-provided context is
+	// safe: the io.Reader interface gives no way to pass a context in, and
+	// in the vast majority of calls the frame has already been read into
+	// frameReader, so this ReadFrame call only guards against callers that
+	// never called ReadFrame before calling Read.
+	err = t.ReadFrame(context.Background())
+	if err != nil {
+		return
+	}
+	if t.frameReader != nil {
+		read, err = t.frameReader.Read(p)
+		if err == nil && t.frameBuffer.Len() <= 0 {
+			// the last Read finished the frame, do endOfFrame
+			// handling here.
+			err = t.endOfFrame()
+		} else if err == io.EOF {
+			err = t.endOfFrame()
+			if err != nil {
+				return
+			}
+			if read == 0 {
+				// Try to read the next frame when we hit EOF
+				// (end of frame) immediately.
+				// When we got here, it means the last read
+				// finished the previous frame, but didn't
+				// do endOfFrame handling yet.
+				// We have to read the next frame here,
+				// as otherwise we would return 0 and nil,
+				// which is a case not handled well by most
+				// protocol implementations.
+				return t.Read(p)
+			}
+		}
+		return
+	}
+	return t.reader.Read(p)
+}
+
+// Write writes data to the write buffer.
+//
+// You need to call Flush to actually write them to the transport.
+func (t *THeaderTransport) Write(p []byte) (int, error) {
+	return t.writeBuffer.Write(p)
+}
+
+// Flush writes the appropriate header and the write buffer to the underlying transport.
+func (t *THeaderTransport) Flush(ctx context.Context) error {
+	if t.writeBuffer.Len() == 0 {
+		return nil
+	}
+
+	defer t.writeBuffer.Reset()
+
+	switch t.clientType {
+	default:
+		fallthrough
+	case clientUnknown:
+		t.clientType = clientHeaders
+		fallthrough
+	case clientHeaders:
+		headers := NewTMemoryBuffer()
+		hp := NewTCompactProtocol(headers)
+		hp.SetTConfiguration(t.cfg)
+		if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		for _, transform := range t.writeTransforms {
+			if _, err := hp.writeVarint32(int32(transform)); err != nil {
+				return NewTTransportExceptionFromError(err)
+			}
+		}
+		if len(t.writeHeaders) > 0 {
+			if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil {
+				return NewTTransportExceptionFromError(err)
+			}
+			if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil {
+				return NewTTransportExceptionFromError(err)
+			}
+			for key, value := range t.writeHeaders {
+				if err := hp.WriteString(ctx, key); err != nil {
+					return NewTTransportExceptionFromError(err)
+				}
+				if err := hp.WriteString(ctx, value); err != nil {
+					return NewTTransportExceptionFromError(err)
+				}
+			}
+		}
+		padding := 4 - headers.Len()%4
+		if padding < 4 {
+			buf := t.buffer[:padding]
+			for i := range buf {
+				buf[i] = 0
+			}
+			if _, err := headers.Write(buf); err != nil {
+				return NewTTransportExceptionFromError(err)
+			}
+		}
+
+		var payload bytes.Buffer
+		meta := headerMeta{
+			MagicFlags:   THeaderHeaderMagic + t.Flags&THeaderFlagsMask,
+			SequenceID:   t.SequenceID,
+			HeaderLength: uint16(headers.Len() / 4),
+		}
+		if err := binary.Write(&payload, binary.BigEndian, meta); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		if _, err := io.Copy(&payload, headers); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+
+		writer, err := NewTransformWriter(&payload, t.writeTransforms)
+		if err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		if _, err := io.Copy(writer, &t.writeBuffer); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		if err := writer.Close(); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+
+		// First write frame length
+		buf := t.buffer[:size32]
+		binary.BigEndian.PutUint32(buf, uint32(payload.Len()))
+		if _, err := t.transport.Write(buf); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		// Then write the payload
+		if _, err := io.Copy(t.transport, &payload); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+
+	case clientFramedBinary, clientFramedCompact:
+		buf := t.buffer[:size32]
+		binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len()))
+		if _, err := t.transport.Write(buf); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		fallthrough
+	case clientUnframedBinary, clientUnframedCompact:
+		if _, err := io.Copy(t.transport, &t.writeBuffer); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+	}
+
+	select {
+	default:
+	case <-ctx.Done():
+		return NewTTransportExceptionFromError(ctx.Err())
+	}
+
+	return t.transport.Flush(ctx)
+}
+
+// Close closes the transport, along with its underlying transport.
+func (t *THeaderTransport) Close() error {
+	if err := t.Flush(context.Background()); err != nil {
+		return err
+	}
+	return t.transport.Close()
+}
+
+// RemainingBytes calls the underlying transport's RemainingBytes.
+//
+// Even in framed cases, because of all the possible compression transforms
+// involved, the remaining frame size is likely to be different from the actual
+// remaining readable bytes, so we don't bother to keep tracking the remaining
+// frame size by ourselves and just use the underlying transport's
+// RemainingBytes directly.
+func (t *THeaderTransport) RemainingBytes() uint64 {
+	return t.transport.RemainingBytes()
+}
+
+// GetReadHeaders returns the THeaderMap read from transport.
+func (t *THeaderTransport) GetReadHeaders() THeaderMap {
+	return t.readHeaders
+}
+
+// SetWriteHeader sets a header for write.
+func (t *THeaderTransport) SetWriteHeader(key, value string) {
+	t.writeHeaders[key] = value
+}
+
+// ClearWriteHeaders clears all write headers previously set.
+func (t *THeaderTransport) ClearWriteHeaders() {
+	t.writeHeaders = make(THeaderMap)
+}
+
+// AddTransform adds a transform for writing.
+func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error {
+	if !supportedTransformIDs[transform] {
+		return NewTProtocolExceptionWithType(
+			NOT_IMPLEMENTED,
+			fmt.Errorf("THeaderTransformID %d not supported", transform),
+		)
+	}
+	t.writeTransforms = append(t.writeTransforms, transform)
+	return nil
+}
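+
+// Illustrative sketch (editor-added): setting per-message headers and enabling
+// zlib compression on the write path before flushing.
+func exampleWriteHeadersAndTransform(ctx context.Context, t *THeaderTransport) error {
+	t.SetWriteHeader("trace-id", "abc123")
+	if err := t.AddTransform(TransformZlib); err != nil {
+		return err
+	}
+	if _, err := t.Write([]byte("compressed payload")); err != nil {
+		return err
+	}
+	return t.Flush(ctx)
+}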
+
+// Protocol returns the wrapped protocol id used in this THeaderTransport.
+func (t *THeaderTransport) Protocol() THeaderProtocolID {
+	switch t.clientType {
+	default:
+		return t.protocolID
+	case clientFramedBinary, clientUnframedBinary:
+		return THeaderProtocolBinary
+	case clientFramedCompact, clientUnframedCompact:
+		return THeaderProtocolCompact
+	}
+}
+
+func (t *THeaderTransport) isFramed() bool {
+	switch t.clientType {
+	default:
+		return false
+	case clientHeaders, clientFramedBinary, clientFramedCompact:
+		return true
+	}
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(t.transport, cfg)
+	t.cfg = cfg
+}
+
+// THeaderTransportFactory is a TTransportFactory implementation to create
+// THeaderTransport.
+//
+// It also implements TConfigurationSetter.
+type THeaderTransportFactory struct {
+	// The underlying factory; may be nil.
+	Factory TTransportFactory
+
+	cfg *TConfiguration
+}
+
+// Deprecated: Use NewTHeaderTransportFactoryConf instead.
+func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory {
+	return NewTHeaderTransportFactoryConf(factory, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with
+// the given *TConfiguration.
+func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory {
+	return &THeaderTransportFactory{
+		Factory: factory,
+
+		cfg: conf,
+	}
+}
+
+// GetTransport implements TTransportFactory.
+func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	if f.Factory != nil {
+		t, err := f.Factory.GetTransport(trans)
+		if err != nil {
+			return nil, err
+		}
+		return NewTHeaderTransportConf(t, f.cfg), nil
+	}
+	return NewTHeaderTransportConf(trans, f.cfg), nil
+}
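+
+// Illustrative sketch (editor-added): composing THeaderTransportFactory with
+// another factory so that every transport it produces is wrapped in THeader.
+// The zero-value TConfiguration is assumed to be acceptable here.
+func exampleWrapWithHeaderFactory(base TTransportFactory, raw TTransport) (TTransport, error) {
+	factory := NewTHeaderTransportFactoryConf(base, &TConfiguration{})
+	return factory.GetTransport(raw)
+}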
+
+// SetTConfiguration implements TConfigurationSetter.
+func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(f.Factory, f.cfg)
+	f.cfg = cfg
+}
+
+var (
+	_ TConfigurationSetter = (*THeaderTransportFactory)(nil)
+	_ TConfigurationSetter = (*THeaderTransport)(nil)
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/logger.go b/vendor/github.com/uber/jaeger-client-go/thrift/logger.go
new file mode 100644
index 0000000..50d44ec
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/logger.go
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"log"
+	"os"
+)
+
+// Logger is a simple wrapper of a logging function.
+//
+// In reality users might use different logging libraries, and those libraries
+// are not always compatible with each other.
+//
+// Logger is meant to be a simple common ground into which whatever logging
+// library they use can easily be wrapped.
+//
+// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design
+// discussion behind it.
+type Logger func(msg string)
+
+// NopLogger is a Logger implementation that does nothing.
+func NopLogger(msg string) {}
+
+// StdLogger wraps stdlib log package into a Logger.
+//
+// If the logger passed in is nil, it falls back to stderr with default flags.
+func StdLogger(logger *log.Logger) Logger {
+	if logger == nil {
+		logger = log.New(os.Stderr, "", log.LstdFlags)
+	}
+	return func(msg string) {
+		logger.Print(msg)
+	}
+}
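+
+// Illustrative sketch (editor-added): adapting an existing *log.Logger to the
+// Logger type. Any func(string) satisfies Logger directly; richer loggers only
+// need a small closure like this one.
+func exampleWrapLogger(l *log.Logger) Logger {
+	if l == nil {
+		return StdLogger(nil)
+	}
+	return func(msg string) {
+		l.Printf("thrift: %s", msg)
+	}
+}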
+
+func fallbackLogger(logger Logger) Logger {
+	if logger == nil {
+		return StdLogger(nil)
+	}
+	return logger
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
index b62fd56..5936d27 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
@@ -21,6 +21,7 @@
 
 import (
 	"bytes"
+	"context"
 )
 
 // Memory buffer-based implementation of the TTransport interface.
@@ -33,14 +34,14 @@
 	size int
 }
 
-func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) TTransport {
+func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
 	if trans != nil {
 		t, ok := trans.(*TMemoryBuffer)
 		if ok && t.size > 0 {
-			return NewTMemoryBufferLen(t.size)
+			return NewTMemoryBufferLen(t.size), nil
 		}
 	}
-	return NewTMemoryBufferLen(p.size)
+	return NewTMemoryBufferLen(p.size), nil
 }
 
 func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
@@ -70,7 +71,7 @@
 }
 
 // Flushing a memory buffer is a no-op
-func (p *TMemoryBuffer) Flush() error {
+func (p *TMemoryBuffer) Flush(ctx context.Context) error {
 	return nil
 }
 
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
index aa8daa9..e4512d2 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
@@ -69,14 +69,14 @@
 
 func NewNumericFromI64(iValue int64) Numeric {
 	dValue := float64(iValue)
-	sValue := string(iValue)
+	sValue := strconv.FormatInt(iValue, 10)
 	isNil := false
 	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
 }
 
 func NewNumericFromI32(iValue int32) Numeric {
 	dValue := float64(iValue)
-	sValue := string(iValue)
+	sValue := strconv.FormatInt(int64(iValue), 10)
 	isNil := false
 	return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
 }
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go
new file mode 100644
index 0000000..245a3cc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "context"
+
+// A processor is a generic object which operates upon an input stream and
+// writes to some output stream.
+type TProcessor interface {
+	Process(ctx context.Context, in, out TProtocol) (bool, TException)
+
+	// ProcessorMap returns a map of thrift method names to TProcessorFunctions.
+	ProcessorMap() map[string]TProcessorFunction
+
+	// AddToProcessorMap adds the given TProcessorFunction to the internal
+	// processor map at the given key.
+	//
+	// If one is already set at the given key, it will be replaced with the new
+	// TProcessorFunction.
+	AddToProcessorMap(string, TProcessorFunction)
+}
+
+type TProcessorFunction interface {
+	Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
+}
+
+// The default processor factory just returns a singleton
+// instance.
+type TProcessorFactory interface {
+	GetProcessor(trans TTransport) TProcessor
+}
+
+type tProcessorFactory struct {
+	processor TProcessor
+}
+
+func NewTProcessorFactory(p TProcessor) TProcessorFactory {
+	return &tProcessorFactory{processor: p}
+}
+
+func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
+	return p.processor
+}
+
+// The default processor function factory just returns a singleton
+// instance.
+type TProcessorFunctionFactory interface {
+	GetProcessorFunction(trans TTransport) TProcessorFunction
+}
+
+type tProcessorFunctionFactory struct {
+	processor TProcessorFunction
+}
+
+func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
+	return &tProcessorFunctionFactory{processor: p}
+}
+
+func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
+	return p.processor
+}
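+
+// Illustrative sketch (editor-added): the factory types above simply hand out
+// a pre-built processor for each accepted transport.
+func exampleGetProcessorForConnection(p TProcessor, conn TTransport) TProcessor {
+	factory := NewTProcessorFactory(p)
+	return factory.GetProcessor(conn)
+}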
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
index 45fa202..0a69bd4 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
@@ -20,7 +20,9 @@
 package thrift
 
 import (
+	"context"
 	"errors"
+	"fmt"
 )
 
 const (
@@ -29,51 +31,51 @@
 )
 
 type TProtocol interface {
-	WriteMessageBegin(name string, typeId TMessageType, seqid int32) error
-	WriteMessageEnd() error
-	WriteStructBegin(name string) error
-	WriteStructEnd() error
-	WriteFieldBegin(name string, typeId TType, id int16) error
-	WriteFieldEnd() error
-	WriteFieldStop() error
-	WriteMapBegin(keyType TType, valueType TType, size int) error
-	WriteMapEnd() error
-	WriteListBegin(elemType TType, size int) error
-	WriteListEnd() error
-	WriteSetBegin(elemType TType, size int) error
-	WriteSetEnd() error
-	WriteBool(value bool) error
-	WriteByte(value int8) error
-	WriteI16(value int16) error
-	WriteI32(value int32) error
-	WriteI64(value int64) error
-	WriteDouble(value float64) error
-	WriteString(value string) error
-	WriteBinary(value []byte) error
+	WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error
+	WriteMessageEnd(ctx context.Context) error
+	WriteStructBegin(ctx context.Context, name string) error
+	WriteStructEnd(ctx context.Context) error
+	WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error
+	WriteFieldEnd(ctx context.Context) error
+	WriteFieldStop(ctx context.Context) error
+	WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error
+	WriteMapEnd(ctx context.Context) error
+	WriteListBegin(ctx context.Context, elemType TType, size int) error
+	WriteListEnd(ctx context.Context) error
+	WriteSetBegin(ctx context.Context, elemType TType, size int) error
+	WriteSetEnd(ctx context.Context) error
+	WriteBool(ctx context.Context, value bool) error
+	WriteByte(ctx context.Context, value int8) error
+	WriteI16(ctx context.Context, value int16) error
+	WriteI32(ctx context.Context, value int32) error
+	WriteI64(ctx context.Context, value int64) error
+	WriteDouble(ctx context.Context, value float64) error
+	WriteString(ctx context.Context, value string) error
+	WriteBinary(ctx context.Context, value []byte) error
 
-	ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error)
-	ReadMessageEnd() error
-	ReadStructBegin() (name string, err error)
-	ReadStructEnd() error
-	ReadFieldBegin() (name string, typeId TType, id int16, err error)
-	ReadFieldEnd() error
-	ReadMapBegin() (keyType TType, valueType TType, size int, err error)
-	ReadMapEnd() error
-	ReadListBegin() (elemType TType, size int, err error)
-	ReadListEnd() error
-	ReadSetBegin() (elemType TType, size int, err error)
-	ReadSetEnd() error
-	ReadBool() (value bool, err error)
-	ReadByte() (value int8, err error)
-	ReadI16() (value int16, err error)
-	ReadI32() (value int32, err error)
-	ReadI64() (value int64, err error)
-	ReadDouble() (value float64, err error)
-	ReadString() (value string, err error)
-	ReadBinary() (value []byte, err error)
+	ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error)
+	ReadMessageEnd(ctx context.Context) error
+	ReadStructBegin(ctx context.Context) (name string, err error)
+	ReadStructEnd(ctx context.Context) error
+	ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error)
+	ReadFieldEnd(ctx context.Context) error
+	ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error)
+	ReadMapEnd(ctx context.Context) error
+	ReadListBegin(ctx context.Context) (elemType TType, size int, err error)
+	ReadListEnd(ctx context.Context) error
+	ReadSetBegin(ctx context.Context) (elemType TType, size int, err error)
+	ReadSetEnd(ctx context.Context) error
+	ReadBool(ctx context.Context) (value bool, err error)
+	ReadByte(ctx context.Context) (value int8, err error)
+	ReadI16(ctx context.Context) (value int16, err error)
+	ReadI32(ctx context.Context) (value int32, err error)
+	ReadI64(ctx context.Context) (value int64, err error)
+	ReadDouble(ctx context.Context) (value float64, err error)
+	ReadString(ctx context.Context) (value string, err error)
+	ReadBinary(ctx context.Context) (value []byte, err error)
 
-	Skip(fieldType TType) (err error)
-	Flush() (err error)
+	Skip(ctx context.Context, fieldType TType) (err error)
+	Flush(ctx context.Context) (err error)
 
 	Transport() TTransport
 }
@@ -82,94 +84,94 @@
 const DEFAULT_RECURSION_DEPTH = 64
 
 // Skips over the next data element from the provided input TProtocol object.
-func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) {
-	return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH)
+func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) {
+	return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH)
 }
 
 // Skips over the next data element from the provided input TProtocol object.
-func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) {
-	
-    if maxDepth <= 0 {
-		return NewTProtocolExceptionWithType( DEPTH_LIMIT, errors.New("Depth limit exceeded"))
+func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) {
+
+	if maxDepth <= 0 {
+		return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
 	}
 
 	switch fieldType {
-	case STOP:
-		return
 	case BOOL:
-		_, err = self.ReadBool()
+		_, err = self.ReadBool(ctx)
 		return
 	case BYTE:
-		_, err = self.ReadByte()
+		_, err = self.ReadByte(ctx)
 		return
 	case I16:
-		_, err = self.ReadI16()
+		_, err = self.ReadI16(ctx)
 		return
 	case I32:
-		_, err = self.ReadI32()
+		_, err = self.ReadI32(ctx)
 		return
 	case I64:
-		_, err = self.ReadI64()
+		_, err = self.ReadI64(ctx)
 		return
 	case DOUBLE:
-		_, err = self.ReadDouble()
+		_, err = self.ReadDouble(ctx)
 		return
 	case STRING:
-		_, err = self.ReadString()
+		_, err = self.ReadString(ctx)
 		return
 	case STRUCT:
-		if _, err = self.ReadStructBegin(); err != nil {
+		if _, err = self.ReadStructBegin(ctx); err != nil {
 			return err
 		}
 		for {
-			_, typeId, _, _ := self.ReadFieldBegin()
+			_, typeId, _, _ := self.ReadFieldBegin(ctx)
 			if typeId == STOP {
 				break
 			}
-			err := Skip(self, typeId, maxDepth-1)
+			err := Skip(ctx, self, typeId, maxDepth-1)
 			if err != nil {
 				return err
 			}
-			self.ReadFieldEnd()
+			self.ReadFieldEnd(ctx)
 		}
-		return self.ReadStructEnd()
+		return self.ReadStructEnd(ctx)
 	case MAP:
-		keyType, valueType, size, err := self.ReadMapBegin()
+		keyType, valueType, size, err := self.ReadMapBegin(ctx)
 		if err != nil {
 			return err
 		}
 		for i := 0; i < size; i++ {
-			err := Skip(self, keyType, maxDepth-1)
+			err := Skip(ctx, self, keyType, maxDepth-1)
 			if err != nil {
 				return err
 			}
-			self.Skip(valueType)
+			self.Skip(ctx, valueType)
 		}
-		return self.ReadMapEnd()
+		return self.ReadMapEnd(ctx)
 	case SET:
-		elemType, size, err := self.ReadSetBegin()
+		elemType, size, err := self.ReadSetBegin(ctx)
 		if err != nil {
 			return err
 		}
 		for i := 0; i < size; i++ {
-			err := Skip(self, elemType, maxDepth-1)
+			err := Skip(ctx, self, elemType, maxDepth-1)
 			if err != nil {
 				return err
 			}
 		}
-		return self.ReadSetEnd()
+		return self.ReadSetEnd(ctx)
 	case LIST:
-		elemType, size, err := self.ReadListBegin()
+		elemType, size, err := self.ReadListBegin(ctx)
 		if err != nil {
 			return err
 		}
 		for i := 0; i < size; i++ {
-			err := Skip(self, elemType, maxDepth-1)
+			err := Skip(ctx, self, elemType, maxDepth-1)
 			if err != nil {
 				return err
 			}
 		}
-		return self.ReadListEnd()
+		return self.ReadListEnd(ctx)
+	default:
+		return NewTProtocolExceptionWithType(INVALID_DATA, fmt.Errorf("Unknown data type %d", fieldType))
 	}
 	return nil
 }
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
index 6e357ee..9dcf4bf 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
@@ -21,6 +21,7 @@
 
 import (
 	"encoding/base64"
+	"errors"
 )
 
 // Thrift Protocol exception
@@ -40,8 +41,15 @@
 )
 
 type tProtocolException struct {
-	typeId  int
-	message string
+	typeId int
+	err    error
+	msg    string
+}
+
+var _ TProtocolException = (*tProtocolException)(nil)
+
+func (tProtocolException) TExceptionType() TExceptionType {
+	return TExceptionTypeProtocol
 }
 
 func (p *tProtocolException) TypeId() int {
@@ -49,30 +57,48 @@
 }
 
 func (p *tProtocolException) String() string {
-	return p.message
+	return p.msg
 }
 
 func (p *tProtocolException) Error() string {
-	return p.message
+	return p.msg
+}
+
+func (p *tProtocolException) Unwrap() error {
+	return p.err
 }
 
 func NewTProtocolException(err error) TProtocolException {
 	if err == nil {
 		return nil
 	}
-	if e,ok := err.(TProtocolException); ok {
+
+	if e, ok := err.(TProtocolException); ok {
 		return e
 	}
-	if _, ok := err.(base64.CorruptInputError); ok {
-		return &tProtocolException{INVALID_DATA, err.Error()}
+
+	if errors.As(err, new(base64.CorruptInputError)) {
+		return NewTProtocolExceptionWithType(INVALID_DATA, err)
 	}
-	return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()}
+
+	return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err)
 }
 
 func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
 	if err == nil {
 		return nil
 	}
-	return &tProtocolException{errType, err.Error()}
+	return &tProtocolException{
+		typeId: errType,
+		err:    err,
+		msg:    err.Error(),
+	}
 }
 
+func prependTProtocolException(prepend string, err TProtocolException) TProtocolException {
+	return &tProtocolException{
+		typeId: err.TypeId(),
+		err:    err,
+		msg:    prepend + err.Error(),
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
new file mode 100644
index 0000000..d884c6a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+)
+
+// See https://godoc.org/context#WithValue on why we need the unexported typedefs.
+type responseHelperKey struct{}
+
+// TResponseHelper defines an object with a set of helper functions that can be
+// retrieved from the context object passed into server handler functions.
+//
+// Use GetResponseHelper to retrieve the injected TResponseHelper implementation
+// from the context object.
+//
+// The zero value of TResponseHelper is valid with all helper functions being
+// no-op.
+type TResponseHelper struct {
+	// THeader related functions
+	*THeaderResponseHelper
+}
+
+// THeaderResponseHelper defines THeader related TResponseHelper functions.
+//
+// The zero value of *THeaderResponseHelper is valid with all helper functions
+// being no-op.
+type THeaderResponseHelper struct {
+	proto *THeaderProtocol
+}
+
+// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the
+// underlying TProtocol.
+func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper {
+	if hp, ok := proto.(*THeaderProtocol); ok {
+		return &THeaderResponseHelper{
+			proto: hp,
+		}
+	}
+	return nil
+}
+
+// SetHeader sets a response header.
+//
+// It's no-op if the underlying protocol/transport does not support THeader.
+func (h *THeaderResponseHelper) SetHeader(key, value string) {
+	if h != nil && h.proto != nil {
+		h.proto.SetWriteHeader(key, value)
+	}
+}
+
+// ClearHeaders clears all the response headers previously set.
+//
+// It's no-op if the underlying protocol/transport does not support THeader.
+func (h *THeaderResponseHelper) ClearHeaders() {
+	if h != nil && h.proto != nil {
+		h.proto.ClearWriteHeaders()
+	}
+}
+
+// GetResponseHelper retrieves the TResponseHelper implementation injected into
+// the context object.
+//
+// If no helper was found in the context object, a nop helper with ok == false
+// will be returned.
+func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) {
+	if v := ctx.Value(responseHelperKey{}); v != nil {
+		helper, ok = v.(TResponseHelper)
+	}
+	return
+}
+
+// SetResponseHelper injects TResponseHelper into the context object.
+func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context {
+	return context.WithValue(ctx, responseHelperKey{}, helper)
+}
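+
+// Illustrative sketch (editor-added): a server handler reading the injected
+// helper from the context and setting a response header when THeader is in
+// use. The header name used here is hypothetical.
+func exampleHandlerSetHeader(ctx context.Context) {
+	helper, ok := GetResponseHelper(ctx)
+	if !ok {
+		// No helper was injected; nothing to do.
+		return
+	}
+	helper.SetHeader("server-region", "example")
+}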
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
index 8e296a9..83fdf29 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
@@ -19,7 +19,10 @@
 
 package thrift
 
-import "io"
+import (
+	"errors"
+	"io"
+)
 
 type RichTransport struct {
 	TTransport
@@ -49,7 +52,7 @@
 func readByte(r io.Reader) (c byte, err error) {
 	v := [1]byte{0}
 	n, err := r.Read(v[0:1])
-	if n > 0 && (err == nil || err == io.EOF) {
+	if n > 0 && (err == nil || errors.Is(err, io.EOF)) {
 		return v[0], nil
 	}
 	if n > 0 && err != nil {
@@ -66,4 +69,3 @@
 	_, err := w.Write(v[0:1])
 	return err
 }
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
index 7712229..c449790 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
@@ -19,57 +19,118 @@
 
 package thrift
 
+import (
+	"context"
+	"sync"
+)
+
 type TSerializer struct {
 	Transport *TMemoryBuffer
 	Protocol  TProtocol
 }
 
 type TStruct interface {
-	Write(p TProtocol) error
-	Read(p TProtocol) error
+	Write(ctx context.Context, p TProtocol) error
+	Read(ctx context.Context, p TProtocol) error
 }
 
 func NewTSerializer() *TSerializer {
 	transport := NewTMemoryBufferLen(1024)
-	protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
+	protocol := NewTBinaryProtocolTransport(transport)
 
 	return &TSerializer{
-		transport,
-		protocol}
+		Transport: transport,
+		Protocol:  protocol,
+	}
 }
 
-func (t *TSerializer) WriteString(msg TStruct) (s string, err error) {
+func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) {
 	t.Transport.Reset()
 
-	if err = msg.Write(t.Protocol); err != nil {
+	if err = msg.Write(ctx, t.Protocol); err != nil {
 		return
 	}
 
-	if err = t.Protocol.Flush(); err != nil {
+	if err = t.Protocol.Flush(ctx); err != nil {
 		return
 	}
-	if err = t.Transport.Flush(); err != nil {
+	if err = t.Transport.Flush(ctx); err != nil {
 		return
 	}
 
 	return t.Transport.String(), nil
 }
 
-func (t *TSerializer) Write(msg TStruct) (b []byte, err error) {
+func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) {
 	t.Transport.Reset()
 
-	if err = msg.Write(t.Protocol); err != nil {
+	if err = msg.Write(ctx, t.Protocol); err != nil {
 		return
 	}
 
-	if err = t.Protocol.Flush(); err != nil {
+	if err = t.Protocol.Flush(ctx); err != nil {
 		return
 	}
 
-	if err = t.Transport.Flush(); err != nil {
+	if err = t.Transport.Flush(ctx); err != nil {
 		return
 	}
 
 	b = append(b, t.Transport.Bytes()...)
 	return
 }
+
+// TSerializerPool is the thread-safe version of TSerializer; it uses a
+// resource pool of TSerializer under the hood.
+//
+// It must be initialized with either NewTSerializerPool or
+// NewTSerializerPoolSizeFactory.
+type TSerializerPool struct {
+	pool sync.Pool
+}
+
+// NewTSerializerPool creates a new TSerializerPool.
+//
+// NewTSerializer can be used as the arg here.
+func NewTSerializerPool(f func() *TSerializer) *TSerializerPool {
+	return &TSerializerPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return f()
+			},
+		},
+	}
+}
+
+// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given
+// size and protocol factory.
+//
+// Note that the size is not the limit. The TMemoryBuffer underneath can grow
+// larger than that. It just dictates the initial size.
+func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool {
+	return &TSerializerPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				transport := NewTMemoryBufferLen(size)
+				protocol := factory.GetProtocol(transport)
+
+				return &TSerializer{
+					Transport: transport,
+					Protocol:  protocol,
+				}
+			},
+		},
+	}
+}
+
+func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) {
+	s := t.pool.Get().(*TSerializer)
+	defer t.pool.Put(s)
+	return s.WriteString(ctx, msg)
+}
+
+func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) {
+	s := t.pool.Get().(*TSerializer)
+	defer t.pool.Put(s)
+	return s.Write(ctx, msg)
+}
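+
+// Illustrative sketch (editor-added): sharing one TSerializerPool across
+// goroutines. NewTSerializer is used as the pool constructor, so each pooled
+// serializer carries its own TMemoryBuffer and binary protocol.
+func exampleSerializeConcurrently(ctx context.Context, msgs []TStruct) error {
+	pool := NewTSerializerPool(NewTSerializer)
+	var wg sync.WaitGroup
+	errs := make(chan error, len(msgs))
+	for _, msg := range msgs {
+		wg.Add(1)
+		go func(m TStruct) {
+			defer wg.Done()
+			if _, err := pool.Write(ctx, m); err != nil {
+				errs <- err
+			}
+		}(msg)
+	}
+	wg.Wait()
+	close(errs)
+	// Return the first error, if any; nil when the channel is empty.
+	return <-errs
+}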
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/processor.go b/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go
similarity index 62%
copy from vendor/github.com/uber/jaeger-client-go/thrift/processor.go
copy to vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go
index ca0d3fa..51c40b6 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/processor.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go
@@ -19,12 +19,16 @@
 
 package thrift
 
-// A processor is a generic object which operates upon an input stream and
-// writes to some output stream.
-type TProcessor interface {
-	Process(in, out TProtocol) (bool, TException)
-}
+// Server transport. Object which provides client transports.
+type TServerTransport interface {
+	Listen() error
+	Accept() (TTransport, error)
+	Close() error
 
-type TProcessorFunction interface {
-	Process(seqId int32, in, out TProtocol) (bool, TException)
+	// Optional method implementation. This signals to the server transport
+	// that it should break out of any accept() or listen() that it is currently
+	// blocked on. This method, if implemented, MUST be thread safe, as it may
+	// be called from a different thread context than the other TServerTransport
+	// methods.
+	Interrupt() error
 }
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
index 412a482..d1a8154 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
@@ -22,8 +22,10 @@
 import (
 	"bufio"
 	"bytes"
+	"context"
 	"encoding/base64"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -33,12 +35,13 @@
 type _ParseContext int
 
 const (
-	_CONTEXT_IN_TOPLEVEL          _ParseContext = 1
-	_CONTEXT_IN_LIST_FIRST        _ParseContext = 2
-	_CONTEXT_IN_LIST              _ParseContext = 3
-	_CONTEXT_IN_OBJECT_FIRST      _ParseContext = 4
-	_CONTEXT_IN_OBJECT_NEXT_KEY   _ParseContext = 5
-	_CONTEXT_IN_OBJECT_NEXT_VALUE _ParseContext = 6
+	_CONTEXT_INVALID              _ParseContext = iota
+	_CONTEXT_IN_TOPLEVEL                        // 1
+	_CONTEXT_IN_LIST_FIRST                      // 2
+	_CONTEXT_IN_LIST                            // 3
+	_CONTEXT_IN_OBJECT_FIRST                    // 4
+	_CONTEXT_IN_OBJECT_NEXT_KEY                 // 5
+	_CONTEXT_IN_OBJECT_NEXT_VALUE               // 6
 )
 
 func (p _ParseContext) String() string {
@@ -59,7 +62,33 @@
 	return "UNKNOWN-PARSE-CONTEXT"
 }
 
-// JSON protocol implementation for thrift.
+type jsonContextStack []_ParseContext
+
+func (s *jsonContextStack) push(v _ParseContext) {
+	*s = append(*s, v)
+}
+
+func (s jsonContextStack) peek() (v _ParseContext, ok bool) {
+	l := len(s)
+	if l <= 0 {
+		return
+	}
+	return s[l-1], true
+}
+
+func (s *jsonContextStack) pop() (v _ParseContext, ok bool) {
+	l := len(*s)
+	if l <= 0 {
+		return
+	}
+	v = (*s)[l-1]
+	*s = (*s)[0 : l-1]
+	return v, true
+}
+
+var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack"))
+
+// Simple JSON protocol implementation for thrift.
 //
 // This protocol produces/consumes a simple output format
 // suitable for parsing by scripting languages.  It should not be
@@ -68,8 +97,8 @@
 type TSimpleJSONProtocol struct {
 	trans TTransport
 
-	parseContextStack []int
-	dumpContext       []int
+	parseContextStack jsonContextStack
+	dumpContext       jsonContextStack
 
 	writer *bufio.Writer
 	reader *bufio.Reader
@@ -81,8 +110,8 @@
 		writer: bufio.NewWriter(t),
 		reader: bufio.NewReader(t),
 	}
-	v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL))
-	v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL))
+	v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL)
+	v.dumpContext.push(_CONTEXT_IN_TOPLEVEL)
 	return v
 }
 
@@ -155,114 +184,113 @@
 	return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual)
 }
 
-func (p *TSimpleJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
+func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
 	p.resetContextStack() // THRIFT-3735
 	if e := p.OutputListBegin(); e != nil {
 		return e
 	}
-	if e := p.WriteString(name); e != nil {
+	if e := p.WriteString(ctx, name); e != nil {
 		return e
 	}
-	if e := p.WriteByte(int8(typeId)); e != nil {
+	if e := p.WriteByte(ctx, int8(typeId)); e != nil {
 		return e
 	}
-	if e := p.WriteI32(seqId); e != nil {
+	if e := p.WriteI32(ctx, seqId); e != nil {
 		return e
 	}
 	return nil
 }
 
-func (p *TSimpleJSONProtocol) WriteMessageEnd() error {
+func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error {
 	return p.OutputListEnd()
 }
 
-func (p *TSimpleJSONProtocol) WriteStructBegin(name string) error {
+func (p *TSimpleJSONProtocol) WriteStructBegin(ctx context.Context, name string) error {
 	if e := p.OutputObjectBegin(); e != nil {
 		return e
 	}
 	return nil
 }
 
-func (p *TSimpleJSONProtocol) WriteStructEnd() error {
+func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error {
 	return p.OutputObjectEnd()
 }
 
-func (p *TSimpleJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
-	if e := p.WriteString(name); e != nil {
+func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+	if e := p.WriteString(ctx, name); e != nil {
 		return e
 	}
 	return nil
 }
 
-func (p *TSimpleJSONProtocol) WriteFieldEnd() error {
-	//return p.OutputListEnd()
+func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error {
 	return nil
 }
 
-func (p *TSimpleJSONProtocol) WriteFieldStop() error { return nil }
+func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil }
 
-func (p *TSimpleJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
 	if e := p.OutputListBegin(); e != nil {
 		return e
 	}
-	if e := p.WriteByte(int8(keyType)); e != nil {
+	if e := p.WriteByte(ctx, int8(keyType)); e != nil {
 		return e
 	}
-	if e := p.WriteByte(int8(valueType)); e != nil {
+	if e := p.WriteByte(ctx, int8(valueType)); e != nil {
 		return e
 	}
-	return p.WriteI32(int32(size))
+	return p.WriteI32(ctx, int32(size))
 }
 
-func (p *TSimpleJSONProtocol) WriteMapEnd() error {
+func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error {
 	return p.OutputListEnd()
 }
 
-func (p *TSimpleJSONProtocol) WriteListBegin(elemType TType, size int) error {
+func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
 	return p.OutputElemListBegin(elemType, size)
 }
 
-func (p *TSimpleJSONProtocol) WriteListEnd() error {
+func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error {
 	return p.OutputListEnd()
 }
 
-func (p *TSimpleJSONProtocol) WriteSetBegin(elemType TType, size int) error {
+func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
 	return p.OutputElemListBegin(elemType, size)
 }
 
-func (p *TSimpleJSONProtocol) WriteSetEnd() error {
+func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error {
 	return p.OutputListEnd()
 }
 
-func (p *TSimpleJSONProtocol) WriteBool(b bool) error {
+func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error {
 	return p.OutputBool(b)
 }
 
-func (p *TSimpleJSONProtocol) WriteByte(b int8) error {
-	return p.WriteI32(int32(b))
+func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error {
+	return p.WriteI32(ctx, int32(b))
 }
 
-func (p *TSimpleJSONProtocol) WriteI16(v int16) error {
-	return p.WriteI32(int32(v))
+func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error {
+	return p.WriteI32(ctx, int32(v))
 }
 
-func (p *TSimpleJSONProtocol) WriteI32(v int32) error {
+func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error {
 	return p.OutputI64(int64(v))
 }
 
-func (p *TSimpleJSONProtocol) WriteI64(v int64) error {
+func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error {
 	return p.OutputI64(int64(v))
 }
 
-func (p *TSimpleJSONProtocol) WriteDouble(v float64) error {
+func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error {
 	return p.OutputF64(v)
 }
 
-func (p *TSimpleJSONProtocol) WriteString(v string) error {
+func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error {
 	return p.OutputString(v)
 }
 
-func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error {
+func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error {
 	// JSON library only takes in a string,
 	// not an arbitrary byte array, to ensure bytes are transmitted
 	// efficiently we must convert this into a valid JSON string
@@ -288,39 +316,39 @@
 }
 
 // Reading methods.
-func (p *TSimpleJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
 	p.resetContextStack() // THRIFT-3735
 	if isNull, err := p.ParseListBegin(); isNull || err != nil {
 		return name, typeId, seqId, err
 	}
-	if name, err = p.ReadString(); err != nil {
+	if name, err = p.ReadString(ctx); err != nil {
 		return name, typeId, seqId, err
 	}
-	bTypeId, err := p.ReadByte()
+	bTypeId, err := p.ReadByte(ctx)
 	typeId = TMessageType(bTypeId)
 	if err != nil {
 		return name, typeId, seqId, err
 	}
-	if seqId, err = p.ReadI32(); err != nil {
+	if seqId, err = p.ReadI32(ctx); err != nil {
 		return name, typeId, seqId, err
 	}
 	return name, typeId, seqId, nil
 }
 
-func (p *TSimpleJSONProtocol) ReadMessageEnd() error {
+func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error {
 	return p.ParseListEnd()
 }
 
-func (p *TSimpleJSONProtocol) ReadStructBegin() (name string, err error) {
+func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
 	_, err = p.ParseObjectStart()
 	return "", err
 }
 
-func (p *TSimpleJSONProtocol) ReadStructEnd() error {
+func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error {
 	return p.ParseObjectEnd()
 }
 
-func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
+func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) {
 	if err := p.ParsePreValue(); err != nil {
 		return "", STOP, 0, err
 	}
@@ -339,21 +367,6 @@
 				return name, STOP, 0, err
 			}
 			return name, STOP, -1, p.ParsePostValue()
-			/*
-			   if err = p.ParsePostValue(); err != nil {
-			     return name, STOP, 0, err
-			   }
-			   if isNull, err := p.ParseListBegin(); isNull || err != nil {
-			     return name, STOP, 0, err
-			   }
-			   bType, err := p.ReadByte()
-			   thetype := TType(bType)
-			   if err != nil {
-			     return name, thetype, 0, err
-			   }
-			   id, err := p.ReadI16()
-			   return name, thetype, id, err
-			*/
 		}
 		e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b))
 		return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e)
@@ -361,57 +374,56 @@
 	return "", STOP, 0, NewTProtocolException(io.EOF)
 }
 
-func (p *TSimpleJSONProtocol) ReadFieldEnd() error {
+func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error {
 	return nil
-	//return p.ParseListEnd()
 }
 
-func (p *TSimpleJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) {
+func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) {
 	if isNull, e := p.ParseListBegin(); isNull || e != nil {
 		return VOID, VOID, 0, e
 	}
 
 	// read keyType
-	bKeyType, e := p.ReadByte()
+	bKeyType, e := p.ReadByte(ctx)
 	keyType = TType(bKeyType)
 	if e != nil {
 		return keyType, valueType, size, e
 	}
 
 	// read valueType
-	bValueType, e := p.ReadByte()
+	bValueType, e := p.ReadByte(ctx)
 	valueType = TType(bValueType)
 	if e != nil {
 		return keyType, valueType, size, e
 	}
 
 	// read size
-	iSize, err := p.ReadI64()
+	iSize, err := p.ReadI64(ctx)
 	size = int(iSize)
 	return keyType, valueType, size, err
 }
 
-func (p *TSimpleJSONProtocol) ReadMapEnd() error {
+func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error {
 	return p.ParseListEnd()
 }
 
-func (p *TSimpleJSONProtocol) ReadListBegin() (elemType TType, size int, e error) {
+func (p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) {
 	return p.ParseElemListBegin()
 }
 
-func (p *TSimpleJSONProtocol) ReadListEnd() error {
+func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error {
 	return p.ParseListEnd()
 }
 
-func (p *TSimpleJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) {
+func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) {
 	return p.ParseElemListBegin()
 }
 
-func (p *TSimpleJSONProtocol) ReadSetEnd() error {
+func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error {
 	return p.ParseListEnd()
 }
 
-func (p *TSimpleJSONProtocol) ReadBool() (bool, error) {
+func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) {
 	var value bool
 
 	if err := p.ParsePreValue(); err != nil {
@@ -466,32 +478,32 @@
 	return value, p.ParsePostValue()
 }
 
-func (p *TSimpleJSONProtocol) ReadByte() (int8, error) {
-	v, err := p.ReadI64()
+func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) {
+	v, err := p.ReadI64(ctx)
 	return int8(v), err
 }
 
-func (p *TSimpleJSONProtocol) ReadI16() (int16, error) {
-	v, err := p.ReadI64()
+func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) {
+	v, err := p.ReadI64(ctx)
 	return int16(v), err
 }
 
-func (p *TSimpleJSONProtocol) ReadI32() (int32, error) {
-	v, err := p.ReadI64()
+func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) {
+	v, err := p.ReadI64(ctx)
 	return int32(v), err
 }
 
-func (p *TSimpleJSONProtocol) ReadI64() (int64, error) {
+func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) {
 	v, _, err := p.ParseI64()
 	return v, err
 }
 
-func (p *TSimpleJSONProtocol) ReadDouble() (float64, error) {
+func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) {
 	v, _, err := p.ParseF64()
 	return v, err
 }
 
-func (p *TSimpleJSONProtocol) ReadString() (string, error) {
+func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) {
 	var v string
 	if err := p.ParsePreValue(); err != nil {
 		return v, err
@@ -521,7 +533,7 @@
 	return v, p.ParsePostValue()
 }
 
-func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) {
+func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
 	var v []byte
 	if err := p.ParsePreValue(); err != nil {
 		return nil, err
@@ -552,12 +564,12 @@
 	return v, p.ParsePostValue()
 }
 
-func (p *TSimpleJSONProtocol) Flush() (err error) {
+func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) {
 	return NewTProtocolException(p.writer.Flush())
 }
 
-func (p *TSimpleJSONProtocol) Skip(fieldType TType) (err error) {
-	return SkipDefaultDepth(p, fieldType)
+func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+	return SkipDefaultDepth(ctx, p, fieldType)
 }
 
 func (p *TSimpleJSONProtocol) Transport() TTransport {
@@ -565,41 +577,41 @@
 }
 
 func (p *TSimpleJSONProtocol) OutputPreValue() error {
-	cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1])
+	cxt, ok := p.dumpContext.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
 	switch cxt {
 	case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY:
 		if _, e := p.write(JSON_COMMA); e != nil {
 			return NewTProtocolException(e)
 		}
-		break
 	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
 		if _, e := p.write(JSON_COLON); e != nil {
 			return NewTProtocolException(e)
 		}
-		break
 	}
 	return nil
 }
 
 func (p *TSimpleJSONProtocol) OutputPostValue() error {
-	cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1])
+	cxt, ok := p.dumpContext.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
 	switch cxt {
 	case _CONTEXT_IN_LIST_FIRST:
-		p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
-		p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST))
-		break
+		p.dumpContext.pop()
+		p.dumpContext.push(_CONTEXT_IN_LIST)
 	case _CONTEXT_IN_OBJECT_FIRST:
-		p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
-		p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
-		break
+		p.dumpContext.pop()
+		p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
 	case _CONTEXT_IN_OBJECT_NEXT_KEY:
-		p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
-		p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
-		break
+		p.dumpContext.pop()
+		p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
 	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
-		p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
-		p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY))
-		break
+		p.dumpContext.pop()
+		p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
 	}
 	return nil
 }
@@ -614,10 +626,13 @@
 	} else {
 		v = string(JSON_FALSE)
 	}
-	switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+	cxt, ok := p.dumpContext.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	switch cxt {
 	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
 		v = jsonQuote(v)
-	default:
 	}
 	if e := p.OutputStringData(v); e != nil {
 		return e
@@ -647,11 +662,14 @@
 	} else if math.IsInf(value, -1) {
 		v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE)
 	} else {
+		cxt, ok := p.dumpContext.peek()
+		if !ok {
+			return errEmptyJSONContextStack
+		}
 		v = strconv.FormatFloat(value, 'g', -1, 64)
-		switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+		switch cxt {
 		case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
 			v = string(JSON_QUOTE) + v + string(JSON_QUOTE)
-		default:
 		}
 	}
 	if e := p.OutputStringData(v); e != nil {
@@ -664,11 +682,14 @@
 	if e := p.OutputPreValue(); e != nil {
 		return e
 	}
+	cxt, ok := p.dumpContext.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
 	v := strconv.FormatInt(value, 10)
-	switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) {
+	switch cxt {
 	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
 		v = jsonQuote(v)
-	default:
 	}
 	if e := p.OutputStringData(v); e != nil {
 		return e
@@ -698,7 +719,7 @@
 	if _, e := p.write(JSON_LBRACE); e != nil {
 		return NewTProtocolException(e)
 	}
-	p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST))
+	p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST)
 	return nil
 }
 
@@ -706,7 +727,10 @@
 	if _, e := p.write(JSON_RBRACE); e != nil {
 		return NewTProtocolException(e)
 	}
-	p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+	_, ok := p.dumpContext.pop()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
 	if e := p.OutputPostValue(); e != nil {
 		return e
 	}
@@ -720,7 +744,7 @@
 	if _, e := p.write(JSON_LBRACKET); e != nil {
 		return NewTProtocolException(e)
 	}
-	p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST_FIRST))
+	p.dumpContext.push(_CONTEXT_IN_LIST_FIRST)
 	return nil
 }
 
@@ -728,7 +752,10 @@
 	if _, e := p.write(JSON_RBRACKET); e != nil {
 		return NewTProtocolException(e)
 	}
-	p.dumpContext = p.dumpContext[:len(p.dumpContext)-1]
+	_, ok := p.dumpContext.pop()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
 	if e := p.OutputPostValue(); e != nil {
 		return e
 	}
@@ -739,10 +766,10 @@
 	if e := p.OutputListBegin(); e != nil {
 		return e
 	}
-	if e := p.WriteByte(int8(elemType)); e != nil {
+	if e := p.OutputI64(int64(elemType)); e != nil {
 		return e
 	}
-	if e := p.WriteI64(int64(size)); e != nil {
+	if e := p.OutputI64(int64(size)); e != nil {
 		return e
 	}
 	return nil
@@ -752,7 +779,10 @@
 	if e := p.readNonSignificantWhitespace(); e != nil {
 		return NewTProtocolException(e)
 	}
-	cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+	cxt, ok := p.parseContextStack.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
 	b, _ := p.reader.Peek(1)
 	switch cxt {
 	case _CONTEXT_IN_LIST:
@@ -771,7 +801,6 @@
 				return NewTProtocolExceptionWithType(INVALID_DATA, e)
 			}
 		}
-		break
 	case _CONTEXT_IN_OBJECT_NEXT_KEY:
 		if len(b) > 0 {
 			switch b[0] {
@@ -788,7 +817,6 @@
 				return NewTProtocolExceptionWithType(INVALID_DATA, e)
 			}
 		}
-		break
 	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
 		if len(b) > 0 {
 			switch b[0] {
@@ -803,7 +831,6 @@
 				return NewTProtocolExceptionWithType(INVALID_DATA, e)
 			}
 		}
-		break
 	}
 	return nil
 }
@@ -812,20 +839,20 @@
 	if e := p.readNonSignificantWhitespace(); e != nil {
 		return NewTProtocolException(e)
 	}
-	cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+	cxt, ok := p.parseContextStack.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
 	switch cxt {
 	case _CONTEXT_IN_LIST_FIRST:
-		p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
-		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST))
-		break
+		p.parseContextStack.pop()
+		p.parseContextStack.push(_CONTEXT_IN_LIST)
 	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
-		p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
-		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
-		break
+		p.parseContextStack.pop()
+		p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
 	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
-		p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
-		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY))
-		break
+		p.parseContextStack.pop()
+		p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
 	}
 	return nil
 }
@@ -978,7 +1005,7 @@
 	}
 	if len(b) > 0 && b[0] == JSON_LBRACE[0] {
 		p.reader.ReadByte()
-		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST))
+		p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST)
 		return false, nil
 	} else if p.safePeekContains(JSON_NULL) {
 		return true, nil
@@ -991,7 +1018,7 @@
 	if isNull, err := p.readIfNull(); isNull || err != nil {
 		return err
 	}
-	cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+	cxt, _ := p.parseContextStack.peek()
 	if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) {
 		e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt)
 		return NewTProtocolExceptionWithType(INVALID_DATA, e)
@@ -1009,7 +1036,7 @@
 			break
 		}
 	}
-	p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+	p.parseContextStack.pop()
 	return p.ParsePostValue()
 }
 
@@ -1023,7 +1050,7 @@
 		return false, err
 	}
 	if len(b) >= 1 && b[0] == JSON_LBRACKET[0] {
-		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST))
+		p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST)
 		p.reader.ReadByte()
 		isNull = false
 	} else if p.safePeekContains(JSON_NULL) {
@@ -1038,12 +1065,12 @@
 	if isNull, e := p.ParseListBegin(); isNull || e != nil {
 		return VOID, 0, e
 	}
-	bElemType, err := p.ReadByte()
+	bElemType, _, err := p.ParseI64()
 	elemType = TType(bElemType)
 	if err != nil {
 		return elemType, size, err
 	}
-	nSize, err2 := p.ReadI64()
+	nSize, _, err2 := p.ParseI64()
 	size = int(nSize)
 	return elemType, size, err2
 }
@@ -1052,7 +1079,7 @@
 	if isNull, err := p.readIfNull(); isNull || err != nil {
 		return err
 	}
-	cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+	cxt, _ := p.parseContextStack.peek()
 	if cxt != _CONTEXT_IN_LIST {
 		e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt)
 		return NewTProtocolExceptionWithType(INVALID_DATA, e)
@@ -1064,14 +1091,16 @@
 	for _, char := range line {
 		switch char {
 		default:
-			e := fmt.Errorf("Expecting end of list \"]\", but found: \"%s\"", line)
+			e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line)
 			return NewTProtocolExceptionWithType(INVALID_DATA, e)
 		case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]):
 			break
 		}
 	}
-	p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
-	if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL {
+	p.parseContextStack.pop()
+	if cxt, ok := p.parseContextStack.peek(); !ok {
+		return errEmptyJSONContextStack
+	} else if cxt == _CONTEXT_IN_TOPLEVEL {
 		return nil
 	}
 	return p.ParsePostValue()
@@ -1315,7 +1344,7 @@
 func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool {
 	for i := 0; i < len(b); i++ {
 		a, _ := p.reader.Peek(i + 1)
-		if len(a) == 0 || a[i] != b[i] {
+		if len(a) < (i+1) || a[i] != b[i] {
 			return false
 		}
 	}
@@ -1324,8 +1353,8 @@
 
 // Reset the context stack to its initial state.
 func (p *TSimpleJSONProtocol) resetContextStack() {
-	p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)}
-	p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)}
+	p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
+	p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
 }
 
 func (p *TSimpleJSONProtocol) write(b []byte) (int, error) {
@@ -1335,3 +1364,10 @@
 	}
 	return n, err
 }
+
+// SetTConfiguration implements TConfigurationSetter for propagation.
+func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) {
+	PropagateTConfiguration(p.trans, conf)
+}
+
+var _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil)
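For callers, the practical effect of this migration is that every protocol read/write now takes a context.Context as its first argument. The sketch below is illustrative only (skipNextField is not part of the change) and assumes the package's TProtocol interface has been migrated the same way, as the SkipDefaultDepth(ctx, p, fieldType) call above suggests.

```go
// skipNextField is an illustrative helper, not part of the upstream change.
func skipNextField(ctx context.Context, p TProtocol) error {
	_, fieldType, _, err := p.ReadFieldBegin(ctx)
	if err != nil {
		return err
	}
	if fieldType == STOP {
		return nil
	}
	// Skip now threads the context down through SkipDefaultDepth.
	if err := p.Skip(ctx, fieldType); err != nil {
		return err
	}
	return p.ReadFieldEnd(ctx)
}
```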
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go
new file mode 100644
index 0000000..563cbfc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go
@@ -0,0 +1,332 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// ErrAbandonRequest is a special error server handler implementations can
+// return to indicate that the request has been abandoned.
+//
+// TSimpleServer will check for this error, and close the client connection
+// instead of writing the response/error back to the client.
+//
+// It should only be used when the server handler implementation knows that the
+// client has already abandoned the request (for example, by checking that the
+// passed-in context is already canceled).
+var ErrAbandonRequest = errors.New("request abandoned")
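A hedged sketch of how a handler might use this sentinel; myHandler and Echo are hypothetical names, and only the ErrAbandonRequest return mirrors what TSimpleServer checks for.

```go
// myHandler and Echo are hypothetical; only the ErrAbandonRequest return
// mirrors what TSimpleServer checks for.
type myHandler struct{}

func (myHandler) Echo(ctx context.Context, msg string) (string, error) {
	if ctx.Err() != nil {
		// The client is already gone: ask the server to close the
		// connection instead of writing a response nobody will read.
		return "", ErrAbandonRequest
	}
	return msg, nil
}
```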
+
+// ServerConnectivityCheckInterval defines the ticker interval used by the
+// connectivity check in thrift compiled TProcessorFunc implementations.
+//
+// It's defined as a variable instead of a constant so that thrift server
+// implementations can change its value to control the behavior.
+//
+// If it's changed to <=0, the feature will be disabled.
+var ServerConnectivityCheckInterval = time.Millisecond * 5
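As a rough illustration of this knob (only the variable name is taken from this file; the 100ms value is arbitrary), a program could retune or disable the check before starting any server:

```go
// tuneConnectivityCheck would be called once at start-up, before Serve.
func tuneConnectivityCheck() {
	// Check for client disconnects less aggressively than the 5ms default.
	ServerConnectivityCheckInterval = 100 * time.Millisecond
	// Or disable the feature entirely:
	// ServerConnectivityCheckInterval = 0
}
```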
+
+/*
+ * This is not a typical TSimpleServer as it does not block after accepting a socket.
+ * It is more like a TThreadedServer that handles different connections in different goroutines.
+ * This works if the Go client implements a connection pool of some kind on its side.
+ */
+type TSimpleServer struct {
+	closed int32
+	wg     sync.WaitGroup
+	mu     sync.Mutex
+
+	processorFactory       TProcessorFactory
+	serverTransport        TServerTransport
+	inputTransportFactory  TTransportFactory
+	outputTransportFactory TTransportFactory
+	inputProtocolFactory   TProtocolFactory
+	outputProtocolFactory  TProtocolFactory
+
+	// Headers to auto forward in THeaderProtocol
+	forwardHeaders []string
+
+	logger Logger
+}
+
+func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
+	return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
+}
+
+func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
+		serverTransport,
+		transportFactory,
+		protocolFactory,
+	)
+}
+
+func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
+		serverTransport,
+		inputTransportFactory,
+		outputTransportFactory,
+		inputProtocolFactory,
+		outputProtocolFactory,
+	)
+}
+
+func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
+	return NewTSimpleServerFactory6(processorFactory,
+		serverTransport,
+		NewTTransportFactory(),
+		NewTTransportFactory(),
+		NewTBinaryProtocolFactoryDefault(),
+		NewTBinaryProtocolFactoryDefault(),
+	)
+}
+
+func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory6(processorFactory,
+		serverTransport,
+		transportFactory,
+		transportFactory,
+		protocolFactory,
+		protocolFactory,
+	)
+}
+
+func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
+	return &TSimpleServer{
+		processorFactory:       processorFactory,
+		serverTransport:        serverTransport,
+		inputTransportFactory:  inputTransportFactory,
+		outputTransportFactory: outputTransportFactory,
+		inputProtocolFactory:   inputProtocolFactory,
+		outputProtocolFactory:  outputProtocolFactory,
+	}
+}
+
+func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
+	return p.processorFactory
+}
+
+func (p *TSimpleServer) ServerTransport() TServerTransport {
+	return p.serverTransport
+}
+
+func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
+	return p.inputTransportFactory
+}
+
+func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
+	return p.outputTransportFactory
+}
+
+func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
+	return p.inputProtocolFactory
+}
+
+func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
+	return p.outputProtocolFactory
+}
+
+func (p *TSimpleServer) Listen() error {
+	return p.serverTransport.Listen()
+}
+
+// SetForwardHeaders sets the list of header keys that will be auto forwarded
+// while using THeaderProtocol.
+//
+// "forward" means that when the server is also a client to other upstream
+// thrift servers, the context object the user gets in the processor functions will
+// have both read and write headers set, with write headers being forwarded.
+// Users can always override the write headers by calling SetWriteHeaderList
+// before calling thrift client functions.
+func (p *TSimpleServer) SetForwardHeaders(headers []string) {
+	size := len(headers)
+	if size == 0 {
+		p.forwardHeaders = nil
+		return
+	}
+
+	keys := make([]string, size)
+	copy(keys, headers)
+	p.forwardHeaders = keys
+}
+
+// SetLogger sets the logger used by this TSimpleServer.
+//
+// If no logger was set before Serve is called, a default logger using the
+// standard log library will be used.
+func (p *TSimpleServer) SetLogger(logger Logger) {
+	p.logger = logger
+}
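Putting the constructors and setters above together, a minimal, non-normative wiring might look like the following; processor and serverTransport are assumed to be provided by the caller (e.g. generated code and a concrete TServerTransport), and the "tenant-id" header name is purely illustrative.

```go
// serveThrift is a hypothetical wiring example; processor and serverTransport
// are assumed to be provided by the caller.
func serveThrift(processor TProcessor, serverTransport TServerTransport) error {
	server := NewTSimpleServer4(
		processor,
		serverTransport,
		NewTTransportFactory(),
		NewTBinaryProtocolFactoryDefault(),
	)
	server.SetLogger(func(msg string) { fmt.Println("thrift:", msg) })
	server.SetForwardHeaders([]string{"tenant-id"}) // only relevant with THeaderProtocol
	return server.Serve()                           // blocks until Stop is called
}
```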
+
+func (p *TSimpleServer) innerAccept() (int32, error) {
+	client, err := p.serverTransport.Accept()
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	closed := atomic.LoadInt32(&p.closed)
+	if closed != 0 {
+		return closed, nil
+	}
+	if err != nil {
+		return 0, err
+	}
+	if client != nil {
+		p.wg.Add(1)
+		go func() {
+			defer p.wg.Done()
+			if err := p.processRequests(client); err != nil {
+				p.logger(fmt.Sprintf("error processing request: %v", err))
+			}
+		}()
+	}
+	return 0, nil
+}
+
+func (p *TSimpleServer) AcceptLoop() error {
+	for {
+		closed, err := p.innerAccept()
+		if err != nil {
+			return err
+		}
+		if closed != 0 {
+			return nil
+		}
+	}
+}
+
+func (p *TSimpleServer) Serve() error {
+	p.logger = fallbackLogger(p.logger)
+
+	err := p.Listen()
+	if err != nil {
+		return err
+	}
+	p.AcceptLoop()
+	return nil
+}
+
+func (p *TSimpleServer) Stop() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if atomic.LoadInt32(&p.closed) != 0 {
+		return nil
+	}
+	atomic.StoreInt32(&p.closed, 1)
+	p.serverTransport.Interrupt()
+	p.wg.Wait()
+	return nil
+}
+
+// If err is actually EOF, return nil, otherwise return err as-is.
+func treatEOFErrorsAsNil(err error) error {
+	if err == nil {
+		return nil
+	}
+	if errors.Is(err, io.EOF) {
+		return nil
+	}
+	var te TTransportException
+	if errors.As(err, &te) && te.TypeId() == END_OF_FILE {
+		return nil
+	}
+	return err
+}
+
+func (p *TSimpleServer) processRequests(client TTransport) (err error) {
+	defer func() {
+		err = treatEOFErrorsAsNil(err)
+	}()
+
+	processor := p.processorFactory.GetProcessor(client)
+	inputTransport, err := p.inputTransportFactory.GetTransport(client)
+	if err != nil {
+		return err
+	}
+	inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
+	var outputTransport TTransport
+	var outputProtocol TProtocol
+
+	// for THeaderProtocol, we must use the same protocol instance for
+	// input and output so that the response is in the same dialect that
+	// the server detected the request was in.
+	headerProtocol, ok := inputProtocol.(*THeaderProtocol)
+	if ok {
+		outputProtocol = inputProtocol
+	} else {
+		oTrans, err := p.outputTransportFactory.GetTransport(client)
+		if err != nil {
+			return err
+		}
+		outputTransport = oTrans
+		outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport)
+	}
+
+	if inputTransport != nil {
+		defer inputTransport.Close()
+	}
+	if outputTransport != nil {
+		defer outputTransport.Close()
+	}
+	for {
+		if atomic.LoadInt32(&p.closed) != 0 {
+			return nil
+		}
+
+		ctx := SetResponseHelper(
+			defaultCtx,
+			TResponseHelper{
+				THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol),
+			},
+		)
+		if headerProtocol != nil {
+			// We need to call ReadFrame here, otherwise we won't
+			// get any headers on the AddReadTHeaderToContext call.
+			//
+			// ReadFrame is safe to call multiple times, so it
+			// won't break when it's called again later, once we
+			// actually start to read the message.
+			if err := headerProtocol.ReadFrame(ctx); err != nil {
+				return err
+			}
+			ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders())
+			ctx = SetWriteHeaderList(ctx, p.forwardHeaders)
+		}
+
+		ok, err := processor.Process(ctx, inputProtocol, outputProtocol)
+		if errors.Is(err, ErrAbandonRequest) {
+			return client.Close()
+		}
+		if errors.As(err, new(TTransportException)) && err != nil {
+			return err
+		}
+		var tae TApplicationException
+		if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD {
+			continue
+		}
+		if !ok {
+			break
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
index 4538996..ba2738a 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
@@ -20,6 +20,7 @@
 package thrift
 
 import (
+	"context"
 	"errors"
 	"io"
 )
@@ -30,15 +31,18 @@
 	Flush() (err error)
 }
 
+type ContextFlusher interface {
+	Flush(ctx context.Context) (err error)
+}
+
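A minimal sketch of the new contract, with nopFlusher as a hypothetical in-package type; real transports flush their buffered bytes here and should honor cancellation.

```go
// nopFlusher is a hypothetical example type.
type nopFlusher struct{}

// Flush does no buffering of its own; it only honors cancellation.
func (nopFlusher) Flush(ctx context.Context) error {
	return ctx.Err()
}

var _ ContextFlusher = nopFlusher{}
```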
 type ReadSizeProvider interface {
 	RemainingBytes() (num_bytes uint64)
 }
 
-
 // Encapsulates the I/O layer
 type TTransport interface {
 	io.ReadWriteCloser
-	Flusher
+	ContextFlusher
 	ReadSizeProvider
 
 	// Opens the transport for communication
@@ -52,7 +56,6 @@
 	WriteString(s string) (n int, err error)
 }
 
-
 // This is "enchanced" transport with extra capabilities. You need to use one of these
 // to construct protocol.
 // Notably, TSocket does not implement this interface, and it is always a mistake to use
@@ -62,7 +65,6 @@
 	io.ByteReader
 	io.ByteWriter
 	stringWriter
-	Flusher
+	ContextFlusher
 	ReadSizeProvider
 }
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
index 9505b44..0a3f076 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
@@ -46,6 +46,13 @@
 type tTransportException struct {
 	typeId int
 	err    error
+	msg    string
+}
+
+var _ TTransportException = (*tTransportException)(nil)
+
+func (tTransportException) TExceptionType() TExceptionType {
+	return TExceptionTypeTransport
 }
 
 func (p *tTransportException) TypeId() int {
@@ -53,15 +60,27 @@
 }
 
 func (p *tTransportException) Error() string {
-	return p.err.Error()
+	return p.msg
 }
 
 func (p *tTransportException) Err() error {
 	return p.err
 }
 
+func (p *tTransportException) Unwrap() error {
+	return p.err
+}
+
+func (p *tTransportException) Timeout() bool {
+	return p.typeId == TIMED_OUT
+}
+
 func NewTTransportException(t int, e string) TTransportException {
-	return &tTransportException{typeId: t, err: errors.New(e)}
+	return &tTransportException{
+		typeId: t,
+		err:    errors.New(e),
+		msg:    e,
+	}
 }
 
 func NewTTransportExceptionFromError(e error) TTransportException {
@@ -73,18 +92,40 @@
 		return t
 	}
 
-	switch v := e.(type) {
-	case TTransportException:
-		return v
-	case timeoutable:
-		if v.Timeout() {
-			return &tTransportException{typeId: TIMED_OUT, err: e}
-		}
+	te := &tTransportException{
+		typeId: UNKNOWN_TRANSPORT_EXCEPTION,
+		err:    e,
+		msg:    e.Error(),
 	}
 
-	if e == io.EOF {
-		return &tTransportException{typeId: END_OF_FILE, err: e}
+	if isTimeoutError(e) {
+		te.typeId = TIMED_OUT
+		return te
 	}
 
-	return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e}
+	if errors.Is(e, io.EOF) {
+		te.typeId = END_OF_FILE
+		return te
+	}
+
+	return te
+}
+
+func prependTTransportException(prepend string, e TTransportException) TTransportException {
+	return &tTransportException{
+		typeId: e.TypeId(),
+		err:    e,
+		msg:    prepend + e.Error(),
+	}
+}
+
+// isTimeoutError returns true when err is an error caused by a timeout.
+//
+// Note that this also includes timeout errors wrapped in a TTransportException.
+func isTimeoutError(err error) bool {
+	var t timeoutable
+	if errors.As(err, &t) {
+		return t.Timeout()
+	}
+	return false
 }
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
index 533d1b4..c805807 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
@@ -24,14 +24,14 @@
 // a ServerTransport and then may want to mutate them (i.e. create
 // a BufferedTransport from the underlying base transport)
 type TTransportFactory interface {
-	GetTransport(trans TTransport) TTransport
+	GetTransport(trans TTransport) (TTransport, error)
 }
 
 type tTransportFactory struct{}
 
 // Return a wrapped instance of the base Transport.
-func (p *tTransportFactory) GetTransport(trans TTransport) TTransport {
-	return trans
+func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	return trans, nil
 }
 
 func NewTTransportFactory() TTransportFactory {
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go
index 8a3fc97..9a627be 100644
--- a/vendor/github.com/uber/jaeger-client-go/tracer.go
+++ b/vendor/github.com/uber/jaeger-client-go/tracer.go
@@ -216,10 +216,10 @@
 		options.StartTime = t.timeNow()
 	}
 
-	// Predicate whether the given span context is a valid reference
-	// which may be used as parent / debug ID / baggage items source
-	isValidReference := func(ctx SpanContext) bool {
-		return ctx.IsValid() || ctx.isDebugIDContainerOnly() || len(ctx.baggage) != 0
+	// Predicate whether the given span context is an empty reference,
+	// i.e. one that cannot be used as a parent / debug ID / baggage items source
+	isEmptyReference := func(ctx SpanContext) bool {
+		return !ctx.IsValid() && !ctx.isDebugIDContainerOnly() && len(ctx.baggage) == 0
 	}
 
 	var references []Reference
@@ -235,7 +235,7 @@
 				reflect.ValueOf(ref.ReferencedContext)))
 			continue
 		}
-		if !isValidReference(ctxRef) {
+		if isEmptyReference(ctxRef) {
 			continue
 		}
 
@@ -245,14 +245,17 @@
 			continue
 		}
 
-		references = append(references, Reference{Type: ref.Type, Context: ctxRef})
+		if ctxRef.IsValid() {
+			// we don't want an empty context that contains only a debug ID or baggage items
+			references = append(references, Reference{Type: ref.Type, Context: ctxRef})
+		}
 
 		if !hasParent {
 			parent = ctxRef
 			hasParent = ref.Type == opentracing.ChildOfRef
 		}
 	}
-	if !hasParent && isValidReference(parent) {
+	if !hasParent && !isEmptyReference(parent) {
 		// If ChildOfRef wasn't found but a FollowFromRef exists, use the context from
 		// the FollowFromRef as the parent
 		hasParent = true
@@ -317,7 +320,7 @@
 	sp.references = references
 	sp.firstInProcess = rpcServer || sp.context.parentID == 0
 
-	if !sp.isSamplingFinalized() {
+	if !sp.context.isSamplingFinalized() {
 		decision := t.sampler.OnCreateSpan(sp)
 		sp.applySamplingDecision(decision, false)
 	}
@@ -410,7 +413,7 @@
 // calling client-side span, but obviously the server side span is
 // no longer a root span of the trace.
 func (t *Tracer) emitNewSpanMetrics(sp *Span, newTrace bool) {
-	if !sp.isSamplingFinalized() {
+	if !sp.context.isSamplingFinalized() {
 		t.metrics.SpansStartedDelayedSampling.Inc(1)
 		if newTrace {
 			t.metrics.TracesStartedDelayedSampling.Inc(1)
@@ -434,9 +437,11 @@
 }
 
 func (t *Tracer) reportSpan(sp *Span) {
-	if !sp.isSamplingFinalized() {
+	ctx := sp.SpanContext()
+
+	if !ctx.isSamplingFinalized() {
 		t.metrics.SpansFinishedDelayedSampling.Inc(1)
-	} else if sp.context.IsSampled() {
+	} else if ctx.IsSampled() {
 		t.metrics.SpansFinishedSampled.Inc(1)
 	} else {
 		t.metrics.SpansFinishedNotSampled.Inc(1)
@@ -445,7 +450,7 @@
 	// Note: if the reporter is processing Span asynchronously then it needs to Retain() the span,
 	// and then Release() it when no longer needed.
 	// Otherwise, the span may be reused for another trace and its data may be overwritten.
-	if sp.context.IsSampled() {
+	if ctx.IsSampled() {
 		t.reporter.Report(sp)
 	}
 
diff --git a/vendor/github.com/uber/jaeger-client-go/transport/http.go b/vendor/github.com/uber/jaeger-client-go/transport/http.go
index bb7eb00..1d6f14d 100644
--- a/vendor/github.com/uber/jaeger-client-go/transport/http.go
+++ b/vendor/github.com/uber/jaeger-client-go/transport/http.go
@@ -16,6 +16,7 @@
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -167,7 +168,7 @@
 func serializeThrift(obj thrift.TStruct) (*bytes.Buffer, error) {
 	t := thrift.NewTMemoryBuffer()
 	p := thrift.NewTBinaryProtocolTransport(t)
-	if err := obj.Write(p); err != nil {
+	if err := obj.Write(context.Background(), p); err != nil {
 		return nil, err
 	}
 	return t.Buffer, nil
diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
index 7370d80..0000412 100644
--- a/vendor/github.com/uber/jaeger-client-go/transport_udp.go
+++ b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
@@ -15,10 +15,12 @@
 package jaeger
 
 import (
+	"context"
 	"errors"
 	"fmt"
 
 	"github.com/uber/jaeger-client-go/internal/reporterstats"
+	"github.com/uber/jaeger-client-go/log"
 	"github.com/uber/jaeger-client-go/thrift"
 	j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
 	"github.com/uber/jaeger-client-go/utils"
@@ -57,35 +59,57 @@
 	failedToEmitSpans    int64
 }
 
-// NewUDPTransport creates a reporter that submits spans to jaeger-agent.
+// UDPTransportParams allows specifying options for initializing a UDPTransport. An instance of this struct should
+// be passed to NewUDPTransportWithParams.
+type UDPTransportParams struct {
+	utils.AgentClientUDPParams
+}
+
+// NewUDPTransportWithParams creates a reporter that submits spans to jaeger-agent.
 // TODO: (breaking change) move to transport/ package.
-func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) {
-	if len(hostPort) == 0 {
-		hostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort)
+func NewUDPTransportWithParams(params UDPTransportParams) (Transport, error) {
+	if len(params.HostPort) == 0 {
+		params.HostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort)
 	}
-	if maxPacketSize == 0 {
-		maxPacketSize = utils.UDPPacketMaxLength
+
+	if params.Logger == nil {
+		params.Logger = log.StdLogger
+	}
+
+	if params.MaxPacketSize == 0 {
+		params.MaxPacketSize = utils.UDPPacketMaxLength
 	}
 
 	protocolFactory := thrift.NewTCompactProtocolFactory()
 
 	// Each span is first written to thriftBuffer to determine its size in bytes.
-	thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize)
+	thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
 	thriftProtocol := protocolFactory.GetProtocol(thriftBuffer)
 
-	client, err := utils.NewAgentClientUDP(hostPort, maxPacketSize)
+	client, err := utils.NewAgentClientUDPWithParams(params.AgentClientUDPParams)
 	if err != nil {
 		return nil, err
 	}
 
 	return &udpSender{
 		client:         client,
-		maxSpanBytes:   maxPacketSize - emitBatchOverhead,
+		maxSpanBytes:   params.MaxPacketSize - emitBatchOverhead,
 		thriftBuffer:   thriftBuffer,
 		thriftProtocol: thriftProtocol,
 	}, nil
 }
 
+// NewUDPTransport creates a reporter that submits spans to jaeger-agent.
+// TODO: (breaking change) move to transport/ package.
+func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) {
+	return NewUDPTransportWithParams(UDPTransportParams{
+		AgentClientUDPParams: utils.AgentClientUDPParams{
+			HostPort:      hostPort,
+			MaxPacketSize: maxPacketSize,
+		},
+	})
+}
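As a usage sketch of the params-based constructor (the host:port and the 30-second re-resolution interval are placeholder values, not defaults taken from this file):

```go
package tracing

import (
	"time"

	"github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/utils"
)

// newAgentTransport builds the UDP reporter transport via the new
// params-based constructor.
func newAgentTransport() (jaeger.Transport, error) {
	return jaeger.NewUDPTransportWithParams(jaeger.UDPTransportParams{
		AgentClientUDPParams: utils.AgentClientUDPParams{
			HostPort:                 "jaeger-agent:6831",
			AttemptReconnectInterval: 30 * time.Second,
		},
	})
}
```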
+
 // SetReporterStats implements reporterstats.Receiver.
 func (s *udpSender) SetReporterStats(rs reporterstats.ReporterStats) {
 	s.reporterStats = rs
@@ -93,7 +117,7 @@
 
 func (s *udpSender) calcSizeOfSerializedThrift(thriftStruct thrift.TStruct) int {
 	s.thriftBuffer.Reset()
-	_ = thriftStruct.Write(s.thriftProtocol)
+	_ = thriftStruct.Write(context.Background(), s.thriftProtocol)
 	return s.thriftBuffer.Len()
 }
 
@@ -132,7 +156,7 @@
 	}
 	s.batchSeqNo++
 	batchSeqNo := int64(s.batchSeqNo)
-	err := s.client.EmitBatch(&j.Batch{
+	err := s.client.EmitBatch(context.Background(), &j.Batch{
 		Process: s.process,
 		Spans:   s.spanBuffer,
 		SeqNo:   &batchSeqNo,
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go b/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go
new file mode 100644
index 0000000..0dffc7f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go
@@ -0,0 +1,189 @@
+// Copyright (c) 2020 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"fmt"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/uber/jaeger-client-go/log"
+)
+
+// reconnectingUDPConn is an implementation of udpConn that re-resolves hostPort every resolveTimeout; if the resolved
+// address differs from the current conn's, the new address is dialed and the conn is swapped.
+type reconnectingUDPConn struct {
+	hostPort    string
+	resolveFunc resolveFunc
+	dialFunc    dialFunc
+	logger      log.Logger
+	bufferBytes int64
+
+	connMtx   sync.RWMutex
+	conn      *net.UDPConn
+	destAddr  *net.UDPAddr
+	closeChan chan struct{}
+}
+
+type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error)
+type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error)
+
+// newReconnectingUDPConn returns a new udpConn that re-resolves hostPort every resolveTimeout; if the resolved
+// address differs from the current conn's, the new address is dialed and the conn is swapped.
+func newReconnectingUDPConn(hostPort string, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger log.Logger) (*reconnectingUDPConn, error) {
+	conn := &reconnectingUDPConn{
+		hostPort:    hostPort,
+		resolveFunc: resolveFunc,
+		dialFunc:    dialFunc,
+		logger:      logger,
+		closeChan:   make(chan struct{}),
+	}
+
+	if err := conn.attemptResolveAndDial(); err != nil {
+		logger.Error(fmt.Sprintf("failed resolving destination address on connection startup, with err: %q. retrying in %s", err.Error(), resolveTimeout))
+	}
+
+	go conn.reconnectLoop(resolveTimeout)
+
+	return conn, nil
+}
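An in-package sketch (the type is unexported) mirroring how NewAgentClientUDPWithParams in udp_client.go wires the standard library resolver and dialer into this type; the 30-second interval is illustrative.

```go
// dialAgent is an in-package sketch, not part of the change.
func dialAgent(hostPort string) (*reconnectingUDPConn, error) {
	return newReconnectingUDPConn(hostPort, 30*time.Second, net.ResolveUDPAddr, net.DialUDP, log.StdLogger)
}
```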
+
+func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) {
+	ticker := time.NewTicker(resolveTimeout)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-c.closeChan:
+			return
+		case <-ticker.C:
+			if err := c.attemptResolveAndDial(); err != nil {
+				c.logger.Error(err.Error())
+			}
+		}
+	}
+}
+
+func (c *reconnectingUDPConn) attemptResolveAndDial() error {
+	newAddr, err := c.resolveFunc("udp", c.hostPort)
+	if err != nil {
+		return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err)
+	}
+
+	c.connMtx.RLock()
+	curAddr := c.destAddr
+	c.connMtx.RUnlock()
+
+	// don't attempt to dial if an addr was dialed successfully before and the newly resolved addr is the same as the current conn's
+	if curAddr != nil && newAddr.String() == curAddr.String() {
+		return nil
+	}
+
+	if err := c.attemptDialNewAddr(newAddr); err != nil {
+		return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err)
+	}
+
+	return nil
+}
+
+func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error {
+	connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr)
+	if err != nil {
+		return err
+	}
+
+	if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 {
+		if err = connUDP.SetWriteBuffer(bufferBytes); err != nil {
+			return err
+		}
+	}
+
+	c.connMtx.Lock()
+	c.destAddr = newAddr
+	// store prev to close later
+	prevConn := c.conn
+	c.conn = connUDP
+	c.connMtx.Unlock()
+
+	if prevConn != nil {
+		return prevConn.Close()
+	}
+
+	return nil
+}
+
+// Write calls Write on the underlying *net.UDPConn; if it fails, an attempt is made to resolve and dial a new addr, and if that succeeds the write is retried before returning.
+func (c *reconnectingUDPConn) Write(b []byte) (int, error) {
+	var bytesWritten int
+	var err error
+
+	c.connMtx.RLock()
+	if c.conn == nil {
+		// if the connection is not initialized, signal this via err so we fall into the retry logic below
+		err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved")
+	} else {
+		bytesWritten, err = c.conn.Write(b)
+	}
+	c.connMtx.RUnlock()
+
+	if err == nil {
+		return bytesWritten, nil
+	}
+
+	// attempt to resolve and dial a new address in case that's the problem; if resolving and dialing succeed, try the write again
+	if reconnErr := c.attemptResolveAndDial(); reconnErr == nil {
+		c.connMtx.RLock()
+		defer c.connMtx.RUnlock()
+		return c.conn.Write(b)
+	}
+
+	// return original error if reconn fails
+	return bytesWritten, err
+}
+
+// Close stops the reconnectLoop, then closes the connection via the underlying *net.UDPConn's Close.
+func (c *reconnectingUDPConn) Close() error {
+	close(c.closeChan)
+
+	// acquire rw lock before closing conn to ensure calls to Write drain
+	c.connMtx.Lock()
+	defer c.connMtx.Unlock()
+
+	if c.conn != nil {
+		return c.conn.Close()
+	}
+
+	return nil
+}
+
+// SetWriteBuffer defers to the underlying *net.UDPConn's SetWriteBuffer implementation, wrapped with an RLock. If no conn
+// is currently held, the requested size is stored in bufferBytes and applied to newly dialed conns.
+func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error {
+	var err error
+
+	c.connMtx.RLock()
+	if c.conn != nil {
+		err = c.conn.SetWriteBuffer(bytes)
+	}
+	c.connMtx.RUnlock()
+
+	if err == nil {
+		atomic.StoreInt64(&c.bufferBytes, int64(bytes))
+	}
+
+	return err
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
index fadd73e..4c59ae9 100644
--- a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
+++ b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
@@ -15,11 +15,14 @@
 package utils
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"io"
 	"net"
+	"time"
 
+	"github.com/uber/jaeger-client-go/log"
 	"github.com/uber/jaeger-client-go/thrift"
 
 	"github.com/uber/jaeger-client-go/thrift-gen/agent"
@@ -35,53 +38,101 @@
 	agent.Agent
 	io.Closer
 
-	connUDP       *net.UDPConn
+	connUDP       udpConn
 	client        *agent.AgentClient
 	maxPacketSize int                   // max size of datagram in bytes
 	thriftBuffer  *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
 }
 
-// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP.
-func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) {
-	if maxPacketSize == 0 {
-		maxPacketSize = UDPPacketMaxLength
+type udpConn interface {
+	Write([]byte) (int, error)
+	SetWriteBuffer(int) error
+	Close() error
+}
+
+// AgentClientUDPParams allows specifying options for initializing an AgentClientUDP. An instance of this struct should
+// be passed to NewAgentClientUDPWithParams.
+type AgentClientUDPParams struct {
+	HostPort                   string
+	MaxPacketSize              int
+	Logger                     log.Logger
+	DisableAttemptReconnecting bool
+	AttemptReconnectInterval   time.Duration
+}
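A short, non-normative construction sketch in package utils terms; the host:port is a placeholder.

```go
// newAgentClient is an in-package sketch, not part of the change.
func newAgentClient() (*AgentClientUDP, error) {
	return NewAgentClientUDPWithParams(AgentClientUDPParams{
		HostPort:      "jaeger-agent:6831",
		MaxPacketSize: 0, // 0 falls back to UDPPacketMaxLength
		// DisableAttemptReconnecting: true, // pin to the first resolved address instead
	})
}
```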
+
+// NewAgentClientUDPWithParams creates a client that sends spans to Jaeger Agent over UDP.
+func NewAgentClientUDPWithParams(params AgentClientUDPParams) (*AgentClientUDP, error) {
+	// validate hostport
+	if _, _, err := net.SplitHostPort(params.HostPort); err != nil {
+		return nil, err
 	}
 
-	thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize)
+	if params.MaxPacketSize == 0 {
+		params.MaxPacketSize = UDPPacketMaxLength
+	}
+
+	if params.Logger == nil {
+		params.Logger = log.StdLogger
+	}
+
+	if !params.DisableAttemptReconnecting && params.AttemptReconnectInterval == 0 {
+		params.AttemptReconnectInterval = time.Second * 30
+	}
+
+	thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
 	protocolFactory := thrift.NewTCompactProtocolFactory()
 	client := agent.NewAgentClientFactory(thriftBuffer, protocolFactory)
 
-	destAddr, err := net.ResolveUDPAddr("udp", hostPort)
-	if err != nil {
+	var connUDP udpConn
+	var err error
+
+	if params.DisableAttemptReconnecting {
+		destAddr, err := net.ResolveUDPAddr("udp", params.HostPort)
+		if err != nil {
+			return nil, err
+		}
+
+		connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		// otherwise set up a resolver loop so the connection follows changes to the host record during operation
+		connUDP, err = newReconnectingUDPConn(params.HostPort, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil {
 		return nil, err
 	}
 
-	connUDP, err := net.DialUDP(destAddr.Network(), nil, destAddr)
-	if err != nil {
-		return nil, err
-	}
-	if err := connUDP.SetWriteBuffer(maxPacketSize); err != nil {
-		return nil, err
-	}
-
-	clientUDP := &AgentClientUDP{
+	return &AgentClientUDP{
 		connUDP:       connUDP,
 		client:        client,
-		maxPacketSize: maxPacketSize,
-		thriftBuffer:  thriftBuffer}
-	return clientUDP, nil
+		maxPacketSize: params.MaxPacketSize,
+		thriftBuffer:  thriftBuffer,
+	}, nil
+}
+
+// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP.
+func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) {
+	return NewAgentClientUDPWithParams(AgentClientUDPParams{
+		HostPort:      hostPort,
+		MaxPacketSize: maxPacketSize,
+	})
 }
 
 // EmitZipkinBatch implements EmitZipkinBatch() of Agent interface
-func (a *AgentClientUDP) EmitZipkinBatch(spans []*zipkincore.Span) error {
+func (a *AgentClientUDP) EmitZipkinBatch(context.Context, []*zipkincore.Span) error {
 	return errors.New("Not implemented")
 }
 
 // EmitBatch implements EmitBatch() of Agent interface
-func (a *AgentClientUDP) EmitBatch(batch *jaeger.Batch) error {
+func (a *AgentClientUDP) EmitBatch(ctx context.Context, batch *jaeger.Batch) error {
 	a.thriftBuffer.Reset()
-	a.client.SeqId = 0 // we have no need for distinct SeqIds for our one-way UDP messages
-	if err := a.client.EmitBatch(batch); err != nil {
+	if err := a.client.EmitBatch(ctx, batch); err != nil {
 		return err
 	}
 	if a.thriftBuffer.Len() > a.maxPacketSize {